/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: AMD
 */
/* The caprices of the preprocessor require that this be declared right here */
#define CREATE_TRACE_POINTS

#include "dm_services_types.h"
#include "dc.h"
#include "dc_link_dp.h"
#include "dc/inc/core_types.h"
#include "dal_asic_id.h"
#include "dmub/dmub_srv.h"
#include "dc/inc/hw/dmcu.h"
#include "dc/inc/hw/abm.h"
#include "dc/dc_dmub_srv.h"
#include "dc/dc_edid_parser.h"
#include "dc/dc_stat.h"
#include "amdgpu_dm_trace.h"

#include "vid.h"
#include "amdgpu.h"
#include "amdgpu_display.h"
#include "amdgpu_ucode.h"

#include "amdgpu_dm.h"
#ifdef CONFIG_DRM_AMD_DC_HDCP
#include "amdgpu_dm_hdcp.h"
#include <drm/drm_hdcp.h>
#endif
#include "amdgpu_pm.h"

#include "amd_shared.h"
#include "amdgpu_dm_irq.h"
#include "dm_helpers.h"
#include "amdgpu_dm_mst_types.h"
#if defined(CONFIG_DEBUG_FS)
#include "amdgpu_dm_debugfs.h"
#endif

#include "ivsrcid/ivsrcid_vislands30.h"

#include "i2caux_interface.h"
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/types.h>
#include <linux/pm_runtime.h>
#include <linux/pci.h>
#include <linux/firmware.h>
#include <linux/component.h>

#include <drm/drm_atomic.h>
#include <drm/drm_atomic_uapi.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_dp_mst_helper.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_edid.h>
#include <drm/drm_vblank.h>
#include <drm/drm_audio_component.h>
#if defined(CONFIG_DRM_AMD_DC_DCN)
#include "ivsrcid/dcn/irqsrcs_dcn_1_0.h"

#include "dcn/dcn_1_0_offset.h"
#include "dcn/dcn_1_0_sh_mask.h"
#include "soc15_hw_ip.h"
#include "vega10_ip_offset.h"

#include "soc15_common.h"
#endif

#include "modules/inc/mod_freesync.h"
#include "modules/power/power_helpers.h"
#include "modules/inc/mod_info_packet.h"
#define FIRMWARE_RENOIR_DMUB "amdgpu/renoir_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_RENOIR_DMUB);
#define FIRMWARE_SIENNA_CICHLID_DMUB "amdgpu/sienna_cichlid_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_SIENNA_CICHLID_DMUB);
#define FIRMWARE_NAVY_FLOUNDER_DMUB "amdgpu/navy_flounder_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_NAVY_FLOUNDER_DMUB);
#define FIRMWARE_GREEN_SARDINE_DMUB "amdgpu/green_sardine_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_GREEN_SARDINE_DMUB);
#define FIRMWARE_VANGOGH_DMUB "amdgpu/vangogh_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_VANGOGH_DMUB);
#define FIRMWARE_DIMGREY_CAVEFISH_DMUB "amdgpu/dimgrey_cavefish_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_DIMGREY_CAVEFISH_DMUB);
#define FIRMWARE_BEIGE_GOBY_DMUB "amdgpu/beige_goby_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_BEIGE_GOBY_DMUB);

#define FIRMWARE_RAVEN_DMCU "amdgpu/raven_dmcu.bin"
MODULE_FIRMWARE(FIRMWARE_RAVEN_DMCU);

#define FIRMWARE_NAVI12_DMCU "amdgpu/navi12_dmcu.bin"
MODULE_FIRMWARE(FIRMWARE_NAVI12_DMCU);
/* Number of bytes in PSP header for firmware. */
#define PSP_HEADER_BYTES 0x100

/* Number of bytes in PSP footer for firmware. */
#define PSP_FOOTER_BYTES 0x100
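/*
 * Note: when the DMCUB binary is packaged for PSP, the instruction payload
 * is wrapped with a PSP header and footer of the sizes above. The backdoor
 * load path in dm_dmub_hw_init() skips them when copying inst_const into
 * the DMUB framebuffer window.
 */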
/**
 * DOC: overview
 *
 * The AMDgpu display manager, **amdgpu_dm** (or even simpler,
 * **dm**) sits between DRM and DC. It acts as a liaison, converting DRM
 * requests into DC requests, and DC responses into DRM responses.
 *
 * The root control structure is &struct amdgpu_display_manager.
 */
/* basic init/fini API */
static int amdgpu_dm_init(struct amdgpu_device *adev);
static void amdgpu_dm_fini(struct amdgpu_device *adev);
static bool is_freesync_video_mode(const struct drm_display_mode *mode, struct amdgpu_dm_connector *aconnector);
static enum drm_mode_subconnector get_subconnector_type(struct dc_link *link)
{
	switch (link->dpcd_caps.dongle_type) {
	case DISPLAY_DONGLE_NONE:
		return DRM_MODE_SUBCONNECTOR_Native;
	case DISPLAY_DONGLE_DP_VGA_CONVERTER:
		return DRM_MODE_SUBCONNECTOR_VGA;
	case DISPLAY_DONGLE_DP_DVI_CONVERTER:
	case DISPLAY_DONGLE_DP_DVI_DONGLE:
		return DRM_MODE_SUBCONNECTOR_DVID;
	case DISPLAY_DONGLE_DP_HDMI_CONVERTER:
	case DISPLAY_DONGLE_DP_HDMI_DONGLE:
		return DRM_MODE_SUBCONNECTOR_HDMIA;
	case DISPLAY_DONGLE_DP_HDMI_MISMATCHED_DONGLE:
	default:
		return DRM_MODE_SUBCONNECTOR_Unknown;
	}
}
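/*
 * Reflect the detected DP dongle type in the DRM "subconnector" property.
 * This only applies to DisplayPort connectors; without an attached sink the
 * property is reset to Unknown.
 */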
static void update_subconnector_property(struct amdgpu_dm_connector *aconnector)
{
	struct dc_link *link = aconnector->dc_link;
	struct drm_connector *connector = &aconnector->base;
	enum drm_mode_subconnector subconnector = DRM_MODE_SUBCONNECTOR_Unknown;

	if (connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort)
		return;

	if (aconnector->dc_sink)
		subconnector = get_subconnector_type(link);

	drm_object_property_set_value(&connector->base,
			connector->dev->mode_config.dp_subconnector_property,
			subconnector);
}
/*
 * initializes drm_device display related structures, based on the information
 * provided by DAL. The drm structures are: drm_crtc, drm_connector,
 * drm_encoder, drm_mode_config
 *
 * Returns 0 on success
 */
static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev);

/* removes and deallocates the drm structures, created by the above function */
static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm);

static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
				struct drm_plane *plane,
				unsigned long possible_crtcs,
				const struct dc_plane_cap *plane_cap);
static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
			       struct drm_plane *plane,
			       uint32_t link_index);
static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
				    struct amdgpu_dm_connector *amdgpu_dm_connector,
				    uint32_t link_index,
				    struct amdgpu_encoder *amdgpu_encoder);
static int amdgpu_dm_encoder_init(struct drm_device *dev,
				  struct amdgpu_encoder *aencoder,
				  uint32_t link_index);

static int amdgpu_dm_connector_get_modes(struct drm_connector *connector);

static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state);

static int amdgpu_dm_atomic_check(struct drm_device *dev,
				  struct drm_atomic_state *state);

static void handle_cursor_update(struct drm_plane *plane,
				 struct drm_plane_state *old_plane_state);

static void amdgpu_dm_set_psr_caps(struct dc_link *link);
static bool amdgpu_dm_psr_enable(struct dc_stream_state *stream);
static bool amdgpu_dm_link_setup_psr(struct dc_stream_state *stream);
static bool amdgpu_dm_psr_disable(struct dc_stream_state *stream);
static bool amdgpu_dm_psr_disable_all(struct amdgpu_display_manager *dm);

static const struct drm_format_info *
amd_get_format_info(const struct drm_mode_fb_cmd2 *cmd);

static bool
is_timing_unchanged_for_freesync(struct drm_crtc_state *old_crtc_state,
				 struct drm_crtc_state *new_crtc_state);
/*
 * dm_vblank_get_counter
 *
 * @brief
 * Get counter for number of vertical blanks
 *
 * @param
 * struct amdgpu_device *adev - [in] desired amdgpu device
 * int disp_idx - [in] which CRTC to get the counter from
 *
 * @return
 * Counter for vertical blanks
 */
static u32 dm_vblank_get_counter(struct amdgpu_device *adev, int crtc)
{
	if (crtc >= adev->mode_info.num_crtc)
		return 0;
	else {
		struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];

		if (acrtc->dm_irq_params.stream == NULL) {
			DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
				  crtc);
			return 0;
		}

		return dc_stream_get_vblank_counter(acrtc->dm_irq_params.stream);
	}
}
static int dm_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
				  u32 *vbl, u32 *position)
{
	uint32_t v_blank_start, v_blank_end, h_position, v_position;

	if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc))
		return -EINVAL;
	else {
		struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];

		if (acrtc->dm_irq_params.stream == NULL) {
			DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
				  crtc);
			return 0;
		}

		/*
		 * TODO rework base driver to use values directly.
		 * for now parse it back into reg-format
		 */
		dc_stream_get_scanoutpos(acrtc->dm_irq_params.stream,
					 &v_blank_start,
					 &v_blank_end,
					 &h_position,
					 &v_position);

		*position = v_position | (h_position << 16);
		*vbl = v_blank_start | (v_blank_end << 16);
	}

	return 0;
}
static bool dm_is_idle(void *handle)
{
	/* XXX todo */
	return true;
}

static int dm_wait_for_idle(void *handle)
{
	/* XXX todo */
	return 0;
}

static bool dm_check_soft_reset(void *handle)
{
	return false;
}

static int dm_soft_reset(void *handle)
{
	/* XXX todo */
	return 0;
}
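/*
 * Look up the amdgpu_crtc driving a given OTG (output timing generator)
 * instance. An instance of -1 is not expected; warn and fall back to the
 * first CRTC in that case.
 */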
static struct amdgpu_crtc *
get_crtc_by_otg_inst(struct amdgpu_device *adev,
		     int otg_inst)
{
	struct drm_device *dev = adev_to_drm(adev);
	struct drm_crtc *crtc;
	struct amdgpu_crtc *amdgpu_crtc;

	if (otg_inst == -1) {
		WARN_ON(1);
		return adev->mode_info.crtcs[0];
	}

	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
		amdgpu_crtc = to_amdgpu_crtc(crtc);

		if (amdgpu_crtc->otg_inst == otg_inst)
			return amdgpu_crtc;
	}

	return NULL;
}
static inline bool amdgpu_dm_vrr_active_irq(struct amdgpu_crtc *acrtc)
{
	return acrtc->dm_irq_params.freesync_config.state ==
		       VRR_STATE_ACTIVE_VARIABLE ||
	       acrtc->dm_irq_params.freesync_config.state ==
		       VRR_STATE_ACTIVE_FIXED;
}

static inline bool amdgpu_dm_vrr_active(struct dm_crtc_state *dm_state)
{
	return dm_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE ||
	       dm_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED;
}
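/*
 * DC needs its stream's vmin/vmax adjusted whenever the VRR active state
 * toggles, and also when (re)entering the active-fixed VRR state.
 */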
static inline bool is_dc_timing_adjust_needed(struct dm_crtc_state *old_state,
					      struct dm_crtc_state *new_state)
{
	if (new_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED)
		return true;
	else if (amdgpu_dm_vrr_active(old_state) != amdgpu_dm_vrr_active(new_state))
		return true;
	else
		return false;
}
/**
 * dm_pflip_high_irq() - Handle pageflip interrupt
 * @interrupt_params: ignored
 *
 * Handles the pageflip interrupt by notifying all interested parties
 * that the pageflip has been completed.
 */
static void dm_pflip_high_irq(void *interrupt_params)
{
	struct amdgpu_crtc *amdgpu_crtc;
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	unsigned long flags;
	struct drm_pending_vblank_event *e;
	uint32_t vpos, hpos, v_blank_start, v_blank_end;
	bool vrr_active;

	amdgpu_crtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_PFLIP);

	/* IRQ could occur when in initial stage */
	/* TODO work and BO cleanup */
	if (amdgpu_crtc == NULL) {
		DC_LOG_PFLIP("CRTC is null, returning.\n");
		return;
	}

	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);

	if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED) {
		DC_LOG_PFLIP("amdgpu_crtc->pflip_status = %d != AMDGPU_FLIP_SUBMITTED(%d) on crtc:%d[%p]\n",
			     amdgpu_crtc->pflip_status,
			     AMDGPU_FLIP_SUBMITTED,
			     amdgpu_crtc->crtc_id,
			     amdgpu_crtc);
		spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
		return;
	}

	/* page flip completed. */
	e = amdgpu_crtc->event;
	amdgpu_crtc->event = NULL;

	if (!e)
		WARN_ON(1);

	vrr_active = amdgpu_dm_vrr_active_irq(amdgpu_crtc);

	/* Fixed refresh rate, or VRR scanout position outside front-porch? */
	if (!vrr_active ||
	    !dc_stream_get_scanoutpos(amdgpu_crtc->dm_irq_params.stream, &v_blank_start,
				      &v_blank_end, &hpos, &vpos) ||
	    (vpos < v_blank_start)) {
		/* Update to correct count and vblank timestamp if racing with
		 * vblank irq. This also updates to the correct vblank timestamp
		 * even in VRR mode, as scanout is past the front-porch atm.
		 */
		drm_crtc_accurate_vblank_count(&amdgpu_crtc->base);

		/* Wake up userspace by sending the pageflip event with proper
		 * count and timestamp of vblank of flip completion.
		 */
		if (e) {
			drm_crtc_send_vblank_event(&amdgpu_crtc->base, e);

			/* Event sent, so done with vblank for this flip */
			drm_crtc_vblank_put(&amdgpu_crtc->base);
		}
	} else if (e) {
		/* VRR active and inside front-porch: vblank count and
		 * timestamp for pageflip event will only be up to date after
		 * drm_crtc_handle_vblank() has been executed from late vblank
		 * irq handler after start of back-porch (vline 0). We queue the
		 * pageflip event for send-out by drm_crtc_handle_vblank() with
		 * updated timestamp and count, once it runs after us.
		 *
		 * We need to open-code this instead of using the helper
		 * drm_crtc_arm_vblank_event(), as that helper would
		 * call drm_crtc_accurate_vblank_count(), which we must
		 * not call in VRR mode while we are in front-porch!
		 */

		/* sequence will be replaced by real count during send-out. */
		e->sequence = drm_crtc_vblank_count(&amdgpu_crtc->base);
		e->pipe = amdgpu_crtc->crtc_id;

		list_add_tail(&e->base.link, &adev_to_drm(adev)->vblank_event_list);
		e = NULL;
	}

	/* Keep track of vblank of this flip for flip throttling. We use the
	 * cooked hw counter, as that one incremented at start of this vblank
	 * of pageflip completion, so last_flip_vblank is the forbidden count
	 * for queueing new pageflips if vsync + VRR is enabled.
	 */
	amdgpu_crtc->dm_irq_params.last_flip_vblank =
		amdgpu_get_vblank_counter_kms(&amdgpu_crtc->base);

	amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE;
	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);

	DC_LOG_PFLIP("crtc:%d[%p], pflip_stat:AMDGPU_FLIP_NONE, vrr[%d]-fp %d\n",
		     amdgpu_crtc->crtc_id, amdgpu_crtc,
		     vrr_active, (int)!e);
}
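/**
 * dm_vupdate_high_irq() - Handles VUPDATE interrupt
 * @interrupt_params: used for determining the VUPDATE instance
 *
 * In VRR mode, core vblank handling is deferred to this handler so that it
 * runs after the end of the front-porch; it also performs below-the-range
 * BTR processing for pre-DCE12 ASICs and samples the measured frame
 * duration for refresh-rate tracing.
 */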
static void dm_vupdate_high_irq(void *interrupt_params)
{
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	struct amdgpu_crtc *acrtc;
	struct drm_device *drm_dev;
	struct drm_vblank_crtc *vblank;
	ktime_t frame_duration_ns, previous_timestamp;
	bool vrr_active;
	unsigned long flags;

	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VUPDATE);

	if (acrtc) {
		vrr_active = amdgpu_dm_vrr_active_irq(acrtc);
		drm_dev = acrtc->base.dev;
		vblank = &drm_dev->vblank[acrtc->base.index];
		previous_timestamp = atomic64_read(&irq_params->previous_timestamp);
		frame_duration_ns = vblank->time - previous_timestamp;

		if (frame_duration_ns > 0) {
			trace_amdgpu_refresh_rate_track(acrtc->base.index,
						frame_duration_ns,
						ktime_divns(NSEC_PER_SEC, frame_duration_ns));
			atomic64_set(&irq_params->previous_timestamp, vblank->time);
		}

		DC_LOG_VBLANK("crtc:%d, vupdate-vrr:%d\n",
			      acrtc->crtc_id,
			      vrr_active);

		/* Core vblank handling is done here after end of front-porch in
		 * vrr mode, as vblank timestamping will give valid results
		 * while now done after front-porch. This will also deliver
		 * page-flip completion events that have been queued to us
		 * if a pageflip happened inside front-porch.
		 */
		if (vrr_active) {
			drm_crtc_handle_vblank(&acrtc->base);

			/* BTR processing for pre-DCE12 ASICs */
			if (acrtc->dm_irq_params.stream &&
			    adev->family < AMDGPU_FAMILY_AI) {
				spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
				mod_freesync_handle_v_update(
						adev->dm.freesync_module,
						acrtc->dm_irq_params.stream,
						&acrtc->dm_irq_params.vrr_params);

				dc_stream_adjust_vmin_vmax(
						adev->dm.dc,
						acrtc->dm_irq_params.stream,
						&acrtc->dm_irq_params.vrr_params.adjust);
				spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
			}
		}
	}
}
/**
 * dm_crtc_high_irq() - Handles CRTC interrupt
 * @interrupt_params: used for determining the CRTC instance
 *
 * Handles the CRTC/VSYNC interrupt by notifying DRM's VBLANK
 * event handler.
 */
static void dm_crtc_high_irq(void *interrupt_params)
{
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	struct amdgpu_crtc *acrtc;
	unsigned long flags;
	int vrr_active;

	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VBLANK);
	if (!acrtc)
		return;

	vrr_active = amdgpu_dm_vrr_active_irq(acrtc);

	DC_LOG_VBLANK("crtc:%d, vupdate-vrr:%d, planes:%d\n", acrtc->crtc_id,
		      vrr_active, acrtc->dm_irq_params.active_planes);

	/*
	 * Core vblank handling at start of front-porch is only possible
	 * in non-vrr mode, as only there vblank timestamping will give
	 * valid results while done in front-porch. Otherwise defer it
	 * to dm_vupdate_high_irq after end of front-porch.
	 */
	if (!vrr_active)
		drm_crtc_handle_vblank(&acrtc->base);

	/*
	 * Following stuff must happen at start of vblank, for crc
	 * computation and below-the-range btr support in vrr mode.
	 */
	amdgpu_dm_crtc_handle_crc_irq(&acrtc->base);

	/* BTR updates need to happen before VUPDATE on Vega and above. */
	if (adev->family < AMDGPU_FAMILY_AI)
		return;

	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);

	if (acrtc->dm_irq_params.stream &&
	    acrtc->dm_irq_params.vrr_params.supported &&
	    acrtc->dm_irq_params.freesync_config.state ==
		    VRR_STATE_ACTIVE_VARIABLE) {
		mod_freesync_handle_v_update(adev->dm.freesync_module,
					     acrtc->dm_irq_params.stream,
					     &acrtc->dm_irq_params.vrr_params);

		dc_stream_adjust_vmin_vmax(adev->dm.dc, acrtc->dm_irq_params.stream,
					   &acrtc->dm_irq_params.vrr_params.adjust);
	}

	/*
	 * If there aren't any active_planes then DCH HUBP may be clock-gated.
	 * In that case, pageflip completion interrupts won't fire and pageflip
	 * completion events won't get delivered. Prevent this by sending
	 * pending pageflip events from here if a flip is still pending.
	 *
	 * If any planes are enabled, use dm_pflip_high_irq() instead, to
	 * avoid race conditions between flip programming and completion,
	 * which could cause too early flip completion events.
	 */
	if (adev->family >= AMDGPU_FAMILY_RV &&
	    acrtc->pflip_status == AMDGPU_FLIP_SUBMITTED &&
	    acrtc->dm_irq_params.active_planes == 0) {
		if (acrtc->event) {
			drm_crtc_send_vblank_event(&acrtc->base, acrtc->event);
			acrtc->event = NULL;
			drm_crtc_vblank_put(&acrtc->base);
		}
		acrtc->pflip_status = AMDGPU_FLIP_NONE;
	}

	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
}
#if defined(CONFIG_DRM_AMD_DC_DCN)
#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
/**
 * dm_dcn_vertical_interrupt0_high_irq() - Handles OTG Vertical interrupt0 for
 * DCN generation ASICs
 * @interrupt_params: interrupt parameters
 *
 * Used to set crc window/read out crc value at vertical line 0 position
 */
static void dm_dcn_vertical_interrupt0_high_irq(void *interrupt_params)
{
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	struct amdgpu_crtc *acrtc;

	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VLINE0);

	if (!acrtc)
		return;

	amdgpu_dm_crtc_handle_crc_window_irq(&acrtc->base);
}
#endif

/**
 * dm_dmub_outbox1_low_irq() - Handles Outbox interrupt
 * @interrupt_params: used for determining the Outbox instance
 *
 * Handles the Outbox Interrupt
 * event handler.
 */
#define DMUB_TRACE_MAX_READ 64
static void dm_dmub_outbox1_low_irq(void *interrupt_params)
{
	struct dmub_notification notify;
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	struct amdgpu_display_manager *dm = &adev->dm;
	struct dmcub_trace_buf_entry entry = { 0 };
	uint32_t count = 0;

	if (dc_enable_dmub_notifications(adev->dm.dc)) {
		if (irq_params->irq_src == DC_IRQ_SOURCE_DMCUB_OUTBOX) {
			do {
				dc_stat_get_dmub_notification(adev->dm.dc, &notify);
			} while (notify.pending_notification);

			if (adev->dm.dmub_notify)
				memcpy(adev->dm.dmub_notify, &notify, sizeof(struct dmub_notification));
			if (notify.type == DMUB_NOTIFICATION_AUX_REPLY)
				complete(&adev->dm.dmub_aux_transfer_done);
			/* TODO: HPD Implementation */

		} else {
			DRM_ERROR("DM: Failed to receive correct outbox IRQ !");
		}
	}

	do {
		if (dc_dmub_srv_get_dmub_outbox0_msg(dm->dc, &entry)) {
			trace_amdgpu_dmub_trace_high_irq(entry.trace_code, entry.tick_count,
							 entry.param0, entry.param1);

			DRM_DEBUG_DRIVER("trace_code:%u, tick_count:%u, param0:%u, param1:%u\n",
					 entry.trace_code, entry.tick_count, entry.param0, entry.param1);
		} else
			break;

		count++;

	} while (count <= DMUB_TRACE_MAX_READ);

	ASSERT(count <= DMUB_TRACE_MAX_READ);
}
#endif
static int dm_set_clockgating_state(void *handle,
				    enum amd_clockgating_state state)
{
	return 0;
}

static int dm_set_powergating_state(void *handle,
				    enum amd_powergating_state state)
{
	return 0;
}

/* Prototypes of private functions */
static int dm_early_init(void *handle);
/* Allocate memory for FBC compressed data */
static void amdgpu_dm_fbc_init(struct drm_connector *connector)
{
	struct drm_device *dev = connector->dev;
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct dm_compressor_info *compressor = &adev->dm.compressor;
	struct amdgpu_dm_connector *aconn = to_amdgpu_dm_connector(connector);
	struct drm_display_mode *mode;
	unsigned long max_size = 0;

	if (adev->dm.dc->fbc_compressor == NULL)
		return;

	if (aconn->dc_link->connector_signal != SIGNAL_TYPE_EDP)
		return;

	if (compressor->bo_ptr)
		return;

	list_for_each_entry(mode, &connector->modes, head) {
		if (max_size < mode->htotal * mode->vtotal)
			max_size = mode->htotal * mode->vtotal;
	}

	if (max_size) {
		int r = amdgpu_bo_create_kernel(adev, max_size * 4, PAGE_SIZE,
				AMDGPU_GEM_DOMAIN_GTT, &compressor->bo_ptr,
				&compressor->gpu_addr, &compressor->cpu_addr);

		if (r)
			DRM_ERROR("DM: Failed to initialize FBC\n");
		else {
			adev->dm.dc->ctx->fbc_gpu_addr = compressor->gpu_addr;
			DRM_INFO("DM: FBC alloc %lu\n", max_size*4);
		}
	}
}
static int amdgpu_dm_audio_component_get_eld(struct device *kdev, int port,
					     int pipe, bool *enabled,
					     unsigned char *buf, int max_bytes)
{
	struct drm_device *dev = dev_get_drvdata(kdev);
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct drm_connector *connector;
	struct drm_connector_list_iter conn_iter;
	struct amdgpu_dm_connector *aconnector;
	int ret = 0;

	*enabled = false;

	mutex_lock(&adev->dm.audio_lock);

	drm_connector_list_iter_begin(dev, &conn_iter);
	drm_for_each_connector_iter(connector, &conn_iter) {
		aconnector = to_amdgpu_dm_connector(connector);
		if (aconnector->audio_inst != port)
			continue;

		*enabled = true;
		ret = drm_eld_size(connector->eld);
		memcpy(buf, connector->eld, min(max_bytes, ret));

		break;
	}
	drm_connector_list_iter_end(&conn_iter);

	mutex_unlock(&adev->dm.audio_lock);

	DRM_DEBUG_KMS("Get ELD : idx=%d ret=%d en=%d\n", port, ret, *enabled);

	return ret;
}

static const struct drm_audio_component_ops amdgpu_dm_audio_component_ops = {
	.get_eld = amdgpu_dm_audio_component_get_eld,
};
static int amdgpu_dm_audio_component_bind(struct device *kdev,
					  struct device *hda_kdev, void *data)
{
	struct drm_device *dev = dev_get_drvdata(kdev);
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct drm_audio_component *acomp = data;

	acomp->ops = &amdgpu_dm_audio_component_ops;
	acomp->dev = kdev;
	adev->dm.audio_component = acomp;

	return 0;
}

static void amdgpu_dm_audio_component_unbind(struct device *kdev,
					     struct device *hda_kdev, void *data)
{
	struct drm_device *dev = dev_get_drvdata(kdev);
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct drm_audio_component *acomp = data;

	acomp->ops = NULL;
	acomp->dev = NULL;
	adev->dm.audio_component = NULL;
}

static const struct component_ops amdgpu_dm_audio_component_bind_ops = {
	.bind	= amdgpu_dm_audio_component_bind,
	.unbind	= amdgpu_dm_audio_component_unbind,
};
static int amdgpu_dm_audio_init(struct amdgpu_device *adev)
{
	int i, ret;

	if (!amdgpu_audio)
		return 0;

	adev->mode_info.audio.enabled = true;

	adev->mode_info.audio.num_pins = adev->dm.dc->res_pool->audio_count;

	for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
		adev->mode_info.audio.pin[i].channels = -1;
		adev->mode_info.audio.pin[i].rate = -1;
		adev->mode_info.audio.pin[i].bits_per_sample = -1;
		adev->mode_info.audio.pin[i].status_bits = 0;
		adev->mode_info.audio.pin[i].category_code = 0;
		adev->mode_info.audio.pin[i].connected = false;
		adev->mode_info.audio.pin[i].id =
			adev->dm.dc->res_pool->audios[i]->inst;
		adev->mode_info.audio.pin[i].offset = 0;
	}

	ret = component_add(adev->dev, &amdgpu_dm_audio_component_bind_ops);
	if (ret < 0)
		return ret;

	adev->dm.audio_registered = true;

	return 0;
}
static void amdgpu_dm_audio_fini(struct amdgpu_device *adev)
{
	if (!amdgpu_audio)
		return;

	if (!adev->mode_info.audio.enabled)
		return;

	if (adev->dm.audio_registered) {
		component_del(adev->dev, &amdgpu_dm_audio_component_bind_ops);
		adev->dm.audio_registered = false;
	}

	/* TODO: Disable audio? */

	adev->mode_info.audio.enabled = false;
}
static void amdgpu_dm_audio_eld_notify(struct amdgpu_device *adev, int pin)
{
	struct drm_audio_component *acomp = adev->dm.audio_component;

	if (acomp && acomp->audio_ops && acomp->audio_ops->pin_eld_notify) {
		DRM_DEBUG_KMS("Notify ELD: %d\n", pin);

		acomp->audio_ops->pin_eld_notify(acomp->audio_ops->audio_ptr,
						 pin, -1);
	}
}
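/*
 * Bring up the DMUB (display microcontroller) service: copy the firmware
 * and VBIOS sections into their reserved framebuffer windows, program the
 * hardware parameters and wait for the firmware to auto-load.
 */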
static int dm_dmub_hw_init(struct amdgpu_device *adev)
{
	const struct dmcub_firmware_header_v1_0 *hdr;
	struct dmub_srv *dmub_srv = adev->dm.dmub_srv;
	struct dmub_srv_fb_info *fb_info = adev->dm.dmub_fb_info;
	const struct firmware *dmub_fw = adev->dm.dmub_fw;
	struct dmcu *dmcu = adev->dm.dc->res_pool->dmcu;
	struct abm *abm = adev->dm.dc->res_pool->abm;
	struct dmub_srv_hw_params hw_params;
	enum dmub_status status;
	const unsigned char *fw_inst_const, *fw_bss_data;
	uint32_t i, fw_inst_const_size, fw_bss_data_size;
	bool has_hw_support;

	if (!dmub_srv)
		/* DMUB isn't supported on the ASIC. */
		return 0;

	if (!fb_info) {
		DRM_ERROR("No framebuffer info for DMUB service.\n");
		return -EINVAL;
	}

	if (!dmub_fw) {
		/* Firmware required for DMUB support. */
		DRM_ERROR("No firmware provided for DMUB.\n");
		return -EINVAL;
	}

	status = dmub_srv_has_hw_support(dmub_srv, &has_hw_support);
	if (status != DMUB_STATUS_OK) {
		DRM_ERROR("Error checking HW support for DMUB: %d\n", status);
		return -EINVAL;
	}

	if (!has_hw_support) {
		DRM_INFO("DMUB unsupported on ASIC\n");
		return 0;
	}

	hdr = (const struct dmcub_firmware_header_v1_0 *)dmub_fw->data;

	fw_inst_const = dmub_fw->data +
			le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
			PSP_HEADER_BYTES;

	fw_bss_data = dmub_fw->data +
		      le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
		      le32_to_cpu(hdr->inst_const_bytes);

	/* Copy firmware and bios info into FB memory. */
	fw_inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
			     PSP_HEADER_BYTES - PSP_FOOTER_BYTES;

	fw_bss_data_size = le32_to_cpu(hdr->bss_data_bytes);

	/* if adev->firmware.load_type == AMDGPU_FW_LOAD_PSP,
	 * amdgpu_ucode_init_single_fw will load dmub firmware
	 * fw_inst_const part to cw0; otherwise, the firmware back door load
	 * will be done by dm_dmub_hw_init
	 */
	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
		memcpy(fb_info->fb[DMUB_WINDOW_0_INST_CONST].cpu_addr, fw_inst_const,
		       fw_inst_const_size);
	}

	if (fw_bss_data_size)
		memcpy(fb_info->fb[DMUB_WINDOW_2_BSS_DATA].cpu_addr,
		       fw_bss_data, fw_bss_data_size);

	/* Copy firmware bios info into FB memory. */
	memcpy(fb_info->fb[DMUB_WINDOW_3_VBIOS].cpu_addr, adev->bios,
	       adev->bios_size);

	/* Reset regions that need to be reset. */
	memset(fb_info->fb[DMUB_WINDOW_4_MAILBOX].cpu_addr, 0,
	       fb_info->fb[DMUB_WINDOW_4_MAILBOX].size);

	memset(fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].cpu_addr, 0,
	       fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].size);

	memset(fb_info->fb[DMUB_WINDOW_6_FW_STATE].cpu_addr, 0,
	       fb_info->fb[DMUB_WINDOW_6_FW_STATE].size);

	/* Initialize hardware. */
	memset(&hw_params, 0, sizeof(hw_params));
	hw_params.fb_base = adev->gmc.fb_start;
	hw_params.fb_offset = adev->gmc.aper_base;

	/* backdoor load firmware and trigger dmub running */
	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
		hw_params.load_inst_const = true;

	if (dmcu)
		hw_params.psp_version = dmcu->psp_version;

	for (i = 0; i < fb_info->num_fb; ++i)
		hw_params.fb[i] = &fb_info->fb[i];

	status = dmub_srv_hw_init(dmub_srv, &hw_params);
	if (status != DMUB_STATUS_OK) {
		DRM_ERROR("Error initializing DMUB HW: %d\n", status);
		return -EINVAL;
	}

	/* Wait for firmware load to finish. */
	status = dmub_srv_wait_for_auto_load(dmub_srv, 100000);
	if (status != DMUB_STATUS_OK)
		DRM_WARN("Wait for DMUB auto-load failed: %d\n", status);

	/* Init DMCU and ABM if available. */
	if (dmcu && abm) {
		dmcu->funcs->dmcu_init(dmcu);
		abm->dmcu_is_running = dmcu->funcs->is_dmcu_initialized(dmcu);
	}

	if (!adev->dm.dc->ctx->dmub_srv)
		adev->dm.dc->ctx->dmub_srv = dc_dmub_srv_create(adev->dm.dc, dmub_srv);
	if (!adev->dm.dc->ctx->dmub_srv) {
		DRM_ERROR("Couldn't allocate DC DMUB server!\n");
		return -ENOMEM;
	}

	DRM_INFO("DMUB hardware initialized: version=0x%08X\n",
		 adev->dm.dmcub_fw_version);

	return 0;
}
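/*
 * Translate the GMC view of the address space into DC's physical address
 * space config. The shifts follow the hardware register granularities:
 * the system aperture is programmed in 256KB units (>> 18), the AGP
 * aperture in 16MB units (>> 24) and the GART page tables in 4KB pages
 * (>> 12).
 */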
#if defined(CONFIG_DRM_AMD_DC_DCN)
static void mmhub_read_system_context(struct amdgpu_device *adev, struct dc_phy_addr_space_config *pa_config)
{
	uint64_t pt_base;
	uint32_t logical_addr_low;
	uint32_t logical_addr_high;
	uint32_t agp_base, agp_bot, agp_top;
	PHYSICAL_ADDRESS_LOC page_table_start, page_table_end, page_table_base;

	logical_addr_low = min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18;
	pt_base = amdgpu_gmc_pd_addr(adev->gart.bo);

	if (adev->apu_flags & AMD_APU_IS_RAVEN2)
		/*
		 * Raven2 has a HW issue that it is unable to use the vram which
		 * is out of MC_VM_SYSTEM_APERTURE_HIGH_ADDR. So here is the
		 * workaround that increase system aperture high address (add 1)
		 * to get rid of the VM fault and hardware hang.
		 */
		logical_addr_high = max((adev->gmc.fb_end >> 18) + 0x1, adev->gmc.agp_end >> 18);
	else
		logical_addr_high = max(adev->gmc.fb_end, adev->gmc.agp_end) >> 18;

	agp_base = 0;
	agp_bot = adev->gmc.agp_start >> 24;
	agp_top = adev->gmc.agp_end >> 24;

	page_table_start.high_part = (u32)(adev->gmc.gart_start >> 44) & 0xF;
	page_table_start.low_part = (u32)(adev->gmc.gart_start >> 12);
	page_table_end.high_part = (u32)(adev->gmc.gart_end >> 44) & 0xF;
	page_table_end.low_part = (u32)(adev->gmc.gart_end >> 12);
	page_table_base.high_part = upper_32_bits(pt_base) & 0xF;
	page_table_base.low_part = lower_32_bits(pt_base);

	pa_config->system_aperture.start_addr = (uint64_t)logical_addr_low << 18;
	pa_config->system_aperture.end_addr = (uint64_t)logical_addr_high << 18;

	pa_config->system_aperture.agp_base = (uint64_t)agp_base << 24;
	pa_config->system_aperture.agp_bot = (uint64_t)agp_bot << 24;
	pa_config->system_aperture.agp_top = (uint64_t)agp_top << 24;

	pa_config->system_aperture.fb_base = adev->gmc.fb_start;
	pa_config->system_aperture.fb_offset = adev->gmc.aper_base;
	pa_config->system_aperture.fb_top = adev->gmc.fb_end;

	pa_config->gart_config.page_table_start_addr = page_table_start.quad_part << 12;
	pa_config->gart_config.page_table_end_addr = page_table_end.quad_part << 12;
	pa_config->gart_config.page_table_base_addr = page_table_base.quad_part;

	pa_config->is_hvm_enabled = 0;
}
#endif
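/*
 * MALL (memory access at last level) stutter bookkeeping: DC idle
 * optimizations are only allowed while no CRTC has its vblank interrupt
 * enabled, so every vblank enable/disable updates a refcount under dc_lock.
 */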
#if defined(CONFIG_DRM_AMD_DC_DCN)
static void event_mall_stutter(struct work_struct *work)
{
	struct vblank_workqueue *vblank_work = container_of(work, struct vblank_workqueue, mall_work);
	struct amdgpu_display_manager *dm = vblank_work->dm;

	mutex_lock(&dm->dc_lock);

	if (vblank_work->enable)
		dm->active_vblank_irq_count++;
	else if (dm->active_vblank_irq_count)
		dm->active_vblank_irq_count--;

	dc_allow_idle_optimizations(dm->dc, dm->active_vblank_irq_count == 0);

	DRM_DEBUG_KMS("Allow idle optimizations (MALL): %d\n", dm->active_vblank_irq_count == 0);

	mutex_unlock(&dm->dc_lock);
}
static struct vblank_workqueue *vblank_create_workqueue(struct amdgpu_device *adev, struct dc *dc)
{
	int max_caps = dc->caps.max_links;
	struct vblank_workqueue *vblank_work;
	int i = 0;

	vblank_work = kcalloc(max_caps, sizeof(*vblank_work), GFP_KERNEL);
	if (ZERO_OR_NULL_PTR(vblank_work)) {
		kfree(vblank_work);
		return NULL;
	}

	for (i = 0; i < max_caps; i++)
		INIT_WORK(&vblank_work[i].mall_work, event_mall_stutter);

	return vblank_work;
}
#endif
static int amdgpu_dm_init(struct amdgpu_device *adev)
{
	struct dc_init_data init_data;
#ifdef CONFIG_DRM_AMD_DC_HDCP
	struct dc_callback_init init_params;
#endif
	int r;

	adev->dm.ddev = adev_to_drm(adev);
	adev->dm.adev = adev;

	/* Zero all the fields */
	memset(&init_data, 0, sizeof(init_data));
#ifdef CONFIG_DRM_AMD_DC_HDCP
	memset(&init_params, 0, sizeof(init_params));
#endif

	mutex_init(&adev->dm.dc_lock);
	mutex_init(&adev->dm.audio_lock);
#if defined(CONFIG_DRM_AMD_DC_DCN)
	spin_lock_init(&adev->dm.vblank_lock);
#endif

	if (amdgpu_dm_irq_init(adev)) {
		DRM_ERROR("amdgpu: failed to initialize DM IRQ support.\n");
		goto error;
	}

	init_data.asic_id.chip_family = adev->family;

	init_data.asic_id.pci_revision_id = adev->pdev->revision;
	init_data.asic_id.hw_internal_rev = adev->external_rev_id;

	init_data.asic_id.vram_width = adev->gmc.vram_width;
	/* TODO: initialize init_data.asic_id.vram_type here!!!! */
	init_data.asic_id.atombios_base_address =
		adev->mode_info.atom_context->bios;

	init_data.driver = adev;

	adev->dm.cgs_device = amdgpu_cgs_create_device(adev);

	if (!adev->dm.cgs_device) {
		DRM_ERROR("amdgpu: failed to create cgs device.\n");
		goto error;
	}

	init_data.cgs_device = adev->dm.cgs_device;

	init_data.dce_environment = DCE_ENV_PRODUCTION_DRV;

	switch (adev->asic_type) {
	case CHIP_CARRIZO:
	case CHIP_STONEY:
	case CHIP_RAVEN:
	case CHIP_RENOIR:
		init_data.flags.gpu_vm_support = true;
		if (ASICREV_IS_GREEN_SARDINE(adev->external_rev_id))
			init_data.flags.disable_dmcu = true;
		break;
#if defined(CONFIG_DRM_AMD_DC_DCN)
	case CHIP_VANGOGH:
		init_data.flags.gpu_vm_support = true;
		break;
#endif
	default:
		break;
	}

	if (amdgpu_dc_feature_mask & DC_FBC_MASK)
		init_data.flags.fbc_support = true;

	if (amdgpu_dc_feature_mask & DC_MULTI_MON_PP_MCLK_SWITCH_MASK)
		init_data.flags.multi_mon_pp_mclk_switch = true;

	if (amdgpu_dc_feature_mask & DC_DISABLE_FRACTIONAL_PWM_MASK)
		init_data.flags.disable_fractional_pwm = true;

	init_data.flags.power_down_display_on_boot = true;

	INIT_LIST_HEAD(&adev->dm.da_list);
	/* Display Core create. */
	adev->dm.dc = dc_create(&init_data);

	if (adev->dm.dc) {
		DRM_INFO("Display Core initialized with v%s!\n", DC_VER);
	} else {
		DRM_INFO("Display Core failed to initialize with v%s!\n", DC_VER);
		goto error;
	}

	if (amdgpu_dc_debug_mask & DC_DISABLE_PIPE_SPLIT) {
		adev->dm.dc->debug.force_single_disp_pipe_split = false;
		adev->dm.dc->debug.pipe_split_policy = MPC_SPLIT_AVOID;
	}

	if (adev->asic_type != CHIP_CARRIZO && adev->asic_type != CHIP_STONEY)
		adev->dm.dc->debug.disable_stutter = amdgpu_pp_feature_mask & PP_STUTTER_MODE ? false : true;

	if (amdgpu_dc_debug_mask & DC_DISABLE_STUTTER)
		adev->dm.dc->debug.disable_stutter = true;

	if (amdgpu_dc_debug_mask & DC_DISABLE_DSC)
		adev->dm.dc->debug.disable_dsc = true;

	if (amdgpu_dc_debug_mask & DC_DISABLE_CLOCK_GATING)
		adev->dm.dc->debug.disable_clock_gate = true;

	r = dm_dmub_hw_init(adev);
	if (r) {
		DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
		goto error;
	}

	dc_hardware_init(adev->dm.dc);

#if defined(CONFIG_DRM_AMD_DC_DCN)
	if (adev->apu_flags) {
		struct dc_phy_addr_space_config pa_config;

		mmhub_read_system_context(adev, &pa_config);

		/* Call the DC init_memory func */
		dc_setup_system_context(adev->dm.dc, &pa_config);
	}
#endif

	adev->dm.freesync_module = mod_freesync_create(adev->dm.dc);
	if (!adev->dm.freesync_module) {
		DRM_ERROR(
		"amdgpu: failed to initialize freesync_module.\n");
	} else
		DRM_DEBUG_DRIVER("amdgpu: freesync_module init done %p.\n",
				adev->dm.freesync_module);

	amdgpu_dm_init_color_mod();

#if defined(CONFIG_DRM_AMD_DC_DCN)
	if (adev->dm.dc->caps.max_links > 0) {
		adev->dm.vblank_workqueue = vblank_create_workqueue(adev, adev->dm.dc);

		if (!adev->dm.vblank_workqueue)
			DRM_ERROR("amdgpu: failed to initialize vblank_workqueue.\n");
		else
			DRM_DEBUG_DRIVER("amdgpu: vblank_workqueue init done %p.\n", adev->dm.vblank_workqueue);
	}
#endif

#ifdef CONFIG_DRM_AMD_DC_HDCP
	if (adev->dm.dc->caps.max_links > 0 && adev->asic_type >= CHIP_RAVEN) {
		adev->dm.hdcp_workqueue = hdcp_create_workqueue(adev, &init_params.cp_psp, adev->dm.dc);

		if (!adev->dm.hdcp_workqueue)
			DRM_ERROR("amdgpu: failed to initialize hdcp_workqueue.\n");
		else
			DRM_DEBUG_DRIVER("amdgpu: hdcp_workqueue init done %p.\n", adev->dm.hdcp_workqueue);

		dc_init_callbacks(adev->dm.dc, &init_params);
	}
#endif
#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
	adev->dm.crc_rd_wrk = amdgpu_dm_crtc_secure_display_create_work();
#endif
	if (dc_enable_dmub_notifications(adev->dm.dc)) {
		init_completion(&adev->dm.dmub_aux_transfer_done);
		adev->dm.dmub_notify = kzalloc(sizeof(struct dmub_notification), GFP_KERNEL);
		if (!adev->dm.dmub_notify) {
			DRM_INFO("amdgpu: fail to allocate adev->dm.dmub_notify");
			goto error;
		}
		amdgpu_dm_outbox_init(adev);
	}

	if (amdgpu_dm_initialize_drm_device(adev)) {
		DRM_ERROR(
		"amdgpu: failed to initialize sw for display support.\n");
		goto error;
	}

	/* create fake encoders for MST */
	dm_dp_create_fake_mst_encoders(adev);

	/* TODO: Add_display_info? */

	/* TODO use dynamic cursor width */
	adev_to_drm(adev)->mode_config.cursor_width = adev->dm.dc->caps.max_cursor_size;
	adev_to_drm(adev)->mode_config.cursor_height = adev->dm.dc->caps.max_cursor_size;

	if (drm_vblank_init(adev_to_drm(adev), adev->dm.display_indexes_num)) {
		DRM_ERROR(
		"amdgpu: failed to initialize sw for display support.\n");
		goto error;
	}

	DRM_DEBUG_DRIVER("KMS initialized.\n");

	return 0;

error:
	amdgpu_dm_fini(adev);

	return -EINVAL;
}
static void amdgpu_dm_fini(struct amdgpu_device *adev)
{
	int i;

	for (i = 0; i < adev->dm.display_indexes_num; i++) {
		drm_encoder_cleanup(&adev->dm.mst_encoders[i].base);
	}

	amdgpu_dm_audio_fini(adev);

	amdgpu_dm_destroy_drm_device(&adev->dm);

#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
	if (adev->dm.crc_rd_wrk) {
		flush_work(&adev->dm.crc_rd_wrk->notify_ta_work);
		kfree(adev->dm.crc_rd_wrk);
		adev->dm.crc_rd_wrk = NULL;
	}
#endif
#ifdef CONFIG_DRM_AMD_DC_HDCP
	if (adev->dm.hdcp_workqueue) {
		hdcp_destroy(&adev->dev->kobj, adev->dm.hdcp_workqueue);
		adev->dm.hdcp_workqueue = NULL;
	}

	if (adev->dm.dc)
		dc_deinit_callbacks(adev->dm.dc);
#endif

#if defined(CONFIG_DRM_AMD_DC_DCN)
	if (adev->dm.vblank_workqueue) {
		adev->dm.vblank_workqueue->dm = NULL;
		kfree(adev->dm.vblank_workqueue);
		adev->dm.vblank_workqueue = NULL;
	}
#endif

	dc_dmub_srv_destroy(&adev->dm.dc->ctx->dmub_srv);

	if (dc_enable_dmub_notifications(adev->dm.dc)) {
		kfree(adev->dm.dmub_notify);
		adev->dm.dmub_notify = NULL;
	}

	if (adev->dm.dmub_bo)
		amdgpu_bo_free_kernel(&adev->dm.dmub_bo,
				      &adev->dm.dmub_bo_gpu_addr,
				      &adev->dm.dmub_bo_cpu_addr);

	/* DC Destroy TODO: Replace destroy DAL */
	if (adev->dm.dc)
		dc_destroy(&adev->dm.dc);
	/*
	 * TODO: pageflip, vblank interrupt
	 *
	 * amdgpu_dm_irq_fini(adev);
	 */

	if (adev->dm.cgs_device) {
		amdgpu_cgs_destroy_device(adev->dm.cgs_device);
		adev->dm.cgs_device = NULL;
	}
	if (adev->dm.freesync_module) {
		mod_freesync_destroy(adev->dm.freesync_module);
		adev->dm.freesync_module = NULL;
	}

	mutex_destroy(&adev->dm.audio_lock);
	mutex_destroy(&adev->dm.dc_lock);
}
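/*
 * Request the standalone DMCU firmware where one exists: only Raven
 * (including Picasso and Raven2) and Navi12 ship a separate DMCU binary;
 * every other supported ASIC returns early. A missing firmware file is not
 * treated as fatal.
 */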
static int load_dmcu_fw(struct amdgpu_device *adev)
{
	const char *fw_name_dmcu = NULL;
	int r;
	const struct dmcu_firmware_header_v1_0 *hdr;

	switch (adev->asic_type) {
#if defined(CONFIG_DRM_AMD_DC_SI)
	case CHIP_TAHITI:
	case CHIP_PITCAIRN:
	case CHIP_VERDE:
	case CHIP_OLAND:
#endif
	case CHIP_BONAIRE:
	case CHIP_HAWAII:
	case CHIP_KAVERI:
	case CHIP_KABINI:
	case CHIP_MULLINS:
	case CHIP_TONGA:
	case CHIP_FIJI:
	case CHIP_CARRIZO:
	case CHIP_STONEY:
	case CHIP_POLARIS11:
	case CHIP_POLARIS10:
	case CHIP_POLARIS12:
	case CHIP_VEGAM:
	case CHIP_VEGA10:
	case CHIP_VEGA12:
	case CHIP_VEGA20:
	case CHIP_NAVI10:
	case CHIP_NAVI14:
	case CHIP_RENOIR:
	case CHIP_SIENNA_CICHLID:
	case CHIP_NAVY_FLOUNDER:
	case CHIP_DIMGREY_CAVEFISH:
	case CHIP_BEIGE_GOBY:
	case CHIP_VANGOGH:
		return 0;
	case CHIP_NAVI12:
		fw_name_dmcu = FIRMWARE_NAVI12_DMCU;
		break;
	case CHIP_RAVEN:
		if (ASICREV_IS_PICASSO(adev->external_rev_id))
			fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
		else if (ASICREV_IS_RAVEN2(adev->external_rev_id))
			fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
		else
			return 0;
		break;
	default:
		DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
		return -EINVAL;
	}

	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
		DRM_DEBUG_KMS("dm: DMCU firmware not supported on direct or SMU loading\n");
		return 0;
	}

	r = request_firmware_direct(&adev->dm.fw_dmcu, fw_name_dmcu, adev->dev);
	if (r == -ENOENT) {
		/* DMCU firmware is not necessary, so don't raise a fuss if it's missing */
		DRM_DEBUG_KMS("dm: DMCU firmware not found\n");
		adev->dm.fw_dmcu = NULL;
		return 0;
	}
	if (r) {
		dev_err(adev->dev, "amdgpu_dm: Can't load firmware \"%s\"\n",
			fw_name_dmcu);
		return r;
	}

	r = amdgpu_ucode_validate(adev->dm.fw_dmcu);
	if (r) {
		dev_err(adev->dev, "amdgpu_dm: Can't validate firmware \"%s\"\n",
			fw_name_dmcu);
		release_firmware(adev->dm.fw_dmcu);
		adev->dm.fw_dmcu = NULL;
		return r;
	}

	hdr = (const struct dmcu_firmware_header_v1_0 *)adev->dm.fw_dmcu->data;
	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].ucode_id = AMDGPU_UCODE_ID_DMCU_ERAM;
	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].fw = adev->dm.fw_dmcu;
	adev->firmware.fw_size +=
		ALIGN(le32_to_cpu(hdr->header.ucode_size_bytes) - le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);

	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].ucode_id = AMDGPU_UCODE_ID_DMCU_INTV;
	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].fw = adev->dm.fw_dmcu;
	adev->firmware.fw_size +=
		ALIGN(le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);

	adev->dm.dmcu_fw_version = le32_to_cpu(hdr->header.ucode_version);

	DRM_DEBUG_KMS("PSP loading DMCU firmware\n");

	return 0;
}
static uint32_t amdgpu_dm_dmub_reg_read(void *ctx, uint32_t address)
{
	struct amdgpu_device *adev = ctx;

	return dm_read_reg(adev->dm.dc->ctx, address);
}

static void amdgpu_dm_dmub_reg_write(void *ctx, uint32_t address,
				     uint32_t value)
{
	struct amdgpu_device *adev = ctx;

	return dm_write_reg(adev->dm.dc->ctx, address, value);
}
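/*
 * Software-side DMUB setup: select the per-ASIC firmware, create the DMUB
 * service with register accessors routed through DC, compute the region
 * layout of the firmware image and carve a backing buffer out of VRAM.
 */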
static int dm_dmub_sw_init(struct amdgpu_device *adev)
{
	struct dmub_srv_create_params create_params;
	struct dmub_srv_region_params region_params;
	struct dmub_srv_region_info region_info;
	struct dmub_srv_fb_params fb_params;
	struct dmub_srv_fb_info *fb_info;
	struct dmub_srv *dmub_srv;
	const struct dmcub_firmware_header_v1_0 *hdr;
	const char *fw_name_dmub;
	enum dmub_asic dmub_asic;
	enum dmub_status status;
	int r;

	switch (adev->asic_type) {
	case CHIP_RENOIR:
		dmub_asic = DMUB_ASIC_DCN21;
		fw_name_dmub = FIRMWARE_RENOIR_DMUB;
		if (ASICREV_IS_GREEN_SARDINE(adev->external_rev_id))
			fw_name_dmub = FIRMWARE_GREEN_SARDINE_DMUB;
		break;
	case CHIP_SIENNA_CICHLID:
		dmub_asic = DMUB_ASIC_DCN30;
		fw_name_dmub = FIRMWARE_SIENNA_CICHLID_DMUB;
		break;
	case CHIP_NAVY_FLOUNDER:
		dmub_asic = DMUB_ASIC_DCN30;
		fw_name_dmub = FIRMWARE_NAVY_FLOUNDER_DMUB;
		break;
	case CHIP_VANGOGH:
		dmub_asic = DMUB_ASIC_DCN301;
		fw_name_dmub = FIRMWARE_VANGOGH_DMUB;
		break;
	case CHIP_DIMGREY_CAVEFISH:
		dmub_asic = DMUB_ASIC_DCN302;
		fw_name_dmub = FIRMWARE_DIMGREY_CAVEFISH_DMUB;
		break;
	case CHIP_BEIGE_GOBY:
		dmub_asic = DMUB_ASIC_DCN303;
		fw_name_dmub = FIRMWARE_BEIGE_GOBY_DMUB;
		break;

	default:
		/* ASIC doesn't support DMUB. */
		return 0;
	}

	r = request_firmware_direct(&adev->dm.dmub_fw, fw_name_dmub, adev->dev);
	if (r) {
		DRM_ERROR("DMUB firmware loading failed: %d\n", r);
		return r;
	}

	r = amdgpu_ucode_validate(adev->dm.dmub_fw);
	if (r) {
		DRM_ERROR("Couldn't validate DMUB firmware: %d\n", r);
		return r;
	}

	hdr = (const struct dmcub_firmware_header_v1_0 *)adev->dm.dmub_fw->data;

	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
		adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].ucode_id =
			AMDGPU_UCODE_ID_DMCUB;
		adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].fw =
			adev->dm.dmub_fw;
		adev->firmware.fw_size +=
			ALIGN(le32_to_cpu(hdr->inst_const_bytes), PAGE_SIZE);

		DRM_INFO("Loading DMUB firmware via PSP: version=0x%08X\n",
			 adev->dm.dmcub_fw_version);
	}

	adev->dm.dmcub_fw_version = le32_to_cpu(hdr->header.ucode_version);

	adev->dm.dmub_srv = kzalloc(sizeof(*adev->dm.dmub_srv), GFP_KERNEL);
	dmub_srv = adev->dm.dmub_srv;

	if (!dmub_srv) {
		DRM_ERROR("Failed to allocate DMUB service!\n");
		return -ENOMEM;
	}

	memset(&create_params, 0, sizeof(create_params));
	create_params.user_ctx = adev;
	create_params.funcs.reg_read = amdgpu_dm_dmub_reg_read;
	create_params.funcs.reg_write = amdgpu_dm_dmub_reg_write;
	create_params.asic = dmub_asic;

	/* Create the DMUB service. */
	status = dmub_srv_create(dmub_srv, &create_params);
	if (status != DMUB_STATUS_OK) {
		DRM_ERROR("Error creating DMUB service: %d\n", status);
		return -EINVAL;
	}

	/* Calculate the size of all the regions for the DMUB service. */
	memset(&region_params, 0, sizeof(region_params));

	region_params.inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
					PSP_HEADER_BYTES - PSP_FOOTER_BYTES;
	region_params.bss_data_size = le32_to_cpu(hdr->bss_data_bytes);
	region_params.vbios_size = adev->bios_size;
	region_params.fw_bss_data = region_params.bss_data_size ?
		adev->dm.dmub_fw->data +
		le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
		le32_to_cpu(hdr->inst_const_bytes) : NULL;
	region_params.fw_inst_const =
		adev->dm.dmub_fw->data +
		le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
		PSP_HEADER_BYTES;

	status = dmub_srv_calc_region_info(dmub_srv, &region_params,
					   &region_info);

	if (status != DMUB_STATUS_OK) {
		DRM_ERROR("Error calculating DMUB region info: %d\n", status);
		return -EINVAL;
	}

	/*
	 * Allocate a framebuffer based on the total size of all the regions.
	 * TODO: Move this into GART.
	 */
	r = amdgpu_bo_create_kernel(adev, region_info.fb_size, PAGE_SIZE,
				    AMDGPU_GEM_DOMAIN_VRAM, &adev->dm.dmub_bo,
				    &adev->dm.dmub_bo_gpu_addr,
				    &adev->dm.dmub_bo_cpu_addr);
	if (r)
		return r;

	/* Rebase the regions on the framebuffer address. */
	memset(&fb_params, 0, sizeof(fb_params));
	fb_params.cpu_addr = adev->dm.dmub_bo_cpu_addr;
	fb_params.gpu_addr = adev->dm.dmub_bo_gpu_addr;
	fb_params.region_info = &region_info;

	adev->dm.dmub_fb_info =
		kzalloc(sizeof(*adev->dm.dmub_fb_info), GFP_KERNEL);
	fb_info = adev->dm.dmub_fb_info;

	if (!fb_info) {
		DRM_ERROR(
			"Failed to allocate framebuffer info for DMUB service!\n");
		return -ENOMEM;
	}

	status = dmub_srv_calc_fb_info(dmub_srv, &fb_params, fb_info);
	if (status != DMUB_STATUS_OK) {
		DRM_ERROR("Error calculating DMUB FB info: %d\n", status);
		return -EINVAL;
	}

	return 0;
}
static int dm_sw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int r;

	r = dm_dmub_sw_init(adev);
	if (r)
		return r;

	return load_dmcu_fw(adev);
}

static int dm_sw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	kfree(adev->dm.dmub_fb_info);
	adev->dm.dmub_fb_info = NULL;

	if (adev->dm.dmub_srv) {
		dmub_srv_destroy(adev->dm.dmub_srv);
		adev->dm.dmub_srv = NULL;
	}

	release_firmware(adev->dm.dmub_fw);
	adev->dm.dmub_fw = NULL;

	release_firmware(adev->dm.fw_dmcu);
	adev->dm.fw_dmcu = NULL;

	return 0;
}
static int detect_mst_link_for_all_connectors(struct drm_device *dev)
{
	struct amdgpu_dm_connector *aconnector;
	struct drm_connector *connector;
	struct drm_connector_list_iter iter;
	int ret = 0;

	drm_connector_list_iter_begin(dev, &iter);
	drm_for_each_connector_iter(connector, &iter) {
		aconnector = to_amdgpu_dm_connector(connector);
		if (aconnector->dc_link->type == dc_connection_mst_branch &&
		    aconnector->mst_mgr.aux) {
			DRM_DEBUG_DRIVER("DM_MST: starting TM on aconnector: %p [id: %d]\n",
					 aconnector,
					 aconnector->base.base.id);

			ret = drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, true);
			if (ret < 0) {
				DRM_ERROR("DM_MST: Failed to start MST\n");
				aconnector->dc_link->type =
					dc_connection_single;
				break;
			}
		}
	}
	drm_connector_list_iter_end(&iter);

	return ret;
}
static int dm_late_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	struct dmcu_iram_parameters params;
	unsigned int linear_lut[16];
	int i;
	struct dmcu *dmcu = NULL;

	dmcu = adev->dm.dc->res_pool->dmcu;

	for (i = 0; i < 16; i++)
		linear_lut[i] = 0xFFFF * i / 15;

	params.set = 0;
	params.backlight_ramping_start = 0xCCCC;
	params.backlight_ramping_reduction = 0xCCCCCCCC;
	params.backlight_lut_array_size = 16;
	params.backlight_lut_array = linear_lut;

	/* Min backlight level after ABM reduction, Don't allow below 1%
	 * 0xFFFF x 0.01 = 0x28F
	 */
	params.min_abm_backlight = 0x28F;
	/* In the case where abm is implemented on dmcub,
	 * dmcu object will be null.
	 * ABM 2.4 and up are implemented on dmcub.
	 */
	if (dmcu) {
		if (!dmcu_load_iram(dmcu, params))
			return -EINVAL;
	} else if (adev->dm.dc->ctx->dmub_srv) {
		struct dc_link *edp_links[MAX_NUM_EDP];
		int edp_num;

		get_edp_links(adev->dm.dc, edp_links, &edp_num);
		for (i = 0; i < edp_num; i++) {
			if (!dmub_init_abm_config(adev->dm.dc->res_pool, params, i))
				return -EINVAL;
		}
	}

	return detect_mst_link_for_all_connectors(adev_to_drm(adev));
}
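/*
 * Suspend or resume the DP MST topology managers across S3. If a manager
 * fails to resume, its MST state is torn down and a hotplug event is sent
 * so userspace re-probes the topology.
 */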
static void s3_handle_mst(struct drm_device *dev, bool suspend)
{
	struct amdgpu_dm_connector *aconnector;
	struct drm_connector *connector;
	struct drm_connector_list_iter iter;
	struct drm_dp_mst_topology_mgr *mgr;
	int ret;
	bool need_hotplug = false;

	drm_connector_list_iter_begin(dev, &iter);
	drm_for_each_connector_iter(connector, &iter) {
		aconnector = to_amdgpu_dm_connector(connector);
		if (aconnector->dc_link->type != dc_connection_mst_branch ||
		    aconnector->mst_port)
			continue;

		mgr = &aconnector->mst_mgr;

		if (suspend) {
			drm_dp_mst_topology_mgr_suspend(mgr);
		} else {
			ret = drm_dp_mst_topology_mgr_resume(mgr, true);
			if (ret < 0) {
				drm_dp_mst_topology_mgr_set_mst(mgr, false);
				need_hotplug = true;
			}
		}
	}
	drm_connector_list_iter_end(&iter);

	if (need_hotplug)
		drm_kms_helper_hotplug_event(dev);
}
static int amdgpu_dm_smu_write_watermarks_table(struct amdgpu_device *adev)
{
	struct smu_context *smu = &adev->smu;
	int ret = 0;

	if (!is_support_sw_smu(adev))
		return 0;

	/* This interface is for dGPU Navi1x. Linux dc-pplib interface depends
	 * on window driver dc implementation.
	 * For Navi1x, clock settings of dcn watermarks are fixed. the settings
	 * should be passed to smu during boot up and resume from s3.
	 * boot up: dc calculate dcn watermark clock settings within dc_create,
	 * dcn20_resource_construct
	 * then call pplib functions below to pass the settings to smu:
	 * smu_set_watermarks_for_clock_ranges
	 * smu_set_watermarks_table
	 * navi10_set_watermarks_table
	 * smu_write_watermarks_table
	 *
	 * For Renoir, clock settings of dcn watermark are also fixed values.
	 * dc has implemented different flow for window driver:
	 * dc_hardware_init / dc_set_power_state
	 * dcn10_init_hw
	 * notify_wm_ranges
	 * set_wm_ranges
	 * -- Linux
	 * smu_set_watermarks_for_clock_ranges
	 * renoir_set_watermarks_table
	 * smu_write_watermarks_table
	 *
	 * For Linux,
	 * dc_hardware_init -> amdgpu_dm_init
	 * dc_set_power_state --> dm_resume
	 *
	 * therefore, this function applies to navi10/12/14 but not Renoir
	 */
	switch (adev->asic_type) {
	case CHIP_NAVI10:
	case CHIP_NAVI14:
	case CHIP_NAVI12:
		break;
	default:
		return 0;
	}

	ret = smu_write_watermarks_table(smu);
	if (ret) {
		DRM_ERROR("Failed to update WMTABLE!\n");
		return ret;
	}

	return 0;
}
/**
 * dm_hw_init() - Initialize DC device
 * @handle: The base driver device containing the amdgpu_dm device.
 *
 * Initialize the &struct amdgpu_display_manager device. This involves calling
 * the initializers of each DM component, then populating the struct with them.
 *
 * Although the function implies hardware initialization, both hardware and
 * software are initialized here. Splitting them out to their relevant init
 * hooks is a future TODO item.
 *
 * Some notable things that are initialized here:
 *
 * - Display Core, both software and hardware
 * - DC modules that we need (freesync and color management)
 * - DRM software states
 * - Interrupt sources and handlers
 * - Vblank support
 * - Debug FS entries, if enabled
 */
static int dm_hw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	/* Create DAL display manager */
	amdgpu_dm_init(adev);
	amdgpu_dm_hpd_init(adev);

	return 0;
}
/**
 * dm_hw_fini() - Teardown DC device
 * @handle: The base driver device containing the amdgpu_dm device.
 *
 * Teardown components within &struct amdgpu_display_manager that require
 * cleanup. This involves cleaning up the DRM device, DC, and any modules that
 * were loaded. Also flush IRQ workqueues and disable them.
 */
static int dm_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	amdgpu_dm_hpd_fini(adev);

	amdgpu_dm_irq_fini(adev);
	amdgpu_dm_fini(adev);
	return 0;
}
static int dm_enable_vblank(struct drm_crtc *crtc);
static void dm_disable_vblank(struct drm_crtc *crtc);

static void dm_gpureset_toggle_interrupts(struct amdgpu_device *adev,
					  struct dc_state *state, bool enable)
{
	enum dc_irq_source irq_source;
	struct amdgpu_crtc *acrtc;
	int rc = -EBUSY;
	int i = 0;

	for (i = 0; i < state->stream_count; i++) {
		acrtc = get_crtc_by_otg_inst(
				adev, state->stream_status[i].primary_otg_inst);

		if (acrtc && state->stream_status[i].plane_count != 0) {
			irq_source = IRQ_TYPE_PFLIP + acrtc->otg_inst;
			rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
			DRM_DEBUG_VBL("crtc %d - vupdate irq %sabling: r=%d\n",
				      acrtc->crtc_id, enable ? "en" : "dis", rc);
			if (rc)
				DRM_WARN("Failed to %s pflip interrupts\n",
					 enable ? "enable" : "disable");

			if (enable) {
				rc = dm_enable_vblank(&acrtc->base);
				if (rc)
					DRM_WARN("Failed to enable vblank interrupts\n");
			} else {
				dm_disable_vblank(&acrtc->base);
			}
		}
	}
}
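/*
 * Commit a copy of the current DC state with every stream (and its planes)
 * removed, quiescing the display hardware. Used from dm_suspend() while a
 * GPU reset is in progress; the previous state stays cached for restore.
 */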
static enum dc_status amdgpu_dm_commit_zero_streams(struct dc *dc)
{
	struct dc_state *context = NULL;
	enum dc_status res = DC_ERROR_UNEXPECTED;
	int i;
	struct dc_stream_state *del_streams[MAX_PIPES];
	int del_streams_count = 0;

	memset(del_streams, 0, sizeof(del_streams));

	context = dc_create_state(dc);
	if (context == NULL)
		goto context_alloc_fail;

	dc_resource_state_copy_construct_current(dc, context);

	/* First remove from context all streams */
	for (i = 0; i < context->stream_count; i++) {
		struct dc_stream_state *stream = context->streams[i];

		del_streams[del_streams_count++] = stream;
	}

	/* Remove all planes for removed streams and then remove the streams */
	for (i = 0; i < del_streams_count; i++) {
		if (!dc_rem_all_planes_for_stream(dc, del_streams[i], context)) {
			res = DC_FAIL_DETACH_SURFACES;
			goto fail;
		}

		res = dc_remove_stream_from_ctx(dc, context, del_streams[i]);
		if (res != DC_OK)
			goto fail;
	}

	res = dc_validate_global_state(dc, context, false);

	if (res != DC_OK) {
		DRM_ERROR("%s:resource validation failed, dc_status:%d\n", __func__, res);
		goto fail;
	}

	res = dc_commit_state(dc, context);

fail:
	dc_release_state(context);

context_alloc_fail:
	return res;
}
static int dm_suspend(void *handle)
{
	struct amdgpu_device *adev = handle;
	struct amdgpu_display_manager *dm = &adev->dm;
	int ret = 0;

	if (amdgpu_in_reset(adev)) {
		mutex_lock(&dm->dc_lock);

#if defined(CONFIG_DRM_AMD_DC_DCN)
		dc_allow_idle_optimizations(adev->dm.dc, false);
#endif

		dm->cached_dc_state = dc_copy_state(dm->dc->current_state);

		dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, false);

		amdgpu_dm_commit_zero_streams(dm->dc);

		amdgpu_dm_irq_suspend(adev);

		return ret;
	}

	WARN_ON(adev->dm.cached_state);
	adev->dm.cached_state = drm_atomic_helper_suspend(adev_to_drm(adev));

	s3_handle_mst(adev_to_drm(adev), true);

	amdgpu_dm_irq_suspend(adev);

	dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D3);

	return 0;
}
static struct amdgpu_dm_connector *
amdgpu_dm_find_first_crtc_matching_connector(struct drm_atomic_state *state,
					     struct drm_crtc *crtc)
{
	uint32_t i;
	struct drm_connector_state *new_con_state;
	struct drm_connector *connector;
	struct drm_crtc *crtc_from_state;

	for_each_new_connector_in_state(state, connector, new_con_state, i) {
		crtc_from_state = new_con_state->crtc;

		if (crtc_from_state == crtc)
			return to_amdgpu_dm_connector(connector);
	}

	return NULL;
}
2031 static void emulated_link_detect(struct dc_link *link)
2033 struct dc_sink_init_data sink_init_data = { 0 };
2034 struct display_sink_capability sink_caps = { 0 };
2035 enum dc_edid_status edid_status;
2036 struct dc_context *dc_ctx = link->ctx;
2037 struct dc_sink *sink = NULL;
2038 struct dc_sink *prev_sink = NULL;
2040 link->type = dc_connection_none;
2041 prev_sink = link->local_sink;
2044 dc_sink_release(prev_sink);
2046 switch (link->connector_signal) {
2047 case SIGNAL_TYPE_HDMI_TYPE_A: {
2048 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2049 sink_caps.signal = SIGNAL_TYPE_HDMI_TYPE_A;
2053 case SIGNAL_TYPE_DVI_SINGLE_LINK: {
2054 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2055 sink_caps.signal = SIGNAL_TYPE_DVI_SINGLE_LINK;
2059 case SIGNAL_TYPE_DVI_DUAL_LINK: {
2060 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2061 sink_caps.signal = SIGNAL_TYPE_DVI_DUAL_LINK;
2065 case SIGNAL_TYPE_LVDS: {
2066 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2067 sink_caps.signal = SIGNAL_TYPE_LVDS;
2071 case SIGNAL_TYPE_EDP: {
2072 sink_caps.transaction_type =
2073 DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
2074 sink_caps.signal = SIGNAL_TYPE_EDP;
2078 case SIGNAL_TYPE_DISPLAY_PORT: {
2079 sink_caps.transaction_type =
2080 DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
2081 sink_caps.signal = SIGNAL_TYPE_VIRTUAL;
2086 DC_ERROR("Invalid connector type! signal:%d\n",
2087 link->connector_signal);
2091 sink_init_data.link = link;
2092 sink_init_data.sink_signal = sink_caps.signal;
2094 sink = dc_sink_create(&sink_init_data);
2096 DC_ERROR("Failed to create sink!\n");
2100 /* dc_sink_create returns a new reference */
2101 link->local_sink = sink;
2103 edid_status = dm_helpers_read_local_edid(
2108 if (edid_status != EDID_OK)
2109 DC_ERROR("Failed to read EDID\n");
2113 static void dm_gpureset_commit_state(struct dc_state *dc_state,
2114 struct amdgpu_display_manager *dm)
2117 struct dc_surface_update surface_updates[MAX_SURFACES];
2118 struct dc_plane_info plane_infos[MAX_SURFACES];
2119 struct dc_scaling_info scaling_infos[MAX_SURFACES];
2120 struct dc_flip_addrs flip_addrs[MAX_SURFACES];
2121 struct dc_stream_update stream_update;
2125 bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
2128 dm_error("Failed to allocate update bundle\n");
2132 for (k = 0; k < dc_state->stream_count; k++) {
2133 bundle->stream_update.stream = dc_state->streams[k];
2135 for (m = 0; m < dc_state->stream_status[k].plane_count; m++) {
2136 bundle->surface_updates[m].surface =
2137 dc_state->stream_status[k].plane_states[m];
2138 bundle->surface_updates[m].surface->force_full_update =
2141 dc_commit_updates_for_stream(
2142 dm->dc, bundle->surface_updates,
2143 dc_state->stream_status[k].plane_count,
2144 dc_state->streams[k], &bundle->stream_update, dc_state);
2153 static void dm_set_dpms_off(struct dc_link *link)
2155 struct dc_stream_state *stream_state;
2156 struct amdgpu_dm_connector *aconnector = link->priv;
2157 struct amdgpu_device *adev = drm_to_adev(aconnector->base.dev);
2158 struct dc_stream_update stream_update;
2159 bool dpms_off = true;
2161 memset(&stream_update, 0, sizeof(stream_update));
2162 stream_update.dpms_off = &dpms_off;
2164 mutex_lock(&adev->dm.dc_lock);
2165 stream_state = dc_stream_find_from_link(link);
2167 if (stream_state == NULL) {
2168 DRM_DEBUG_DRIVER("Error finding stream state associated with link!\n");
2169 mutex_unlock(&adev->dm.dc_lock);
2173 stream_update.stream = stream_state;
2174 dc_commit_updates_for_stream(stream_state->ctx->dc, NULL, 0,
2175 stream_state, &stream_update,
2176 stream_state->ctx->dc->current_state);
2177 mutex_unlock(&adev->dm.dc_lock);
2180 static int dm_resume(void *handle)
2182 struct amdgpu_device *adev = handle;
2183 struct drm_device *ddev = adev_to_drm(adev);
2184 struct amdgpu_display_manager *dm = &adev->dm;
2185 struct amdgpu_dm_connector *aconnector;
2186 struct drm_connector *connector;
2187 struct drm_connector_list_iter iter;
2188 struct drm_crtc *crtc;
2189 struct drm_crtc_state *new_crtc_state;
2190 struct dm_crtc_state *dm_new_crtc_state;
2191 struct drm_plane *plane;
2192 struct drm_plane_state *new_plane_state;
2193 struct dm_plane_state *dm_new_plane_state;
2194 struct dm_atomic_state *dm_state = to_dm_atomic_state(dm->atomic_obj.state);
2195 enum dc_connection_type new_connection_type = dc_connection_none;
2196 struct dc_state *dc_state;
2199 if (amdgpu_in_reset(adev)) {
2200 dc_state = dm->cached_dc_state;
2202 r = dm_dmub_hw_init(adev);
2204 DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
2206 dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
2209 amdgpu_dm_irq_resume_early(adev);
2211 for (i = 0; i < dc_state->stream_count; i++) {
2212 dc_state->streams[i]->mode_changed = true;
2213 for (j = 0; j < dc_state->stream_status[i].plane_count; j++) {
2214 dc_state->stream_status[i].plane_states[j]->update_flags.raw
2219 WARN_ON(!dc_commit_state(dm->dc, dc_state));
2221 dm_gpureset_commit_state(dm->cached_dc_state, dm);
2223 dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, true);
2225 dc_release_state(dm->cached_dc_state);
2226 dm->cached_dc_state = NULL;
2228 amdgpu_dm_irq_resume_late(adev);
2230 mutex_unlock(&dm->dc_lock);
2234 /* Recreate dc_state - DC invalidates it when setting power state to S3. */
2235 dc_release_state(dm_state->context);
2236 dm_state->context = dc_create_state(dm->dc);
2237 /* TODO: Remove dc_state->dccg, use dc->dccg directly. */
2238 dc_resource_state_construct(dm->dc, dm_state->context);
2240 /* Before powering on DC we need to re-initialize DMUB. */
2241 r = dm_dmub_hw_init(adev);
2243 DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
2245 /* power on hardware */
2246 dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
2248 /* program HPD filter */
2252 * Early enable HPD Rx IRQ; this should be done before setting the mode,
2253 * since short-pulse interrupts are used for MST.
2255 amdgpu_dm_irq_resume_early(adev);
2257 /* On resume we need to rewrite the MSTM control bits to enable MST */
2258 s3_handle_mst(ddev, false);
2261 drm_connector_list_iter_begin(ddev, &iter);
2262 drm_for_each_connector_iter(connector, &iter) {
2263 aconnector = to_amdgpu_dm_connector(connector);
2266 * This is the case when traversing through already-created MST
2267 * connectors; they should be skipped.
2269 if (aconnector->mst_port)
2272 mutex_lock(&aconnector->hpd_lock);
2273 if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
2274 DRM_ERROR("KMS: Failed to detect connector\n");
2276 if (aconnector->base.force && new_connection_type == dc_connection_none)
2277 emulated_link_detect(aconnector->dc_link);
2279 dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD);
2281 if (aconnector->fake_enable && aconnector->dc_link->local_sink)
2282 aconnector->fake_enable = false;
2284 if (aconnector->dc_sink)
2285 dc_sink_release(aconnector->dc_sink);
2286 aconnector->dc_sink = NULL;
2287 amdgpu_dm_update_connector_after_detect(aconnector);
2288 mutex_unlock(&aconnector->hpd_lock);
2290 drm_connector_list_iter_end(&iter);
2292 /* Force mode set in atomic commit */
2293 for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i)
2294 new_crtc_state->active_changed = true;
2297 * atomic_check is expected to create the dc states. We need to release
2298 * them here, since they were duplicated as part of the suspend
2301 for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i) {
2302 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
2303 if (dm_new_crtc_state->stream) {
2304 WARN_ON(kref_read(&dm_new_crtc_state->stream->refcount) > 1);
2305 dc_stream_release(dm_new_crtc_state->stream);
2306 dm_new_crtc_state->stream = NULL;
2310 for_each_new_plane_in_state(dm->cached_state, plane, new_plane_state, i) {
2311 dm_new_plane_state = to_dm_plane_state(new_plane_state);
2312 if (dm_new_plane_state->dc_state) {
2313 WARN_ON(kref_read(&dm_new_plane_state->dc_state->refcount) > 1);
2314 dc_plane_state_release(dm_new_plane_state->dc_state);
2315 dm_new_plane_state->dc_state = NULL;
2319 drm_atomic_helper_resume(ddev, dm->cached_state);
2321 dm->cached_state = NULL;
2323 amdgpu_dm_irq_resume_late(adev);
2325 amdgpu_dm_smu_write_watermarks_table(adev);
2333 * DM (and consequently DC) is registered in the amdgpu base driver as an IP
2334 * block. When CONFIG_DRM_AMD_DC is enabled, the DM device IP block is added to
2335 * the base driver's device list to be initialized and torn down accordingly.
2337 * The functions to do so are provided as hooks in &struct amd_ip_funcs.
2340 static const struct amd_ip_funcs amdgpu_dm_funcs = {
2342 .early_init = dm_early_init,
2343 .late_init = dm_late_init,
2344 .sw_init = dm_sw_init,
2345 .sw_fini = dm_sw_fini,
2346 .hw_init = dm_hw_init,
2347 .hw_fini = dm_hw_fini,
2348 .suspend = dm_suspend,
2349 .resume = dm_resume,
2350 .is_idle = dm_is_idle,
2351 .wait_for_idle = dm_wait_for_idle,
2352 .check_soft_reset = dm_check_soft_reset,
2353 .soft_reset = dm_soft_reset,
2354 .set_clockgating_state = dm_set_clockgating_state,
2355 .set_powergating_state = dm_set_powergating_state,
2358 const struct amdgpu_ip_block_version dm_ip_block =
2360 .type = AMD_IP_BLOCK_TYPE_DCE,
2364 .funcs = &amdgpu_dm_funcs,
2374 static const struct drm_mode_config_funcs amdgpu_dm_mode_funcs = {
2375 .fb_create = amdgpu_display_user_framebuffer_create,
2376 .get_format_info = amd_get_format_info,
2377 .output_poll_changed = drm_fb_helper_output_poll_changed,
2378 .atomic_check = amdgpu_dm_atomic_check,
2379 .atomic_commit = drm_atomic_helper_commit,
2382 static struct drm_mode_config_helper_funcs amdgpu_dm_mode_config_helperfuncs = {
2383 .atomic_commit_tail = amdgpu_dm_atomic_commit_tail
2386 static void update_connector_ext_caps(struct amdgpu_dm_connector *aconnector)
2388 u32 max_cll, min_cll, max, min, q, r;
2389 struct amdgpu_dm_backlight_caps *caps;
2390 struct amdgpu_display_manager *dm;
2391 struct drm_connector *conn_base;
2392 struct amdgpu_device *adev;
2393 struct dc_link *link = NULL;
2394 static const u8 pre_computed_values[] = {
2395 50, 51, 52, 53, 55, 56, 57, 58, 59, 61, 62, 63, 65, 66, 68, 69,
2396 71, 72, 74, 75, 77, 79, 81, 82, 84, 86, 88, 90, 92, 94, 96, 98};
2398 if (!aconnector || !aconnector->dc_link)
2401 link = aconnector->dc_link;
2402 if (link->connector_signal != SIGNAL_TYPE_EDP)
2405 conn_base = &aconnector->base;
2406 adev = drm_to_adev(conn_base->dev);
2408 caps = &dm->backlight_caps;
2409 caps->ext_caps = &aconnector->dc_link->dpcd_sink_ext_caps;
2410 caps->aux_support = false;
2411 max_cll = conn_base->hdr_sink_metadata.hdmi_type1.max_cll;
2412 min_cll = conn_base->hdr_sink_metadata.hdmi_type1.min_cll;
2414 if (caps->ext_caps->bits.oled == 1 ||
2415 caps->ext_caps->bits.sdr_aux_backlight_control == 1 ||
2416 caps->ext_caps->bits.hdr_aux_backlight_control == 1)
2417 caps->aux_support = true;
2419 if (amdgpu_backlight == 0)
2420 caps->aux_support = false;
2421 else if (amdgpu_backlight == 1)
2422 caps->aux_support = true;
2424 /* From the specification (CTA-861-G), the maximum luminance is
2425 * calculated as:
2426 * Luminance = 50*2**(CV/32)
2427 * where CV is a one-byte value.
2428 * Evaluating this expression directly would require floating-point
2429 * precision; to avoid that complexity, we take advantage of the fact
2430 * that CV is divided by a constant. By Euclid's division algorithm, CV
2431 * can be written as CV = 32*q + r. Substituting this into the
2432 * luminance expression gives 50*(2**q)*(2**(r/32)), so we only need to
2433 * pre-compute the values of 50*2**(r/32). The pre-computation used
2434 * the following Ruby line:
2435 * (0...32).each {|cv| puts (50*2**(cv/32.0)).round}
2436 * The results of the above expression can be verified in
2437 * pre_computed_values.
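* Worked example (sketch): max_cll = 100 gives q = 3 and r = 4, so
* max = (1 << 3) * pre_computed_values[4] = 8 * 55 = 440, close to the
* exact round(50 * 2**(100/32.0)) = 436; the difference comes from the
* rounding of the table entries.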
2441 max = (1 << q) * pre_computed_values[r];
2443 // min luminance: maxLum * (CV/255)^2 / 100
2444 min = DIV_ROUND_CLOSEST(max * min_cll * min_cll, 255 * 255 * 100);
2447 caps->aux_max_input_signal = max;
2448 caps->aux_min_input_signal = min;
2451 void amdgpu_dm_update_connector_after_detect(
2452 struct amdgpu_dm_connector *aconnector)
2454 struct drm_connector *connector = &aconnector->base;
2455 struct drm_device *dev = connector->dev;
2456 struct dc_sink *sink;
2458 /* MST handled by drm_mst framework */
2459 if (aconnector->mst_mgr.mst_state)
2462 sink = aconnector->dc_link->local_sink;
2464 dc_sink_retain(sink);
2467 * An EDID-managed connector gets its first update only in the mode_valid
2468 * hook; then the connector sink is set to either a fake or a physical sink,
2469 * depending on link status. Skip if already done during boot.
2471 if (aconnector->base.force != DRM_FORCE_UNSPECIFIED
2472 && aconnector->dc_em_sink) {
2475 * For headless S3 resume, use the emulated sink (dc_em_sink) to fake a
2476 * stream, because connector->sink is set to NULL on resume.
2478 mutex_lock(&dev->mode_config.mutex);
2481 if (aconnector->dc_sink) {
2482 amdgpu_dm_update_freesync_caps(connector, NULL);
2484 * The retain and release below bump up the sink's refcount, because
2485 * the link no longer points to it after disconnect; without this,
2486 * the next crtc-to-connector reshuffle by the UMD would trigger an
2487 * unwanted dc_sink release.
2489 dc_sink_release(aconnector->dc_sink);
2491 aconnector->dc_sink = sink;
2492 dc_sink_retain(aconnector->dc_sink);
2493 amdgpu_dm_update_freesync_caps(connector,
2496 amdgpu_dm_update_freesync_caps(connector, NULL);
2497 if (!aconnector->dc_sink) {
2498 aconnector->dc_sink = aconnector->dc_em_sink;
2499 dc_sink_retain(aconnector->dc_sink);
2503 mutex_unlock(&dev->mode_config.mutex);
2506 dc_sink_release(sink);
2511 * TODO: temporary guard until a proper fix is found;
2512 * if this sink is an MST sink, we should not do anything.
2514 if (sink && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
2515 dc_sink_release(sink);
2519 if (aconnector->dc_sink == sink) {
2521 * We got a DP short pulse (Link Loss, DP CTS, etc...).
2524 DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: dc_sink didn't change.\n",
2525 aconnector->connector_id);
2527 dc_sink_release(sink);
2531 DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: Old sink=%p New sink=%p\n",
2532 aconnector->connector_id, aconnector->dc_sink, sink);
2534 mutex_lock(&dev->mode_config.mutex);
2537 * 1. Update status of the drm connector
2538 * 2. Send an event and let userspace tell us what to do
2542 * TODO: check if we still need the S3 mode update workaround.
2543 * If yes, put it here.
2545 if (aconnector->dc_sink) {
2546 amdgpu_dm_update_freesync_caps(connector, NULL);
2547 dc_sink_release(aconnector->dc_sink);
2550 aconnector->dc_sink = sink;
2551 dc_sink_retain(aconnector->dc_sink);
2552 if (sink->dc_edid.length == 0) {
2553 aconnector->edid = NULL;
2554 if (aconnector->dc_link->aux_mode) {
2555 drm_dp_cec_unset_edid(
2556 &aconnector->dm_dp_aux.aux);
2560 (struct edid *)sink->dc_edid.raw_edid;
2562 drm_connector_update_edid_property(connector,
2564 if (aconnector->dc_link->aux_mode)
2565 drm_dp_cec_set_edid(&aconnector->dm_dp_aux.aux,
2569 amdgpu_dm_update_freesync_caps(connector, aconnector->edid);
2570 update_connector_ext_caps(aconnector);
2572 drm_dp_cec_unset_edid(&aconnector->dm_dp_aux.aux);
2573 amdgpu_dm_update_freesync_caps(connector, NULL);
2574 drm_connector_update_edid_property(connector, NULL);
2575 aconnector->num_modes = 0;
2576 dc_sink_release(aconnector->dc_sink);
2577 aconnector->dc_sink = NULL;
2578 aconnector->edid = NULL;
2579 #ifdef CONFIG_DRM_AMD_DC_HDCP
2580 /* Set CP to DESIRED if it was ENABLED, so we can re-enable it again on hotplug */
2581 if (connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
2582 connector->state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
2586 mutex_unlock(&dev->mode_config.mutex);
2588 update_subconnector_property(aconnector);
2591 dc_sink_release(sink);
2594 static void handle_hpd_irq(void *param)
2596 struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
2597 struct drm_connector *connector = &aconnector->base;
2598 struct drm_device *dev = connector->dev;
2599 enum dc_connection_type new_connection_type = dc_connection_none;
2600 struct amdgpu_device *adev = drm_to_adev(dev);
2601 #ifdef CONFIG_DRM_AMD_DC_HDCP
2602 struct dm_connector_state *dm_con_state = to_dm_connector_state(connector->state);
2605 if (adev->dm.disable_hpd_irq)
2609 * In case of failure or MST there is no need to update the connector
2610 * status or notify the OS, since (in the MST case) MST handles this in its own context.
2612 mutex_lock(&aconnector->hpd_lock);
2614 #ifdef CONFIG_DRM_AMD_DC_HDCP
2615 if (adev->dm.hdcp_workqueue) {
2616 hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
2617 dm_con_state->update_hdcp = true;
2620 if (aconnector->fake_enable)
2621 aconnector->fake_enable = false;
2623 if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
2624 DRM_ERROR("KMS: Failed to detect connector\n");
2626 if (aconnector->base.force && new_connection_type == dc_connection_none) {
2627 emulated_link_detect(aconnector->dc_link);
2630 drm_modeset_lock_all(dev);
2631 dm_restore_drm_connector_state(dev, connector);
2632 drm_modeset_unlock_all(dev);
2634 if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
2635 drm_kms_helper_hotplug_event(dev);
2637 } else if (dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD)) {
2638 if (new_connection_type == dc_connection_none &&
2639 aconnector->dc_link->type == dc_connection_none)
2640 dm_set_dpms_off(aconnector->dc_link);
2642 amdgpu_dm_update_connector_after_detect(aconnector);
2644 drm_modeset_lock_all(dev);
2645 dm_restore_drm_connector_state(dev, connector);
2646 drm_modeset_unlock_all(dev);
2648 if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
2649 drm_kms_helper_hotplug_event(dev);
2651 mutex_unlock(&aconnector->hpd_lock);
2655 static void dm_handle_hpd_rx_irq(struct amdgpu_dm_connector *aconnector)
2657 uint8_t esi[DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI] = { 0 };
2659 bool new_irq_handled = false;
2661 int dpcd_bytes_to_read;
2663 const int max_process_count = 30;
2664 int process_count = 0;
2666 const struct dc_link_status *link_status = dc_link_get_status(aconnector->dc_link);
2668 if (link_status->dpcd_caps->dpcd_rev.raw < 0x12) {
2669 dpcd_bytes_to_read = DP_LANE0_1_STATUS - DP_SINK_COUNT;
2670 /* DPCD 0x200 - 0x201 for downstream IRQ */
2671 dpcd_addr = DP_SINK_COUNT;
2673 dpcd_bytes_to_read = DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI;
2674 /* DPCD 0x2002 - 0x2005 for downstream IRQ */
2675 dpcd_addr = DP_SINK_COUNT_ESI;
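/*
 * Note: with the standard DPCD register map this reads 2 bytes starting
 * at 0x200 (DP_SINK_COUNT through 0x201) for pre-1.2 sinks, and 4 bytes
 * starting at 0x2002 (DP_SINK_COUNT_ESI through 0x2005) for DPCD 1.2+
 * sinks, matching the ranges named in the comments above.
 */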
2678 dret = drm_dp_dpcd_read(
2679 &aconnector->dm_dp_aux.aux,
2682 dpcd_bytes_to_read);
2684 while (dret == dpcd_bytes_to_read &&
2685 process_count < max_process_count) {
2691 DRM_DEBUG_DRIVER("ESI %02x %02x %02x\n", esi[0], esi[1], esi[2]);
2692 /* handle HPD short pulse irq */
2693 if (aconnector->mst_mgr.mst_state)
2695 &aconnector->mst_mgr,
2699 if (new_irq_handled) {
2700 /* ACK at DPCD to notify downstream */
2701 const int ack_dpcd_bytes_to_write =
2702 dpcd_bytes_to_read - 1;
2704 for (retry = 0; retry < 3; retry++) {
2707 wret = drm_dp_dpcd_write(
2708 &aconnector->dm_dp_aux.aux,
2711 ack_dpcd_bytes_to_write);
2712 if (wret == ack_dpcd_bytes_to_write)
2716 /* check if there is a new irq to be handled */
2717 dret = drm_dp_dpcd_read(
2718 &aconnector->dm_dp_aux.aux,
2721 dpcd_bytes_to_read);
2723 new_irq_handled = false;
2729 if (process_count == max_process_count)
2730 DRM_DEBUG_DRIVER("Loop exceeded max iterations\n");
2733 static void handle_hpd_rx_irq(void *param)
2735 struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
2736 struct drm_connector *connector = &aconnector->base;
2737 struct drm_device *dev = connector->dev;
2738 struct dc_link *dc_link = aconnector->dc_link;
2739 bool is_mst_root_connector = aconnector->mst_mgr.mst_state;
2740 bool result = false;
2741 enum dc_connection_type new_connection_type = dc_connection_none;
2742 struct amdgpu_device *adev = drm_to_adev(dev);
2743 union hpd_irq_data hpd_irq_data;
2746 memset(&hpd_irq_data, 0, sizeof(hpd_irq_data));
2748 if (adev->dm.disable_hpd_irq)
2753 * TODO: A mutex is temporarily added to protect the hpd interrupt against a
2754 * gpio conflict; once an i2c helper is implemented, this mutex should be
2757 mutex_lock(&aconnector->hpd_lock);
2759 read_hpd_rx_irq_data(dc_link, &hpd_irq_data);
2761 if ((dc_link->cur_link_settings.lane_count != LANE_COUNT_UNKNOWN) ||
2762 (dc_link->type == dc_connection_mst_branch)) {
2763 if (hpd_irq_data.bytes.device_service_irq.bits.UP_REQ_MSG_RDY) {
2765 dm_handle_hpd_rx_irq(aconnector);
2767 } else if (hpd_irq_data.bytes.device_service_irq.bits.DOWN_REP_MSG_RDY) {
2769 dm_handle_hpd_rx_irq(aconnector);
2775 * TODO: We need the lock to avoid touching DC state while it's being
2776 * modified during automated compliance testing, or when link loss
2777 * happens. While this should be split into subhandlers and proper
2778 * interfaces to avoid having to conditionally lock like this in the
2779 * outer layer, we need this workaround temporarily to allow MST
2780 * lightup in some scenarios to avoid timeout.
2782 if (!amdgpu_in_reset(adev) &&
2783 (hpd_rx_irq_check_link_loss_status(dc_link, &hpd_irq_data) ||
2784 hpd_irq_data.bytes.device_service_irq.bits.AUTOMATED_TEST)) {
2785 mutex_lock(&adev->dm.dc_lock);
2789 #ifdef CONFIG_DRM_AMD_DC_HDCP
2790 result = dc_link_handle_hpd_rx_irq(dc_link, &hpd_irq_data, NULL);
2792 result = dc_link_handle_hpd_rx_irq(dc_link, NULL, NULL);
2794 if (!amdgpu_in_reset(adev) && lock_flag)
2795 mutex_unlock(&adev->dm.dc_lock);
2798 if (result && !is_mst_root_connector) {
2799 /* Downstream Port status changed. */
2800 if (!dc_link_detect_sink(dc_link, &new_connection_type))
2801 DRM_ERROR("KMS: Failed to detect connector\n");
2803 if (aconnector->base.force && new_connection_type == dc_connection_none) {
2804 emulated_link_detect(dc_link);
2806 if (aconnector->fake_enable)
2807 aconnector->fake_enable = false;
2809 amdgpu_dm_update_connector_after_detect(aconnector);
2812 drm_modeset_lock_all(dev);
2813 dm_restore_drm_connector_state(dev, connector);
2814 drm_modeset_unlock_all(dev);
2816 drm_kms_helper_hotplug_event(dev);
2817 } else if (dc_link_detect(dc_link, DETECT_REASON_HPDRX)) {
2819 if (aconnector->fake_enable)
2820 aconnector->fake_enable = false;
2822 amdgpu_dm_update_connector_after_detect(aconnector);
2825 drm_modeset_lock_all(dev);
2826 dm_restore_drm_connector_state(dev, connector);
2827 drm_modeset_unlock_all(dev);
2829 drm_kms_helper_hotplug_event(dev);
2832 #ifdef CONFIG_DRM_AMD_DC_HDCP
2833 if (hpd_irq_data.bytes.device_service_irq.bits.CP_IRQ) {
2834 if (adev->dm.hdcp_workqueue)
2835 hdcp_handle_cpirq(adev->dm.hdcp_workqueue, aconnector->base.index);
2839 if (dc_link->type != dc_connection_mst_branch)
2840 drm_dp_cec_irq(&aconnector->dm_dp_aux.aux);
2842 mutex_unlock(&aconnector->hpd_lock);
2845 static void register_hpd_handlers(struct amdgpu_device *adev)
2847 struct drm_device *dev = adev_to_drm(adev);
2848 struct drm_connector *connector;
2849 struct amdgpu_dm_connector *aconnector;
2850 const struct dc_link *dc_link;
2851 struct dc_interrupt_params int_params = {0};
2853 int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2854 int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2856 list_for_each_entry(connector,
2857 &dev->mode_config.connector_list, head) {
2859 aconnector = to_amdgpu_dm_connector(connector);
2860 dc_link = aconnector->dc_link;
2862 if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd) {
2863 int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
2864 int_params.irq_source = dc_link->irq_source_hpd;
2866 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2868 (void *) aconnector);
2871 if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd_rx) {
2873 /* Also register for DP short pulse (hpd_rx). */
2874 int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
2875 int_params.irq_source = dc_link->irq_source_hpd_rx;
2877 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2879 (void *) aconnector);
2884 #if defined(CONFIG_DRM_AMD_DC_SI)
2885 /* Register IRQ sources and initialize IRQ callbacks */
2886 static int dce60_register_irq_handlers(struct amdgpu_device *adev)
2888 struct dc *dc = adev->dm.dc;
2889 struct common_irq_params *c_irq_params;
2890 struct dc_interrupt_params int_params = {0};
2893 unsigned client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
2895 int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2896 int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2899 * Actions of amdgpu_irq_add_id():
2900 * 1. Register a set() function with base driver.
2901 * Base driver will call set() function to enable/disable an
2902 * interrupt in DC hardware.
2903 * 2. Register amdgpu_dm_irq_handler().
2904 * Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
2905 * coming from DC hardware.
2906 * amdgpu_dm_irq_handler() will re-direct the interrupt to DC
2907 * for acknowledging and handling. */
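/*
 * Sketch of the resulting dispatch chain for one interrupt source,
 * assuming both registrations below succeed:
 *
 *   hw irq (client_id, src_id)
 *     -> amdgpu_dm_irq_handler()            (via amdgpu_irq_add_id)
 *        -> dm_crtc_high_irq(c_irq_params)  (via amdgpu_dm_irq_register_interrupt)
 *
 * where c_irq_params is picked from the DC irq source that
 * dc_interrupt_to_irq_source() maps the src_id to.
 */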
2909 /* Use VBLANK interrupt */
2910 for (i = 0; i < adev->mode_info.num_crtc; i++) {
2911 r = amdgpu_irq_add_id(adev, client_id, i + 1, &adev->crtc_irq);
2913 DRM_ERROR("Failed to add crtc irq id!\n");
2917 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2918 int_params.irq_source =
2919 dc_interrupt_to_irq_source(dc, i + 1, 0);
2921 c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
2923 c_irq_params->adev = adev;
2924 c_irq_params->irq_src = int_params.irq_source;
2926 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2927 dm_crtc_high_irq, c_irq_params);
2930 /* Use GRPH_PFLIP interrupt */
2931 for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
2932 i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
2933 r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
2935 DRM_ERROR("Failed to add page flip irq id!\n");
2939 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2940 int_params.irq_source =
2941 dc_interrupt_to_irq_source(dc, i, 0);
2943 c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
2945 c_irq_params->adev = adev;
2946 c_irq_params->irq_src = int_params.irq_source;
2948 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2949 dm_pflip_high_irq, c_irq_params);
2954 r = amdgpu_irq_add_id(adev, client_id,
2955 VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
2957 DRM_ERROR("Failed to add hpd irq id!\n");
2961 register_hpd_handlers(adev);
2967 /* Register IRQ sources and initialize IRQ callbacks */
2968 static int dce110_register_irq_handlers(struct amdgpu_device *adev)
2970 struct dc *dc = adev->dm.dc;
2971 struct common_irq_params *c_irq_params;
2972 struct dc_interrupt_params int_params = {0};
2975 unsigned client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
2977 if (adev->asic_type >= CHIP_VEGA10)
2978 client_id = SOC15_IH_CLIENTID_DCE;
2980 int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2981 int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2984 * Actions of amdgpu_irq_add_id():
2985 * 1. Register a set() function with base driver.
2986 * Base driver will call set() function to enable/disable an
2987 * interrupt in DC hardware.
2988 * 2. Register amdgpu_dm_irq_handler().
2989 * Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
2990 * coming from DC hardware.
2991 * amdgpu_dm_irq_handler() will re-direct the interrupt to DC
2992 * for acknowledging and handling. */
2994 /* Use VBLANK interrupt */
2995 for (i = VISLANDS30_IV_SRCID_D1_VERTICAL_INTERRUPT0; i <= VISLANDS30_IV_SRCID_D6_VERTICAL_INTERRUPT0; i++) {
2996 r = amdgpu_irq_add_id(adev, client_id, i, &adev->crtc_irq);
2998 DRM_ERROR("Failed to add crtc irq id!\n");
3002 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3003 int_params.irq_source =
3004 dc_interrupt_to_irq_source(dc, i, 0);
3006 c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
3008 c_irq_params->adev = adev;
3009 c_irq_params->irq_src = int_params.irq_source;
3011 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3012 dm_crtc_high_irq, c_irq_params);
3015 /* Use VUPDATE interrupt */
3016 for (i = VISLANDS30_IV_SRCID_D1_V_UPDATE_INT; i <= VISLANDS30_IV_SRCID_D6_V_UPDATE_INT; i += 2) {
3017 r = amdgpu_irq_add_id(adev, client_id, i, &adev->vupdate_irq);
3019 DRM_ERROR("Failed to add vupdate irq id!\n");
3023 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3024 int_params.irq_source =
3025 dc_interrupt_to_irq_source(dc, i, 0);
3027 c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
3029 c_irq_params->adev = adev;
3030 c_irq_params->irq_src = int_params.irq_source;
3032 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3033 dm_vupdate_high_irq, c_irq_params);
3036 /* Use GRPH_PFLIP interrupt */
3037 for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
3038 i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
3039 r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
3041 DRM_ERROR("Failed to add page flip irq id!\n");
3045 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3046 int_params.irq_source =
3047 dc_interrupt_to_irq_source(dc, i, 0);
3049 c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
3051 c_irq_params->adev = adev;
3052 c_irq_params->irq_src = int_params.irq_source;
3054 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3055 dm_pflip_high_irq, c_irq_params);
3060 r = amdgpu_irq_add_id(adev, client_id,
3061 VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
3063 DRM_ERROR("Failed to add hpd irq id!\n");
3067 register_hpd_handlers(adev);
3072 #if defined(CONFIG_DRM_AMD_DC_DCN)
3073 /* Register IRQ sources and initialize IRQ callbacks */
3074 static int dcn10_register_irq_handlers(struct amdgpu_device *adev)
3076 struct dc *dc = adev->dm.dc;
3077 struct common_irq_params *c_irq_params;
3078 struct dc_interrupt_params int_params = {0};
3081 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
3082 static const unsigned int vrtl_int_srcid[] = {
3083 DCN_1_0__SRCID__OTG1_VERTICAL_INTERRUPT0_CONTROL,
3084 DCN_1_0__SRCID__OTG2_VERTICAL_INTERRUPT0_CONTROL,
3085 DCN_1_0__SRCID__OTG3_VERTICAL_INTERRUPT0_CONTROL,
3086 DCN_1_0__SRCID__OTG4_VERTICAL_INTERRUPT0_CONTROL,
3087 DCN_1_0__SRCID__OTG5_VERTICAL_INTERRUPT0_CONTROL,
3088 DCN_1_0__SRCID__OTG6_VERTICAL_INTERRUPT0_CONTROL
3092 int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3093 int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3096 * Actions of amdgpu_irq_add_id():
3097 * 1. Register a set() function with base driver.
3098 * Base driver will call set() function to enable/disable an
3099 * interrupt in DC hardware.
3100 * 2. Register amdgpu_dm_irq_handler().
3101 * Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
3102 * coming from DC hardware.
3103 * amdgpu_dm_irq_handler() will re-direct the interrupt to DC
3104 * for acknowledging and handling.
3107 /* Use VSTARTUP interrupt */
3108 for (i = DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP;
3109 i <= DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP + adev->mode_info.num_crtc - 1;
3111 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->crtc_irq);
3114 DRM_ERROR("Failed to add crtc irq id!\n");
3118 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3119 int_params.irq_source =
3120 dc_interrupt_to_irq_source(dc, i, 0);
3122 c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
3124 c_irq_params->adev = adev;
3125 c_irq_params->irq_src = int_params.irq_source;
3127 amdgpu_dm_irq_register_interrupt(
3128 adev, &int_params, dm_crtc_high_irq, c_irq_params);
3131 /* Use otg vertical line interrupt */
3132 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
3133 for (i = 0; i <= adev->mode_info.num_crtc - 1; i++) {
3134 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE,
3135 vrtl_int_srcid[i], &adev->vline0_irq);
3138 DRM_ERROR("Failed to add vline0 irq id!\n");
3142 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3143 int_params.irq_source =
3144 dc_interrupt_to_irq_source(dc, vrtl_int_srcid[i], 0);
3146 if (int_params.irq_source == DC_IRQ_SOURCE_INVALID) {
3147 DRM_ERROR("Failed to register vline0 irq %d!\n", vrtl_int_srcid[i]);
3151 c_irq_params = &adev->dm.vline0_params[int_params.irq_source
3152 - DC_IRQ_SOURCE_DC1_VLINE0];
3154 c_irq_params->adev = adev;
3155 c_irq_params->irq_src = int_params.irq_source;
3157 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3158 dm_dcn_vertical_interrupt0_high_irq, c_irq_params);
3162 /* Use VUPDATE_NO_LOCK interrupt on DCN, which seems to correspond to
3163 * the regular VUPDATE interrupt on DCE. We want DC_IRQ_SOURCE_VUPDATEx
3164 * to trigger at end of each vblank, regardless of state of the lock,
3165 * matching DCE behaviour.
3167 for (i = DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT;
3168 i <= DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT + adev->mode_info.num_crtc - 1;
3170 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->vupdate_irq);
3173 DRM_ERROR("Failed to add vupdate irq id!\n");
3177 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3178 int_params.irq_source =
3179 dc_interrupt_to_irq_source(dc, i, 0);
3181 c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
3183 c_irq_params->adev = adev;
3184 c_irq_params->irq_src = int_params.irq_source;
3186 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3187 dm_vupdate_high_irq, c_irq_params);
3190 /* Use GRPH_PFLIP interrupt */
3191 for (i = DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT;
3192 i <= DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT + adev->mode_info.num_crtc - 1;
3194 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->pageflip_irq);
3196 DRM_ERROR("Failed to add page flip irq id!\n");
3200 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3201 int_params.irq_source =
3202 dc_interrupt_to_irq_source(dc, i, 0);
3204 c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
3206 c_irq_params->adev = adev;
3207 c_irq_params->irq_src = int_params.irq_source;
3209 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3210 dm_pflip_high_irq, c_irq_params);
3215 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DC_HPD1_INT,
3218 DRM_ERROR("Failed to add hpd irq id!\n");
3222 register_hpd_handlers(adev);
3226 /* Register Outbox IRQ sources and initialize IRQ callbacks */
3227 static int register_outbox_irq_handlers(struct amdgpu_device *adev)
3229 struct dc *dc = adev->dm.dc;
3230 struct common_irq_params *c_irq_params;
3231 struct dc_interrupt_params int_params = {0};
3234 int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3235 int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3237 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DMCUB_OUTBOX_LOW_PRIORITY_READY_INT,
3238 &adev->dmub_outbox_irq);
3240 DRM_ERROR("Failed to add outbox irq id!\n");
3244 if (dc->ctx->dmub_srv) {
3245 i = DCN_1_0__SRCID__DMCUB_OUTBOX_LOW_PRIORITY_READY_INT;
3246 int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
3247 int_params.irq_source =
3248 dc_interrupt_to_irq_source(dc, i, 0);
3250 c_irq_params = &adev->dm.dmub_outbox_params[0];
3252 c_irq_params->adev = adev;
3253 c_irq_params->irq_src = int_params.irq_source;
3255 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3256 dm_dmub_outbox1_low_irq, c_irq_params);
3264 * Acquires the lock for the atomic state object and returns
3265 * the new atomic state.
3267 * This should only be called during atomic check.
3269 static int dm_atomic_get_state(struct drm_atomic_state *state,
3270 struct dm_atomic_state **dm_state)
3272 struct drm_device *dev = state->dev;
3273 struct amdgpu_device *adev = drm_to_adev(dev);
3274 struct amdgpu_display_manager *dm = &adev->dm;
3275 struct drm_private_state *priv_state;
3280 priv_state = drm_atomic_get_private_obj_state(state, &dm->atomic_obj);
3281 if (IS_ERR(priv_state))
3282 return PTR_ERR(priv_state);
3284 *dm_state = to_dm_atomic_state(priv_state);
3289 static struct dm_atomic_state *
3290 dm_atomic_get_new_state(struct drm_atomic_state *state)
3292 struct drm_device *dev = state->dev;
3293 struct amdgpu_device *adev = drm_to_adev(dev);
3294 struct amdgpu_display_manager *dm = &adev->dm;
3295 struct drm_private_obj *obj;
3296 struct drm_private_state *new_obj_state;
3299 for_each_new_private_obj_in_state(state, obj, new_obj_state, i) {
3300 if (obj->funcs == dm->atomic_obj.funcs)
3301 return to_dm_atomic_state(new_obj_state);
3307 static struct drm_private_state *
3308 dm_atomic_duplicate_state(struct drm_private_obj *obj)
3310 struct dm_atomic_state *old_state, *new_state;
3312 new_state = kzalloc(sizeof(*new_state), GFP_KERNEL);
3316 __drm_atomic_helper_private_obj_duplicate_state(obj, &new_state->base);
3318 old_state = to_dm_atomic_state(obj->state);
3320 if (old_state && old_state->context)
3321 new_state->context = dc_copy_state(old_state->context);
3323 if (!new_state->context) {
3328 return &new_state->base;
3331 static void dm_atomic_destroy_state(struct drm_private_obj *obj,
3332 struct drm_private_state *state)
3334 struct dm_atomic_state *dm_state = to_dm_atomic_state(state);
3336 if (dm_state && dm_state->context)
3337 dc_release_state(dm_state->context);
3342 static struct drm_private_state_funcs dm_atomic_state_funcs = {
3343 .atomic_duplicate_state = dm_atomic_duplicate_state,
3344 .atomic_destroy_state = dm_atomic_destroy_state,
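/*
 * Note: with these funcs attached to dm->atomic_obj, the
 * drm_atomic_get_private_obj_state() call in dm_atomic_get_state() above
 * duplicates the dc_state via dm_atomic_duplicate_state(), and the
 * duplicate is released via dm_atomic_destroy_state() when the
 * drm_atomic_state is cleaned up.
 */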
3347 static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev)
3349 struct dm_atomic_state *state;
3352 adev->mode_info.mode_config_initialized = true;
3354 adev_to_drm(adev)->mode_config.funcs = (void *)&amdgpu_dm_mode_funcs;
3355 adev_to_drm(adev)->mode_config.helper_private = &amdgpu_dm_mode_config_helperfuncs;
3357 adev_to_drm(adev)->mode_config.max_width = 16384;
3358 adev_to_drm(adev)->mode_config.max_height = 16384;
3360 adev_to_drm(adev)->mode_config.preferred_depth = 24;
3361 adev_to_drm(adev)->mode_config.prefer_shadow = 1;
3362 /* indicates support for immediate flip */
3363 adev_to_drm(adev)->mode_config.async_page_flip = true;
3365 adev_to_drm(adev)->mode_config.fb_base = adev->gmc.aper_base;
3367 state = kzalloc(sizeof(*state), GFP_KERNEL);
3371 state->context = dc_create_state(adev->dm.dc);
3372 if (!state->context) {
3377 dc_resource_state_copy_construct_current(adev->dm.dc, state->context);
3379 drm_atomic_private_obj_init(adev_to_drm(adev),
3380 &adev->dm.atomic_obj,
3382 &dm_atomic_state_funcs);
3384 r = amdgpu_display_modeset_create_props(adev);
3386 dc_release_state(state->context);
3391 r = amdgpu_dm_audio_init(adev);
3393 dc_release_state(state->context);
3401 #define AMDGPU_DM_DEFAULT_MIN_BACKLIGHT 12
3402 #define AMDGPU_DM_DEFAULT_MAX_BACKLIGHT 255
3403 #define AUX_BL_DEFAULT_TRANSITION_TIME_MS 50
3405 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
3406 defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
3408 static void amdgpu_dm_update_backlight_caps(struct amdgpu_display_manager *dm)
3410 #if defined(CONFIG_ACPI)
3411 struct amdgpu_dm_backlight_caps caps;
3413 memset(&caps, 0, sizeof(caps));
3415 if (dm->backlight_caps.caps_valid)
3418 amdgpu_acpi_get_backlight_caps(&caps);
3419 if (caps.caps_valid) {
3420 dm->backlight_caps.caps_valid = true;
3421 if (caps.aux_support)
3423 dm->backlight_caps.min_input_signal = caps.min_input_signal;
3424 dm->backlight_caps.max_input_signal = caps.max_input_signal;
3426 dm->backlight_caps.min_input_signal =
3427 AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
3428 dm->backlight_caps.max_input_signal =
3429 AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
3432 if (dm->backlight_caps.aux_support)
3435 dm->backlight_caps.min_input_signal = AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
3436 dm->backlight_caps.max_input_signal = AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
3440 static int get_brightness_range(const struct amdgpu_dm_backlight_caps *caps,
3441 unsigned *min, unsigned *max)
3446 if (caps->aux_support) {
3447 // Firmware limits are in nits, DC API wants millinits.
3448 *max = 1000 * caps->aux_max_input_signal;
3449 *min = 1000 * caps->aux_min_input_signal;
3451 // Firmware limits are 8-bit, PWM control is 16-bit.
3452 *max = 0x101 * caps->max_input_signal;
3453 *min = 0x101 * caps->min_input_signal;
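/*
 * Note: 0x101 * 0xFF = 0xFFFF, so the 8-bit firmware limits are
 * stretched across the full 16-bit PWM range.
 */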
3458 static u32 convert_brightness_from_user(const struct amdgpu_dm_backlight_caps *caps,
3459 uint32_t brightness)
3463 if (!get_brightness_range(caps, &min, &max))
3466 // Rescale 0..255 to min..max
3467 return min + DIV_ROUND_CLOSEST((max - min) * brightness,
3468 AMDGPU_MAX_BL_LEVEL);
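/*
 * Worked example (sketch, assuming AMDGPU_MAX_BL_LEVEL is 255): with the
 * PWM defaults above (min_input_signal = 12, max_input_signal = 255) the
 * range is min = 0x101 * 12 = 0xC0C and max = 0x101 * 255 = 0xFFFF, so
 * user brightness 0 maps to 0xC0C and 255 maps to 0xFFFF.
 */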
3471 static u32 convert_brightness_to_user(const struct amdgpu_dm_backlight_caps *caps,
3472 uint32_t brightness)
3476 if (!get_brightness_range(caps, &min, &max))
3479 if (brightness < min)
3481 // Rescale min..max to 0..255
3482 return DIV_ROUND_CLOSEST(AMDGPU_MAX_BL_LEVEL * (brightness - min),
3486 static int amdgpu_dm_backlight_set_level(struct amdgpu_display_manager *dm,
3487 u32 user_brightness)
3489 struct amdgpu_dm_backlight_caps caps;
3490 struct dc_link *link[AMDGPU_DM_MAX_NUM_EDP];
3491 u32 brightness[AMDGPU_DM_MAX_NUM_EDP];
3495 amdgpu_dm_update_backlight_caps(dm);
3496 caps = dm->backlight_caps;
3498 for (i = 0; i < dm->num_of_edps; i++) {
3499 dm->brightness[i] = user_brightness;
3500 brightness[i] = convert_brightness_from_user(&caps, dm->brightness[i]);
3501 link[i] = (struct dc_link *)dm->backlight_link[i];
3504 /* Change brightness based on AUX property */
3505 if (caps.aux_support) {
3506 for (i = 0; i < dm->num_of_edps; i++) {
3507 rc = dc_link_set_backlight_level_nits(link[i], true, brightness[i],
3508 AUX_BL_DEFAULT_TRANSITION_TIME_MS);
3510 DRM_DEBUG("DM: Failed to update backlight via AUX on eDP[%d]\n", i);
3515 for (i = 0; i < dm->num_of_edps; i++) {
3516 rc = dc_link_set_backlight_level(dm->backlight_link[i], brightness[i], 0);
3518 DRM_DEBUG("DM: Failed to update backlight on eDP[%d]\n", i);
3527 static int amdgpu_dm_backlight_update_status(struct backlight_device *bd)
3529 struct amdgpu_display_manager *dm = bl_get_data(bd);
3531 amdgpu_dm_backlight_set_level(dm, bd->props.brightness);
3536 static u32 amdgpu_dm_backlight_get_level(struct amdgpu_display_manager *dm)
3538 struct amdgpu_dm_backlight_caps caps;
3540 amdgpu_dm_update_backlight_caps(dm);
3541 caps = dm->backlight_caps;
3543 if (caps.aux_support) {
3544 struct dc_link *link = (struct dc_link *)dm->backlight_link[0];
3548 rc = dc_link_get_backlight_level_nits(link, &avg, &peak);
3550 return dm->brightness[0];
3551 return convert_brightness_to_user(&caps, avg);
3553 int ret = dc_link_get_backlight_level(dm->backlight_link[0]);
3555 if (ret == DC_ERROR_UNEXPECTED)
3556 return dm->brightness[0];
3557 return convert_brightness_to_user(&caps, ret);
3561 static int amdgpu_dm_backlight_get_brightness(struct backlight_device *bd)
3563 struct amdgpu_display_manager *dm = bl_get_data(bd);
3565 return amdgpu_dm_backlight_get_level(dm);
3568 static const struct backlight_ops amdgpu_dm_backlight_ops = {
3569 .options = BL_CORE_SUSPENDRESUME,
3570 .get_brightness = amdgpu_dm_backlight_get_brightness,
3571 .update_status = amdgpu_dm_backlight_update_status,
3575 amdgpu_dm_register_backlight_device(struct amdgpu_display_manager *dm)
3578 struct backlight_properties props = { 0 };
3581 amdgpu_dm_update_backlight_caps(dm);
3582 for (i = 0; i < dm->num_of_edps; i++)
3583 dm->brightness[i] = AMDGPU_MAX_BL_LEVEL;
3585 props.max_brightness = AMDGPU_MAX_BL_LEVEL;
3586 props.brightness = AMDGPU_MAX_BL_LEVEL;
3587 props.type = BACKLIGHT_RAW;
3589 snprintf(bl_name, sizeof(bl_name), "amdgpu_bl%d",
3590 adev_to_drm(dm->adev)->primary->index);
3592 dm->backlight_dev = backlight_device_register(bl_name,
3593 adev_to_drm(dm->adev)->dev,
3595 &amdgpu_dm_backlight_ops,
3598 if (IS_ERR(dm->backlight_dev))
3599 DRM_ERROR("DM: Backlight registration failed!\n");
3601 DRM_DEBUG_DRIVER("DM: Registered Backlight device: %s\n", bl_name);
3606 static int initialize_plane(struct amdgpu_display_manager *dm,
3607 struct amdgpu_mode_info *mode_info, int plane_id,
3608 enum drm_plane_type plane_type,
3609 const struct dc_plane_cap *plane_cap)
3611 struct drm_plane *plane;
3612 unsigned long possible_crtcs;
3615 plane = kzalloc(sizeof(struct drm_plane), GFP_KERNEL);
3617 DRM_ERROR("KMS: Failed to allocate plane\n");
3620 plane->type = plane_type;
3623 * HACK: IGT tests expect that the primary plane for a CRTC
3624 * can only have one possible CRTC. Only expose support for
3625 * any CRTC on planes that are not going to be used as a primary
3626 * plane for a CRTC, such as overlay or underlay planes.
3628 possible_crtcs = 1 << plane_id;
3629 if (plane_id >= dm->dc->caps.max_streams)
3630 possible_crtcs = 0xff;
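/*
 * E.g. plane_id 0 yields possible_crtcs = 0x1 (CRTC 0 only), while a
 * plane_id at or beyond max_streams (an overlay plane here) advertises
 * all CRTCs via the 0xff mask.
 */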
3632 ret = amdgpu_dm_plane_init(dm, plane, possible_crtcs, plane_cap);
3635 DRM_ERROR("KMS: Failed to initialize plane\n");
3641 mode_info->planes[plane_id] = plane;
3647 static void register_backlight_device(struct amdgpu_display_manager *dm,
3648 struct dc_link *link)
3650 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
3651 defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
3653 if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
3654 link->type != dc_connection_none) {
3656 * Even if registration fails, we should continue with
3657 * DM initialization, because not having backlight control
3658 * is better than a black screen.
3660 if (!dm->backlight_dev)
3661 amdgpu_dm_register_backlight_device(dm);
3663 if (dm->backlight_dev) {
3664 dm->backlight_link[dm->num_of_edps] = link;
3673 * In this architecture, the association
3674 * connector -> encoder -> crtc
3675 * is not really required. The crtc and connector will hold the
3676 * display_index as an abstraction to use with the DAL component.
3678 * Returns 0 on success
3680 static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
3682 struct amdgpu_display_manager *dm = &adev->dm;
3684 struct amdgpu_dm_connector *aconnector = NULL;
3685 struct amdgpu_encoder *aencoder = NULL;
3686 struct amdgpu_mode_info *mode_info = &adev->mode_info;
3688 int32_t primary_planes;
3689 enum dc_connection_type new_connection_type = dc_connection_none;
3690 const struct dc_plane_cap *plane;
3692 dm->display_indexes_num = dm->dc->caps.max_streams;
3693 /* Update the actual number of CRTCs in use */
3694 adev->mode_info.num_crtc = adev->dm.display_indexes_num;
3696 link_cnt = dm->dc->caps.max_links;
3697 if (amdgpu_dm_mode_config_init(dm->adev)) {
3698 DRM_ERROR("DM: Failed to initialize mode config\n");
3702 /* There is one primary plane per CRTC */
3703 primary_planes = dm->dc->caps.max_streams;
3704 ASSERT(primary_planes <= AMDGPU_MAX_PLANES);
3707 * Initialize primary planes, implicit planes for legacy IOCTLs.
3708 * Order is reversed to match iteration order in atomic check.
3710 for (i = (primary_planes - 1); i >= 0; i--) {
3711 plane = &dm->dc->caps.planes[i];
3713 if (initialize_plane(dm, mode_info, i,
3714 DRM_PLANE_TYPE_PRIMARY, plane)) {
3715 DRM_ERROR("KMS: Failed to initialize primary plane\n");
3721 * Initialize overlay planes, index starting after primary planes.
3722 * These planes have a higher DRM index than the primary planes since
3723 * they should be considered as having a higher z-order.
3724 * Order is reversed to match iteration order in atomic check.
3726 * Only support DCN for now, and only expose one so we don't encourage
3727 * userspace to use up all the pipes.
3729 for (i = 0; i < dm->dc->caps.max_planes; ++i) {
3730 struct dc_plane_cap *plane = &dm->dc->caps.planes[i];
3732 if (plane->type != DC_PLANE_TYPE_DCN_UNIVERSAL)
3735 if (!plane->blends_with_above || !plane->blends_with_below)
3738 if (!plane->pixel_format_support.argb8888)
3741 if (initialize_plane(dm, NULL, primary_planes + i,
3742 DRM_PLANE_TYPE_OVERLAY, plane)) {
3743 DRM_ERROR("KMS: Failed to initialize overlay plane\n");
3747 /* Only create one overlay plane. */
3751 for (i = 0; i < dm->dc->caps.max_streams; i++)
3752 if (amdgpu_dm_crtc_init(dm, mode_info->planes[i], i)) {
3753 DRM_ERROR("KMS: Failed to initialize crtc\n");
3757 #if defined(CONFIG_DRM_AMD_DC_DCN)
3758 /* Use Outbox interrupt */
3759 switch (adev->asic_type) {
3760 case CHIP_SIENNA_CICHLID:
3761 case CHIP_NAVY_FLOUNDER:
3763 if (register_outbox_irq_handlers(dm->adev)) {
3764 DRM_ERROR("DM: Failed to initialize IRQ\n");
3769 DRM_DEBUG_KMS("Unsupported ASIC type for outbox: 0x%X\n", adev->asic_type);
3773 /* loops over all connectors on the board */
3774 for (i = 0; i < link_cnt; i++) {
3775 struct dc_link *link = NULL;
3777 if (i > AMDGPU_DM_MAX_DISPLAY_INDEX) {
3779 "KMS: Cannot support more than %d display indexes\n",
3780 AMDGPU_DM_MAX_DISPLAY_INDEX);
3784 aconnector = kzalloc(sizeof(*aconnector), GFP_KERNEL);
3788 aencoder = kzalloc(sizeof(*aencoder), GFP_KERNEL);
3792 if (amdgpu_dm_encoder_init(dm->ddev, aencoder, i)) {
3793 DRM_ERROR("KMS: Failed to initialize encoder\n");
3797 if (amdgpu_dm_connector_init(dm, aconnector, i, aencoder)) {
3798 DRM_ERROR("KMS: Failed to initialize connector\n");
3802 link = dc_get_link_at_index(dm->dc, i);
3804 if (!dc_link_detect_sink(link, &new_connection_type))
3805 DRM_ERROR("KMS: Failed to detect connector\n");
3807 if (aconnector->base.force && new_connection_type == dc_connection_none) {
3808 emulated_link_detect(link);
3809 amdgpu_dm_update_connector_after_detect(aconnector);
3811 } else if (dc_link_detect(link, DETECT_REASON_BOOT)) {
3812 amdgpu_dm_update_connector_after_detect(aconnector);
3813 register_backlight_device(dm, link);
3814 if (amdgpu_dc_feature_mask & DC_PSR_MASK)
3815 amdgpu_dm_set_psr_caps(link);
3821 /* Software is initialized. Now we can register interrupt handlers. */
3822 switch (adev->asic_type) {
3823 #if defined(CONFIG_DRM_AMD_DC_SI)
3828 if (dce60_register_irq_handlers(dm->adev)) {
3829 DRM_ERROR("DM: Failed to initialize IRQ\n");
3843 case CHIP_POLARIS11:
3844 case CHIP_POLARIS10:
3845 case CHIP_POLARIS12:
3850 if (dce110_register_irq_handlers(dm->adev)) {
3851 DRM_ERROR("DM: Failed to initialize IRQ\n");
3855 #if defined(CONFIG_DRM_AMD_DC_DCN)
3861 case CHIP_SIENNA_CICHLID:
3862 case CHIP_NAVY_FLOUNDER:
3863 case CHIP_DIMGREY_CAVEFISH:
3864 case CHIP_BEIGE_GOBY:
3866 if (dcn10_register_irq_handlers(dm->adev)) {
3867 DRM_ERROR("DM: Failed to initialize IRQ\n");
3873 DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
3885 static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm)
3887 drm_mode_config_cleanup(dm->ddev);
3888 drm_atomic_private_obj_fini(&dm->atomic_obj);
3892 /******************************************************************************
3893 * amdgpu_display_funcs functions
3894 *****************************************************************************/
3897 * dm_bandwidth_update - program display watermarks
3899 * @adev: amdgpu_device pointer
3901 * Calculate and program the display watermarks and line buffer allocation.
3903 static void dm_bandwidth_update(struct amdgpu_device *adev)
3905 /* TODO: implement later */
3908 static const struct amdgpu_display_funcs dm_display_funcs = {
3909 .bandwidth_update = dm_bandwidth_update, /* called unconditionally */
3910 .vblank_get_counter = dm_vblank_get_counter,/* called unconditionally */
3911 .backlight_set_level = NULL, /* never called for DC */
3912 .backlight_get_level = NULL, /* never called for DC */
3913 .hpd_sense = NULL,/* called unconditionally */
3914 .hpd_set_polarity = NULL, /* called unconditionally */
3915 .hpd_get_gpio_reg = NULL, /* VBIOS parsing. DAL does it. */
3916 .page_flip_get_scanoutpos =
3917 dm_crtc_get_scanoutpos,/* called unconditionally */
3918 .add_encoder = NULL, /* VBIOS parsing. DAL does it. */
3919 .add_connector = NULL, /* VBIOS parsing. DAL does it. */
3922 #if defined(CONFIG_DEBUG_KERNEL_DC)
3924 static ssize_t s3_debug_store(struct device *device,
3925 struct device_attribute *attr,
3931 struct drm_device *drm_dev = dev_get_drvdata(device);
3932 struct amdgpu_device *adev = drm_to_adev(drm_dev);
3934 ret = kstrtoint(buf, 0, &s3_state);
3939 drm_kms_helper_hotplug_event(adev_to_drm(adev));
3944 return ret == 0 ? count : 0;
3947 DEVICE_ATTR_WO(s3_debug);
3951 static int dm_early_init(void *handle)
3953 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
3955 switch (adev->asic_type) {
3956 #if defined(CONFIG_DRM_AMD_DC_SI)
3960 adev->mode_info.num_crtc = 6;
3961 adev->mode_info.num_hpd = 6;
3962 adev->mode_info.num_dig = 6;
3965 adev->mode_info.num_crtc = 2;
3966 adev->mode_info.num_hpd = 2;
3967 adev->mode_info.num_dig = 2;
3972 adev->mode_info.num_crtc = 6;
3973 adev->mode_info.num_hpd = 6;
3974 adev->mode_info.num_dig = 6;
3977 adev->mode_info.num_crtc = 4;
3978 adev->mode_info.num_hpd = 6;
3979 adev->mode_info.num_dig = 7;
3983 adev->mode_info.num_crtc = 2;
3984 adev->mode_info.num_hpd = 6;
3985 adev->mode_info.num_dig = 6;
3989 adev->mode_info.num_crtc = 6;
3990 adev->mode_info.num_hpd = 6;
3991 adev->mode_info.num_dig = 7;
3994 adev->mode_info.num_crtc = 3;
3995 adev->mode_info.num_hpd = 6;
3996 adev->mode_info.num_dig = 9;
3999 adev->mode_info.num_crtc = 2;
4000 adev->mode_info.num_hpd = 6;
4001 adev->mode_info.num_dig = 9;
4003 case CHIP_POLARIS11:
4004 case CHIP_POLARIS12:
4005 adev->mode_info.num_crtc = 5;
4006 adev->mode_info.num_hpd = 5;
4007 adev->mode_info.num_dig = 5;
4009 case CHIP_POLARIS10:
4011 adev->mode_info.num_crtc = 6;
4012 adev->mode_info.num_hpd = 6;
4013 adev->mode_info.num_dig = 6;
4018 adev->mode_info.num_crtc = 6;
4019 adev->mode_info.num_hpd = 6;
4020 adev->mode_info.num_dig = 6;
4022 #if defined(CONFIG_DRM_AMD_DC_DCN)
4026 adev->mode_info.num_crtc = 4;
4027 adev->mode_info.num_hpd = 4;
4028 adev->mode_info.num_dig = 4;
4032 case CHIP_SIENNA_CICHLID:
4033 case CHIP_NAVY_FLOUNDER:
4034 adev->mode_info.num_crtc = 6;
4035 adev->mode_info.num_hpd = 6;
4036 adev->mode_info.num_dig = 6;
4039 case CHIP_DIMGREY_CAVEFISH:
4040 adev->mode_info.num_crtc = 5;
4041 adev->mode_info.num_hpd = 5;
4042 adev->mode_info.num_dig = 5;
4044 case CHIP_BEIGE_GOBY:
4045 adev->mode_info.num_crtc = 2;
4046 adev->mode_info.num_hpd = 2;
4047 adev->mode_info.num_dig = 2;
4051 DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
4055 amdgpu_dm_set_irq_funcs(adev);
4057 if (adev->mode_info.funcs == NULL)
4058 adev->mode_info.funcs = &dm_display_funcs;
4061 * Note: Do NOT change adev->audio_endpt_rreg and
4062 * adev->audio_endpt_wreg because they are initialised in
4063 * amdgpu_device_init()
4065 #if defined(CONFIG_DEBUG_KERNEL_DC)
4067 adev_to_drm(adev)->dev,
4068 &dev_attr_s3_debug);
4074 static bool modeset_required(struct drm_crtc_state *crtc_state,
4075 struct dc_stream_state *new_stream,
4076 struct dc_stream_state *old_stream)
4078 return crtc_state->active && drm_atomic_crtc_needs_modeset(crtc_state);
4081 static bool modereset_required(struct drm_crtc_state *crtc_state)
4083 return !crtc_state->active && drm_atomic_crtc_needs_modeset(crtc_state);
4086 static void amdgpu_dm_encoder_destroy(struct drm_encoder *encoder)
4088 drm_encoder_cleanup(encoder);
4092 static const struct drm_encoder_funcs amdgpu_dm_encoder_funcs = {
4093 .destroy = amdgpu_dm_encoder_destroy,
4097 static void get_min_max_dc_plane_scaling(struct drm_device *dev,
4098 struct drm_framebuffer *fb,
4099 int *min_downscale, int *max_upscale)
4101 struct amdgpu_device *adev = drm_to_adev(dev);
4102 struct dc *dc = adev->dm.dc;
4103 /* Caps for all supported planes are the same on DCE and DCN 1 - 3 */
4104 struct dc_plane_cap *plane_cap = &dc->caps.planes[0];
4106 switch (fb->format->format) {
4107 case DRM_FORMAT_P010:
4108 case DRM_FORMAT_NV12:
4109 case DRM_FORMAT_NV21:
4110 *max_upscale = plane_cap->max_upscale_factor.nv12;
4111 *min_downscale = plane_cap->max_downscale_factor.nv12;
4114 case DRM_FORMAT_XRGB16161616F:
4115 case DRM_FORMAT_ARGB16161616F:
4116 case DRM_FORMAT_XBGR16161616F:
4117 case DRM_FORMAT_ABGR16161616F:
4118 *max_upscale = plane_cap->max_upscale_factor.fp16;
4119 *min_downscale = plane_cap->max_downscale_factor.fp16;
4123 *max_upscale = plane_cap->max_upscale_factor.argb8888;
4124 *min_downscale = plane_cap->max_downscale_factor.argb8888;
4129 * A factor of 1 in the plane_cap means scaling is not allowed, i.e. use
4130 * a scaling factor of 1.0 == 1000 units.
4132 if (*max_upscale == 1)
4133 *max_upscale = 1000;
4135 if (*min_downscale == 1)
4136 *min_downscale = 1000;
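/*
 * Note: the factors are in units of 0.001, so the fallback bounds used
 * in fill_dc_scaling_info() below (min_downscale = 250, max_upscale =
 * 16000) permit 1/4 downscaling and 16x upscaling; the dst/src ratios
 * there are scaled by 1000 before being compared against these bounds.
 */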
4140 static int fill_dc_scaling_info(const struct drm_plane_state *state,
4141 struct dc_scaling_info *scaling_info)
4143 int scale_w, scale_h, min_downscale, max_upscale;
4145 memset(scaling_info, 0, sizeof(*scaling_info));
4147 /* Source is fixed 16.16 but we ignore mantissa for now... */
4148 scaling_info->src_rect.x = state->src_x >> 16;
4149 scaling_info->src_rect.y = state->src_y >> 16;
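/*
 * E.g. a src_x of 0x18000 is 1.5 in 16.16 fixed point; the shift keeps
 * only the integer part, so src_rect.x becomes 1.
 */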
4152 * For reasons we don't (yet) fully understand a non-zero
4153 * src_y coordinate into an NV12 buffer can cause a
4154 * system hang. To avoid hangs (and maybe be overly cautious)
4155 * let's reject both non-zero src_x and src_y.
4157 * We currently know of only one use-case to reproduce a
4158 * scenario with non-zero src_x and src_y for NV12, which
4159 * is to gesture the YouTube Android app into full screen
4163 state->fb->format->format == DRM_FORMAT_NV12 &&
4164 (scaling_info->src_rect.x != 0 ||
4165 scaling_info->src_rect.y != 0))
4168 scaling_info->src_rect.width = state->src_w >> 16;
4169 if (scaling_info->src_rect.width == 0)
4172 scaling_info->src_rect.height = state->src_h >> 16;
4173 if (scaling_info->src_rect.height == 0)
4176 scaling_info->dst_rect.x = state->crtc_x;
4177 scaling_info->dst_rect.y = state->crtc_y;
4179 if (state->crtc_w == 0)
4182 scaling_info->dst_rect.width = state->crtc_w;
4184 if (state->crtc_h == 0)
4187 scaling_info->dst_rect.height = state->crtc_h;
4189 /* DRM doesn't specify clipping on destination output. */
4190 scaling_info->clip_rect = scaling_info->dst_rect;
4192 /* Validate scaling per-format with DC plane caps */
4193 if (state->plane && state->plane->dev && state->fb) {
4194 get_min_max_dc_plane_scaling(state->plane->dev, state->fb,
4195 &min_downscale, &max_upscale);
4197 min_downscale = 250;
4198 max_upscale = 16000;
4201 scale_w = scaling_info->dst_rect.width * 1000 /
4202 scaling_info->src_rect.width;
4204 if (scale_w < min_downscale || scale_w > max_upscale)
4207 scale_h = scaling_info->dst_rect.height * 1000 /
4208 scaling_info->src_rect.height;
4210 if (scale_h < min_downscale || scale_h > max_upscale)
4214 * The "scaling_quality" can be ignored for now, quality = 0 has DC
4215 * assume reasonable defaults based on the format.
4222 fill_gfx8_tiling_info_from_flags(union dc_tiling_info *tiling_info,
4223 uint64_t tiling_flags)
4225 /* Fill GFX8 params */
4226 if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == DC_ARRAY_2D_TILED_THIN1) {
4227 unsigned int bankw, bankh, mtaspect, tile_split, num_banks;
4229 bankw = AMDGPU_TILING_GET(tiling_flags, BANK_WIDTH);
4230 bankh = AMDGPU_TILING_GET(tiling_flags, BANK_HEIGHT);
4231 mtaspect = AMDGPU_TILING_GET(tiling_flags, MACRO_TILE_ASPECT);
4232 tile_split = AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT);
4233 num_banks = AMDGPU_TILING_GET(tiling_flags, NUM_BANKS);
4235 /* XXX fix me for VI */
4236 tiling_info->gfx8.num_banks = num_banks;
4237 tiling_info->gfx8.array_mode =
4238 DC_ARRAY_2D_TILED_THIN1;
4239 tiling_info->gfx8.tile_split = tile_split;
4240 tiling_info->gfx8.bank_width = bankw;
4241 tiling_info->gfx8.bank_height = bankh;
4242 tiling_info->gfx8.tile_aspect = mtaspect;
4243 tiling_info->gfx8.tile_mode =
4244 DC_ADDR_SURF_MICRO_TILING_DISPLAY;
4245 } else if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE)
4246 == DC_ARRAY_1D_TILED_THIN1) {
4247 tiling_info->gfx8.array_mode = DC_ARRAY_1D_TILED_THIN1;
4250 tiling_info->gfx8.pipe_config =
4251 AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG);
4255 fill_gfx9_tiling_info_from_device(const struct amdgpu_device *adev,
4256 union dc_tiling_info *tiling_info)
4258 tiling_info->gfx9.num_pipes =
4259 adev->gfx.config.gb_addr_config_fields.num_pipes;
4260 tiling_info->gfx9.num_banks =
4261 adev->gfx.config.gb_addr_config_fields.num_banks;
4262 tiling_info->gfx9.pipe_interleave =
4263 adev->gfx.config.gb_addr_config_fields.pipe_interleave_size;
4264 tiling_info->gfx9.num_shader_engines =
4265 adev->gfx.config.gb_addr_config_fields.num_se;
4266 tiling_info->gfx9.max_compressed_frags =
4267 adev->gfx.config.gb_addr_config_fields.max_compress_frags;
4268 tiling_info->gfx9.num_rb_per_se =
4269 adev->gfx.config.gb_addr_config_fields.num_rb_per_se;
4270 tiling_info->gfx9.shaderEnable = 1;
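/*
 * The ASICs listed below are the gfx10.3 family, the first parts with
 * packers; num_pkrs is not meaningful on anything older.
 */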
4271 if (adev->asic_type == CHIP_SIENNA_CICHLID ||
4272 adev->asic_type == CHIP_NAVY_FLOUNDER ||
4273 adev->asic_type == CHIP_DIMGREY_CAVEFISH ||
4274 adev->asic_type == CHIP_BEIGE_GOBY ||
4275 adev->asic_type == CHIP_VANGOGH)
4276 tiling_info->gfx9.num_pkrs = adev->gfx.config.gb_addr_config_fields.num_pkrs;
4280 validate_dcc(struct amdgpu_device *adev,
4281 const enum surface_pixel_format format,
4282 const enum dc_rotation_angle rotation,
4283 const union dc_tiling_info *tiling_info,
4284 const struct dc_plane_dcc_param *dcc,
4285 const struct dc_plane_address *address,
4286 const struct plane_size *plane_size)
4288 struct dc *dc = adev->dm.dc;
4289 struct dc_dcc_surface_param input;
4290 struct dc_surface_dcc_cap output;
4292 memset(&input, 0, sizeof(input));
4293 memset(&output, 0, sizeof(output));
4298 if (format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN ||
4299 !dc->cap_funcs.get_dcc_compression_cap)
4302 input.format = format;
4303 input.surface_size.width = plane_size->surface_size.width;
4304 input.surface_size.height = plane_size->surface_size.height;
4305 input.swizzle_mode = tiling_info->gfx9.swizzle;
4307 if (rotation == ROTATION_ANGLE_0 || rotation == ROTATION_ANGLE_180)
4308 input.scan = SCAN_DIRECTION_HORIZONTAL;
4309 else if (rotation == ROTATION_ANGLE_90 || rotation == ROTATION_ANGLE_270)
4310 input.scan = SCAN_DIRECTION_VERTICAL;
4312 if (!dc->cap_funcs.get_dcc_compression_cap(dc, &input, &output))
4315 if (!output.capable)
4318 if (dcc->independent_64b_blks == 0 &&
4319 output.grph.rgb.independent_64b_blks != 0)
4326 modifier_has_dcc(uint64_t modifier)
4328 return IS_AMD_FMT_MOD(modifier) && AMD_FMT_MOD_GET(DCC, modifier);
4332 modifier_gfx9_swizzle_mode(uint64_t modifier)
4334 if (modifier == DRM_FORMAT_MOD_LINEAR)
4337 return AMD_FMT_MOD_GET(TILE, modifier);
4340 static const struct drm_format_info *
4341 amd_get_format_info(const struct drm_mode_fb_cmd2 *cmd)
4343 return amdgpu_lookup_format_info(cmd->pixel_format, cmd->modifier[0]);
4347 fill_gfx9_tiling_info_from_modifier(const struct amdgpu_device *adev,
4348 union dc_tiling_info *tiling_info,
4351 unsigned int mod_bank_xor_bits = AMD_FMT_MOD_GET(BANK_XOR_BITS, modifier);
4352 unsigned int mod_pipe_xor_bits = AMD_FMT_MOD_GET(PIPE_XOR_BITS, modifier);
4353 unsigned int pkrs_log2 = AMD_FMT_MOD_GET(PACKERS, modifier);
4354 unsigned int pipes_log2 = min(4u, mod_pipe_xor_bits);
4356 fill_gfx9_tiling_info_from_device(adev, tiling_info);
4358 if (!IS_AMD_FMT_MOD(modifier))
4361 tiling_info->gfx9.num_pipes = 1u << pipes_log2;
4362 tiling_info->gfx9.num_shader_engines = 1u << (mod_pipe_xor_bits - pipes_log2);
4364 if (adev->family >= AMDGPU_FAMILY_NV) {
4365 tiling_info->gfx9.num_pkrs = 1u << pkrs_log2;
4367 tiling_info->gfx9.num_banks = 1u << mod_bank_xor_bits;
4369 /* For DCC we know it isn't RB-aligned, so num_rb_per_se doesn't matter. */
4373 enum dm_micro_swizzle {
4374 MICRO_SWIZZLE_Z = 0,
4375 MICRO_SWIZZLE_S = 1,
4376 MICRO_SWIZZLE_D = 2,
4380 static bool dm_plane_format_mod_supported(struct drm_plane *plane,
4384 struct amdgpu_device *adev = drm_to_adev(plane->dev);
4385 const struct drm_format_info *info = drm_format_info(format);
4388 enum dm_micro_swizzle microtile = modifier_gfx9_swizzle_mode(modifier) & 3;
4394 * We always have to allow these modifiers:
4395 * 1. Core DRM checks for LINEAR support if userspace does not provide modifiers.
4396 * 2. Not passing any modifiers is the same as explicitly passing INVALID.
4398 if (modifier == DRM_FORMAT_MOD_LINEAR ||
4399 modifier == DRM_FORMAT_MOD_INVALID) {
4403 /* Check that the modifier is on the list of the plane's supported modifiers. */
4404 for (i = 0; i < plane->modifier_count; i++) {
4405 if (modifier == plane->modifiers[i])
4408 if (i == plane->modifier_count)
4412 * For D swizzle the canonical modifier depends on the bpp, so check
4415 if (AMD_FMT_MOD_GET(TILE_VERSION, modifier) == AMD_FMT_MOD_TILE_VER_GFX9 &&
4416 adev->family >= AMDGPU_FAMILY_NV) {
4417 if (microtile == MICRO_SWIZZLE_D && info->cpp[0] == 4)
4421 if (adev->family >= AMDGPU_FAMILY_RV && microtile == MICRO_SWIZZLE_D &&
4425 if (modifier_has_dcc(modifier)) {
4426 /* Per radeonsi comments, 16/64 bpp are more complicated. */
4427 if (info->cpp[0] != 4)
4429 /* We support multi-planar formats, but not when combined with
4430 * additional DCC metadata planes. */
4431 if (info->num_planes > 1)
4439 add_modifier(uint64_t **mods, uint64_t *size, uint64_t *cap, uint64_t mod)
4444 if (*cap - *size < 1) {
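/*
 * Out of room: grow geometrically (double the capacity) so a long
 * series of add_modifier() calls stays amortized O(1).
 */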
4445 uint64_t new_cap = *cap * 2;
4446 uint64_t *new_mods = kmalloc(new_cap * sizeof(uint64_t), GFP_KERNEL);
4454 memcpy(new_mods, *mods, sizeof(uint64_t) * *size);
4460 (*mods)[*size] = mod;
4465 add_gfx9_modifiers(const struct amdgpu_device *adev,
4466 uint64_t **mods, uint64_t *size, uint64_t *capacity)
4468 int pipes = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
4469 int pipe_xor_bits = min(8, pipes +
4470 ilog2(adev->gfx.config.gb_addr_config_fields.num_se));
4471 int bank_xor_bits = min(8 - pipe_xor_bits,
4472 ilog2(adev->gfx.config.gb_addr_config_fields.num_banks));
4473 int rb = ilog2(adev->gfx.config.gb_addr_config_fields.num_se) +
4474 ilog2(adev->gfx.config.gb_addr_config_fields.num_rb_per_se);
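/*
 * Worked example (hypothetical config): 4 pipes and 4 shader engines
 * give pipe_xor_bits = min(8, 2 + 2) = 4, leaving min(8 - 4, log2(banks))
 * bits of the XOR budget for bank swizzling.
 */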
4477 if (adev->family == AMDGPU_FAMILY_RV) {
4478 /* Raven2 and later */
4479 bool has_constant_encode = adev->asic_type > CHIP_RAVEN || adev->external_rev_id >= 0x81;
4482 * No _D DCC swizzles yet because we only allow 32bpp, which
4483 * doesn't support _D on DCN
4486 if (has_constant_encode) {
4487 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4488 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4489 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4490 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4491 AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
4492 AMD_FMT_MOD_SET(DCC, 1) |
4493 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4494 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
4495 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1));
4498 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4499 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4500 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4501 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4502 AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
4503 AMD_FMT_MOD_SET(DCC, 1) |
4504 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4505 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
4506 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 0));
4508 if (has_constant_encode) {
4509 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4510 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4511 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4512 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4513 AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
4514 AMD_FMT_MOD_SET(DCC, 1) |
4515 AMD_FMT_MOD_SET(DCC_RETILE, 1) |
4516 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4517 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
4519 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
4520 AMD_FMT_MOD_SET(RB, rb) |
4521 AMD_FMT_MOD_SET(PIPE, pipes));
4524 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4525 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4526 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4527 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4528 AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
4529 AMD_FMT_MOD_SET(DCC, 1) |
4530 AMD_FMT_MOD_SET(DCC_RETILE, 1) |
4531 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4532 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
4533 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 0) |
4534 AMD_FMT_MOD_SET(RB, rb) |
4535 AMD_FMT_MOD_SET(PIPE, pipes));
4539 * Only supported for 64 bpp on Raven; will be filtered by format in
4540 * dm_plane_format_mod_supported.
4542 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4543 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D_X) |
4544 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4545 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4546 AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits));
4548 if (adev->family == AMDGPU_FAMILY_RV) {
4549 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4550 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4551 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4552 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4553 AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits));
4557 * Only supported for 64 bpp on Raven; will be filtered by format in
4558 * dm_plane_format_mod_supported.
4560 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4561 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
4562 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4564 if (adev->family == AMDGPU_FAMILY_RV) {
4565 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4566 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
4567 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4572 add_gfx10_1_modifiers(const struct amdgpu_device *adev,
4573 uint64_t **mods, uint64_t *size, uint64_t *capacity)
4575 int pipe_xor_bits = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
4577 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4578 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4579 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
4580 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4581 AMD_FMT_MOD_SET(DCC, 1) |
4582 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
4583 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4584 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
4586 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4587 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4588 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
4589 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4590 AMD_FMT_MOD_SET(DCC, 1) |
4591 AMD_FMT_MOD_SET(DCC_RETILE, 1) |
4592 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
4593 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4594 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
4596 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4597 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4598 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
4599 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits));
4601 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4602 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4603 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
4604 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits));
4607 /* Only supported for 64 bpp; will be filtered in dm_plane_format_mod_supported */
4608 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4609 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
4610 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4612 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4613 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
4614 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4618 add_gfx10_3_modifiers(const struct amdgpu_device *adev,
4619 uint64_t **mods, uint64_t *size, uint64_t *capacity)
4621 int pipe_xor_bits = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
4622 int pkrs = ilog2(adev->gfx.config.gb_addr_config_fields.num_pkrs);
4624 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4625 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4626 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
4627 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4628 AMD_FMT_MOD_SET(PACKERS, pkrs) |
4629 AMD_FMT_MOD_SET(DCC, 1) |
4630 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
4631 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4632 AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
4633 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
4635 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4636 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4637 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
4638 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4639 AMD_FMT_MOD_SET(PACKERS, pkrs) |
4640 AMD_FMT_MOD_SET(DCC, 1) |
4641 AMD_FMT_MOD_SET(DCC_RETILE, 1) |
4642 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
4643 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4644 AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
4645 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
4647 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4648 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4649 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
4650 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4651 AMD_FMT_MOD_SET(PACKERS, pkrs));
4653 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4654 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4655 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
4656 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4657 AMD_FMT_MOD_SET(PACKERS, pkrs));
4659 /* Only supported for 64 bpp; will be filtered in dm_plane_format_mod_supported */
4660 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4661 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
4662 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4664 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4665 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
4666 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4670 get_plane_modifiers(const struct amdgpu_device *adev, unsigned int plane_type, uint64_t **mods)
4672 uint64_t size = 0, capacity = 128;
4675 /* We have not hooked up any pre-GFX9 modifiers. */
4676 if (adev->family < AMDGPU_FAMILY_AI)
4679 *mods = kmalloc(capacity * sizeof(uint64_t), GFP_KERNEL);
4681 if (plane_type == DRM_PLANE_TYPE_CURSOR) {
4682 add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_LINEAR);
4683 add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_INVALID);
4684 return *mods ? 0 : -ENOMEM;
4687 switch (adev->family) {
4688 case AMDGPU_FAMILY_AI:
4689 case AMDGPU_FAMILY_RV:
4690 add_gfx9_modifiers(adev, mods, &size, &capacity);
4692 case AMDGPU_FAMILY_NV:
4693 case AMDGPU_FAMILY_VGH:
4694 if (adev->asic_type >= CHIP_SIENNA_CICHLID)
4695 add_gfx10_3_modifiers(adev, mods, &size, &capacity);
4697 add_gfx10_1_modifiers(adev, mods, &size, &capacity);
4701 add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_LINEAR);
4703 /* INVALID marks the end of the list. */
4704 add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_INVALID);
4713 fill_gfx9_plane_attributes_from_modifiers(struct amdgpu_device *adev,
4714 const struct amdgpu_framebuffer *afb,
4715 const enum surface_pixel_format format,
4716 const enum dc_rotation_angle rotation,
4717 const struct plane_size *plane_size,
4718 union dc_tiling_info *tiling_info,
4719 struct dc_plane_dcc_param *dcc,
4720 struct dc_plane_address *address,
4721 const bool force_disable_dcc)
4723 const uint64_t modifier = afb->base.modifier;
4726 fill_gfx9_tiling_info_from_modifier(adev, tiling_info, modifier);
4727 tiling_info->gfx9.swizzle = modifier_gfx9_swizzle_mode(modifier);
4729 if (modifier_has_dcc(modifier) && !force_disable_dcc) {
4730 uint64_t dcc_address = afb->address + afb->base.offsets[1];
4733 dcc->meta_pitch = afb->base.pitches[1];
4734 dcc->independent_64b_blks = AMD_FMT_MOD_GET(DCC_INDEPENDENT_64B, modifier);
4736 address->grph.meta_addr.low_part = lower_32_bits(dcc_address);
4737 address->grph.meta_addr.high_part = upper_32_bits(dcc_address);
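/*
 * The DCC metadata surface rides along as the framebuffer's second
 * plane, which is why offsets[1]/pitches[1] describe it.
 */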
4740 ret = validate_dcc(adev, format, rotation, tiling_info, dcc, address, plane_size);
4748 fill_plane_buffer_attributes(struct amdgpu_device *adev,
4749 const struct amdgpu_framebuffer *afb,
4750 const enum surface_pixel_format format,
4751 const enum dc_rotation_angle rotation,
4752 const uint64_t tiling_flags,
4753 union dc_tiling_info *tiling_info,
4754 struct plane_size *plane_size,
4755 struct dc_plane_dcc_param *dcc,
4756 struct dc_plane_address *address,
4758 bool force_disable_dcc)
4760 const struct drm_framebuffer *fb = &afb->base;
4763 memset(tiling_info, 0, sizeof(*tiling_info));
4764 memset(plane_size, 0, sizeof(*plane_size));
4765 memset(dcc, 0, sizeof(*dcc));
4766 memset(address, 0, sizeof(*address));
4768 address->tmz_surface = tmz_surface;
4770 if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) {
4771 uint64_t addr = afb->address + fb->offsets[0];
4773 plane_size->surface_size.x = 0;
4774 plane_size->surface_size.y = 0;
4775 plane_size->surface_size.width = fb->width;
4776 plane_size->surface_size.height = fb->height;
4777 plane_size->surface_pitch =
4778 fb->pitches[0] / fb->format->cpp[0];
4780 address->type = PLN_ADDR_TYPE_GRAPHICS;
4781 address->grph.addr.low_part = lower_32_bits(addr);
4782 address->grph.addr.high_part = upper_32_bits(addr);
4783 } else if (format < SURFACE_PIXEL_FORMAT_INVALID) {
4784 uint64_t luma_addr = afb->address + fb->offsets[0];
4785 uint64_t chroma_addr = afb->address + fb->offsets[1];
4787 plane_size->surface_size.x = 0;
4788 plane_size->surface_size.y = 0;
4789 plane_size->surface_size.width = fb->width;
4790 plane_size->surface_size.height = fb->height;
4791 plane_size->surface_pitch =
4792 fb->pitches[0] / fb->format->cpp[0];
4794 plane_size->chroma_size.x = 0;
4795 plane_size->chroma_size.y = 0;
4796 /* TODO: set these based on surface format */
4797 plane_size->chroma_size.width = fb->width / 2;
4798 plane_size->chroma_size.height = fb->height / 2;
4800 plane_size->chroma_pitch =
4801 fb->pitches[1] / fb->format->cpp[1];
4803 address->type = PLN_ADDR_TYPE_VIDEO_PROGRESSIVE;
4804 address->video_progressive.luma_addr.low_part =
4805 lower_32_bits(luma_addr);
4806 address->video_progressive.luma_addr.high_part =
4807 upper_32_bits(luma_addr);
4808 address->video_progressive.chroma_addr.low_part =
4809 lower_32_bits(chroma_addr);
4810 address->video_progressive.chroma_addr.high_part =
4811 upper_32_bits(chroma_addr);
4814 if (adev->family >= AMDGPU_FAMILY_AI) {
4815 ret = fill_gfx9_plane_attributes_from_modifiers(adev, afb, format,
4816 rotation, plane_size,
4823 fill_gfx8_tiling_info_from_flags(tiling_info, tiling_flags);
4830 fill_blending_from_plane_state(const struct drm_plane_state *plane_state,
4831 bool *per_pixel_alpha, bool *global_alpha,
4832 int *global_alpha_value)
4834 *per_pixel_alpha = false;
4835 *global_alpha = false;
4836 *global_alpha_value = 0xff;
4838 if (plane_state->plane->type != DRM_PLANE_TYPE_OVERLAY)
4841 if (plane_state->pixel_blend_mode == DRM_MODE_BLEND_PREMULTI) {
4842 static const uint32_t alpha_formats[] = {
4843 DRM_FORMAT_ARGB8888,
4844 DRM_FORMAT_RGBA8888,
4845 DRM_FORMAT_ABGR8888,
4847 uint32_t format = plane_state->fb->format->format;
4850 for (i = 0; i < ARRAY_SIZE(alpha_formats); ++i) {
4851 if (format == alpha_formats[i]) {
4852 *per_pixel_alpha = true;
4858 if (plane_state->alpha < 0xffff) {
4859 *global_alpha = true;
4860 *global_alpha_value = plane_state->alpha >> 8;
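/* DRM plane alpha is 16 bits wide, DC takes 8; the shift maps 0xffff
 * (fully opaque) to 0xff. */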
4865 fill_plane_color_attributes(const struct drm_plane_state *plane_state,
4866 const enum surface_pixel_format format,
4867 enum dc_color_space *color_space)
4871 *color_space = COLOR_SPACE_SRGB;
4873 /* DRM color properties only affect non-RGB formats. */
4874 if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN)
4877 full_range = (plane_state->color_range == DRM_COLOR_YCBCR_FULL_RANGE);
4879 switch (plane_state->color_encoding) {
4880 case DRM_COLOR_YCBCR_BT601:
4882 *color_space = COLOR_SPACE_YCBCR601;
4884 *color_space = COLOR_SPACE_YCBCR601_LIMITED;
4887 case DRM_COLOR_YCBCR_BT709:
4889 *color_space = COLOR_SPACE_YCBCR709;
4891 *color_space = COLOR_SPACE_YCBCR709_LIMITED;
4894 case DRM_COLOR_YCBCR_BT2020:
4896 *color_space = COLOR_SPACE_2020_YCBCR;
4909 fill_dc_plane_info_and_addr(struct amdgpu_device *adev,
4910 const struct drm_plane_state *plane_state,
4911 const uint64_t tiling_flags,
4912 struct dc_plane_info *plane_info,
4913 struct dc_plane_address *address,
4915 bool force_disable_dcc)
4917 const struct drm_framebuffer *fb = plane_state->fb;
4918 const struct amdgpu_framebuffer *afb =
4919 to_amdgpu_framebuffer(plane_state->fb);
4922 memset(plane_info, 0, sizeof(*plane_info));
4924 switch (fb->format->format) {
4926 plane_info->format =
4927 SURFACE_PIXEL_FORMAT_GRPH_PALETA_256_COLORS;
4929 case DRM_FORMAT_RGB565:
4930 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_RGB565;
4932 case DRM_FORMAT_XRGB8888:
4933 case DRM_FORMAT_ARGB8888:
4934 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB8888;
4936 case DRM_FORMAT_XRGB2101010:
4937 case DRM_FORMAT_ARGB2101010:
4938 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010;
4940 case DRM_FORMAT_XBGR2101010:
4941 case DRM_FORMAT_ABGR2101010:
4942 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010;
4944 case DRM_FORMAT_XBGR8888:
4945 case DRM_FORMAT_ABGR8888:
4946 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR8888;
4948 case DRM_FORMAT_NV21:
4949 plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr;
4951 case DRM_FORMAT_NV12:
4952 plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb;
4954 case DRM_FORMAT_P010:
4955 plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb;
4957 case DRM_FORMAT_XRGB16161616F:
4958 case DRM_FORMAT_ARGB16161616F:
4959 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F;
4961 case DRM_FORMAT_XBGR16161616F:
4962 case DRM_FORMAT_ABGR16161616F:
4963 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F;
4967 "Unsupported screen format %p4cc\n",
4968 &fb->format->format);
4972 switch (plane_state->rotation & DRM_MODE_ROTATE_MASK) {
4973 case DRM_MODE_ROTATE_0:
4974 plane_info->rotation = ROTATION_ANGLE_0;
4976 case DRM_MODE_ROTATE_90:
4977 plane_info->rotation = ROTATION_ANGLE_90;
4979 case DRM_MODE_ROTATE_180:
4980 plane_info->rotation = ROTATION_ANGLE_180;
4982 case DRM_MODE_ROTATE_270:
4983 plane_info->rotation = ROTATION_ANGLE_270;
4986 plane_info->rotation = ROTATION_ANGLE_0;
4990 plane_info->visible = true;
4991 plane_info->stereo_format = PLANE_STEREO_FORMAT_NONE;
4993 plane_info->layer_index = 0;
4995 ret = fill_plane_color_attributes(plane_state, plane_info->format,
4996 &plane_info->color_space);
5000 ret = fill_plane_buffer_attributes(adev, afb, plane_info->format,
5001 plane_info->rotation, tiling_flags,
5002 &plane_info->tiling_info,
5003 &plane_info->plane_size,
5004 &plane_info->dcc, address, tmz_surface,
5009 fill_blending_from_plane_state(
5010 plane_state, &plane_info->per_pixel_alpha,
5011 &plane_info->global_alpha, &plane_info->global_alpha_value);
5016 static int fill_dc_plane_attributes(struct amdgpu_device *adev,
5017 struct dc_plane_state *dc_plane_state,
5018 struct drm_plane_state *plane_state,
5019 struct drm_crtc_state *crtc_state)
5021 struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
5022 struct amdgpu_framebuffer *afb = (struct amdgpu_framebuffer *)plane_state->fb;
5023 struct dc_scaling_info scaling_info;
5024 struct dc_plane_info plane_info;
5026 bool force_disable_dcc = false;
5028 ret = fill_dc_scaling_info(plane_state, &scaling_info);
5032 dc_plane_state->src_rect = scaling_info.src_rect;
5033 dc_plane_state->dst_rect = scaling_info.dst_rect;
5034 dc_plane_state->clip_rect = scaling_info.clip_rect;
5035 dc_plane_state->scaling_quality = scaling_info.scaling_quality;
5037 force_disable_dcc = adev->asic_type == CHIP_RAVEN && adev->in_suspend;
5038 ret = fill_dc_plane_info_and_addr(adev, plane_state,
5041 &dc_plane_state->address,
5047 dc_plane_state->format = plane_info.format;
5048 dc_plane_state->color_space = plane_info.color_space;
5050 dc_plane_state->plane_size = plane_info.plane_size;
5051 dc_plane_state->rotation = plane_info.rotation;
5052 dc_plane_state->horizontal_mirror = plane_info.horizontal_mirror;
5053 dc_plane_state->stereo_format = plane_info.stereo_format;
5054 dc_plane_state->tiling_info = plane_info.tiling_info;
5055 dc_plane_state->visible = plane_info.visible;
5056 dc_plane_state->per_pixel_alpha = plane_info.per_pixel_alpha;
5057 dc_plane_state->global_alpha = plane_info.global_alpha;
5058 dc_plane_state->global_alpha_value = plane_info.global_alpha_value;
5059 dc_plane_state->dcc = plane_info.dcc;
5060 dc_plane_state->layer_index = plane_info.layer_index; // Always returns 0
5061 dc_plane_state->flip_int_enabled = true;
5064 * Always set input transfer function, since plane state is refreshed
5067 ret = amdgpu_dm_update_plane_color_mgmt(dm_crtc_state, dc_plane_state);
5074 static void update_stream_scaling_settings(const struct drm_display_mode *mode,
5075 const struct dm_connector_state *dm_state,
5076 struct dc_stream_state *stream)
5078 enum amdgpu_rmx_type rmx_type;
5080 struct rect src = { 0 }; /* viewport in composition space */
5081 struct rect dst = { 0 }; /* stream addressable area */
5083 /* no mode. nothing to be done */
5087 /* Full screen scaling by default */
5088 src.width = mode->hdisplay;
5089 src.height = mode->vdisplay;
5090 dst.width = stream->timing.h_addressable;
5091 dst.height = stream->timing.v_addressable;
5094 rmx_type = dm_state->scaling;
5095 if (rmx_type == RMX_ASPECT || rmx_type == RMX_OFF) {
5096 if (src.width * dst.height <
5097 src.height * dst.width) {
5098 /* height needs less upscaling/more downscaling */
5099 dst.width = src.width *
5100 dst.height / src.height;
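/*
 * e.g. a 1280x1024 source on a 1920x1080 timing: dst.width becomes
 * 1280 * 1080 / 1024 = 1350, pillarboxing the image.
 */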
5102 /* width needs less upscaling/more downscaling */
5103 dst.height = src.height *
5104 dst.width / src.width;
5106 } else if (rmx_type == RMX_CENTER) {
5110 dst.x = (stream->timing.h_addressable - dst.width) / 2;
5111 dst.y = (stream->timing.v_addressable - dst.height) / 2;
5113 if (dm_state->underscan_enable) {
5114 dst.x += dm_state->underscan_hborder / 2;
5115 dst.y += dm_state->underscan_vborder / 2;
5116 dst.width -= dm_state->underscan_hborder;
5117 dst.height -= dm_state->underscan_vborder;
5124 DRM_DEBUG_KMS("Destination Rectangle x:%d y:%d width:%d height:%d\n",
5125 dst.x, dst.y, dst.width, dst.height);
5129 static enum dc_color_depth
5130 convert_color_depth_from_display_info(const struct drm_connector *connector,
5131 bool is_y420, int requested_bpc)
5138 /* Cap display bpc based on HDMI 2.0 HF-VSDB */
5139 if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_48)
5141 else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_36)
5143 else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_30)
5146 bpc = (uint8_t)connector->display_info.bpc;
5147 /* Assume 8 bpc by default if no bpc is specified. */
5148 bpc = bpc ? bpc : 8;
5151 if (requested_bpc > 0) {
5153 * Cap display bpc based on the user requested value.
5155 * The value for state->max_bpc may not be correctly updated
5156 * depending on when the connector gets added to the state
5157 * or if this was called outside of atomic check, so it
5158 * can't be used directly.
5160 bpc = min_t(u8, bpc, requested_bpc);
5162 /* Round down to the nearest even number. */
5163 bpc = bpc - (bpc & 1);
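/*
 * e.g. a requested 11 bpc becomes 10: DC only has color depth enums
 * for even component sizes (666 through 161616 below).
 */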
5169 * Temporary workaround: DRM doesn't parse color depth for
5170 * EDID revisions before 1.4
5171 * TODO: Fix edid parsing
5173 return COLOR_DEPTH_888;
5175 return COLOR_DEPTH_666;
5177 return COLOR_DEPTH_888;
5179 return COLOR_DEPTH_101010;
5181 return COLOR_DEPTH_121212;
5183 return COLOR_DEPTH_141414;
5185 return COLOR_DEPTH_161616;
5187 return COLOR_DEPTH_UNDEFINED;
5191 static enum dc_aspect_ratio
5192 get_aspect_ratio(const struct drm_display_mode *mode_in)
5194 /* 1-1 mapping, since both enums follow the HDMI spec. */
5195 return (enum dc_aspect_ratio) mode_in->picture_aspect_ratio;
5198 static enum dc_color_space
5199 get_output_color_space(const struct dc_crtc_timing *dc_crtc_timing)
5201 enum dc_color_space color_space = COLOR_SPACE_SRGB;
5203 switch (dc_crtc_timing->pixel_encoding) {
5204 case PIXEL_ENCODING_YCBCR422:
5205 case PIXEL_ENCODING_YCBCR444:
5206 case PIXEL_ENCODING_YCBCR420:
5209 * 27.03 MHz (270300 in 100 Hz units) is the separation point between
5210 * HDTV and SDTV according to the HDMI spec; we use YCbCr709 and YCbCr601
5213 if (dc_crtc_timing->pix_clk_100hz > 270300) {
5214 if (dc_crtc_timing->flags.Y_ONLY)
5216 COLOR_SPACE_YCBCR709_LIMITED;
5218 color_space = COLOR_SPACE_YCBCR709;
5220 if (dc_crtc_timing->flags.Y_ONLY)
5222 COLOR_SPACE_YCBCR601_LIMITED;
5224 color_space = COLOR_SPACE_YCBCR601;
5229 case PIXEL_ENCODING_RGB:
5230 color_space = COLOR_SPACE_SRGB;
5241 static bool adjust_colour_depth_from_display_info(
5242 struct dc_crtc_timing *timing_out,
5243 const struct drm_display_info *info)
5245 enum dc_color_depth depth = timing_out->display_color_depth;
5248 normalized_clk = timing_out->pix_clk_100hz / 10;
5249 /* YCbCr 4:2:0 requires an additional adjustment of 1/2 (half the clock) */
5250 if (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420)
5251 normalized_clk /= 2;
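/*
 * Worked example: a 594 MHz clock at 12 bpc normalizes to
 * 594 * 36 / 24 = 891 MHz; against a 600 MHz max_tmds_clock the loop
 * below retries at 10 bpc (742.5 MHz) and then 8 bpc (594 MHz), which
 * fits. (The code works in kHz.)
 */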
5252 /* Adjust the pixel clock per the HDMI spec, based on the colour depth */
5254 case COLOR_DEPTH_888:
5256 case COLOR_DEPTH_101010:
5257 normalized_clk = (normalized_clk * 30) / 24;
5259 case COLOR_DEPTH_121212:
5260 normalized_clk = (normalized_clk * 36) / 24;
5262 case COLOR_DEPTH_161616:
5263 normalized_clk = (normalized_clk * 48) / 24;
5266 /* The above depths are the only ones valid for HDMI. */
5269 if (normalized_clk <= info->max_tmds_clock) {
5270 timing_out->display_color_depth = depth;
5273 } while (--depth > COLOR_DEPTH_666);
5277 static void fill_stream_properties_from_drm_display_mode(
5278 struct dc_stream_state *stream,
5279 const struct drm_display_mode *mode_in,
5280 const struct drm_connector *connector,
5281 const struct drm_connector_state *connector_state,
5282 const struct dc_stream_state *old_stream,
5285 struct dc_crtc_timing *timing_out = &stream->timing;
5286 const struct drm_display_info *info = &connector->display_info;
5287 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
5288 struct hdmi_vendor_infoframe hv_frame;
5289 struct hdmi_avi_infoframe avi_frame;
5291 memset(&hv_frame, 0, sizeof(hv_frame));
5292 memset(&avi_frame, 0, sizeof(avi_frame));
5294 timing_out->h_border_left = 0;
5295 timing_out->h_border_right = 0;
5296 timing_out->v_border_top = 0;
5297 timing_out->v_border_bottom = 0;
5298 /* TODO: un-hardcode */
5299 if (drm_mode_is_420_only(info, mode_in)
5300 && stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
5301 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
5302 else if (drm_mode_is_420_also(info, mode_in)
5303 && aconnector->force_yuv420_output)
5304 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
5305 else if ((connector->display_info.color_formats & DRM_COLOR_FORMAT_YCRCB444)
5306 && stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
5307 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR444;
5309 timing_out->pixel_encoding = PIXEL_ENCODING_RGB;
5311 timing_out->timing_3d_format = TIMING_3D_FORMAT_NONE;
5312 timing_out->display_color_depth = convert_color_depth_from_display_info(
5314 (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420),
5316 timing_out->scan_type = SCANNING_TYPE_NODATA;
5317 timing_out->hdmi_vic = 0;
5320 timing_out->vic = old_stream->timing.vic;
5321 timing_out->flags.HSYNC_POSITIVE_POLARITY = old_stream->timing.flags.HSYNC_POSITIVE_POLARITY;
5322 timing_out->flags.VSYNC_POSITIVE_POLARITY = old_stream->timing.flags.VSYNC_POSITIVE_POLARITY;
5324 timing_out->vic = drm_match_cea_mode(mode_in);
5325 if (mode_in->flags & DRM_MODE_FLAG_PHSYNC)
5326 timing_out->flags.HSYNC_POSITIVE_POLARITY = 1;
5327 if (mode_in->flags & DRM_MODE_FLAG_PVSYNC)
5328 timing_out->flags.VSYNC_POSITIVE_POLARITY = 1;
5331 if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
5332 drm_hdmi_avi_infoframe_from_display_mode(&avi_frame, (struct drm_connector *)connector, mode_in);
5333 timing_out->vic = avi_frame.video_code;
5334 drm_hdmi_vendor_infoframe_from_display_mode(&hv_frame, (struct drm_connector *)connector, mode_in);
5335 timing_out->hdmi_vic = hv_frame.vic;
5338 if (is_freesync_video_mode(mode_in, aconnector)) {
5339 timing_out->h_addressable = mode_in->hdisplay;
5340 timing_out->h_total = mode_in->htotal;
5341 timing_out->h_sync_width = mode_in->hsync_end - mode_in->hsync_start;
5342 timing_out->h_front_porch = mode_in->hsync_start - mode_in->hdisplay;
5343 timing_out->v_total = mode_in->vtotal;
5344 timing_out->v_addressable = mode_in->vdisplay;
5345 timing_out->v_front_porch = mode_in->vsync_start - mode_in->vdisplay;
5346 timing_out->v_sync_width = mode_in->vsync_end - mode_in->vsync_start;
5347 timing_out->pix_clk_100hz = mode_in->clock * 10;
5349 timing_out->h_addressable = mode_in->crtc_hdisplay;
5350 timing_out->h_total = mode_in->crtc_htotal;
5351 timing_out->h_sync_width = mode_in->crtc_hsync_end - mode_in->crtc_hsync_start;
5352 timing_out->h_front_porch = mode_in->crtc_hsync_start - mode_in->crtc_hdisplay;
5353 timing_out->v_total = mode_in->crtc_vtotal;
5354 timing_out->v_addressable = mode_in->crtc_vdisplay;
5355 timing_out->v_front_porch = mode_in->crtc_vsync_start - mode_in->crtc_vdisplay;
5356 timing_out->v_sync_width = mode_in->crtc_vsync_end - mode_in->crtc_vsync_start;
5357 timing_out->pix_clk_100hz = mode_in->crtc_clock * 10;
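/* drm_display_mode clocks are in kHz; DC expects 100 Hz units, hence the *10. */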
5360 timing_out->aspect_ratio = get_aspect_ratio(mode_in);
5362 stream->output_color_space = get_output_color_space(timing_out);
5364 stream->out_transfer_func->type = TF_TYPE_PREDEFINED;
5365 stream->out_transfer_func->tf = TRANSFER_FUNCTION_SRGB;
5366 if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
5367 if (!adjust_colour_depth_from_display_info(timing_out, info) &&
5368 drm_mode_is_420_also(info, mode_in) &&
5369 timing_out->pixel_encoding != PIXEL_ENCODING_YCBCR420) {
5370 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
5371 adjust_colour_depth_from_display_info(timing_out, info);
5376 static void fill_audio_info(struct audio_info *audio_info,
5377 const struct drm_connector *drm_connector,
5378 const struct dc_sink *dc_sink)
5381 int cea_revision = 0;
5382 const struct dc_edid_caps *edid_caps = &dc_sink->edid_caps;
5384 audio_info->manufacture_id = edid_caps->manufacturer_id;
5385 audio_info->product_id = edid_caps->product_id;
5387 cea_revision = drm_connector->display_info.cea_rev;
5389 strscpy(audio_info->display_name,
5390 edid_caps->display_name,
5391 AUDIO_INFO_DISPLAY_NAME_SIZE_IN_CHARS);
5393 if (cea_revision >= 3) {
5394 audio_info->mode_count = edid_caps->audio_mode_count;
5396 for (i = 0; i < audio_info->mode_count; ++i) {
5397 audio_info->modes[i].format_code =
5398 (enum audio_format_code)
5399 (edid_caps->audio_modes[i].format_code);
5400 audio_info->modes[i].channel_count =
5401 edid_caps->audio_modes[i].channel_count;
5402 audio_info->modes[i].sample_rates.all =
5403 edid_caps->audio_modes[i].sample_rate;
5404 audio_info->modes[i].sample_size =
5405 edid_caps->audio_modes[i].sample_size;
5409 audio_info->flags.all = edid_caps->speaker_flags;
5411 /* TODO: we only check the progressive mode; check the interlaced mode too */
5412 if (drm_connector->latency_present[0]) {
5413 audio_info->video_latency = drm_connector->video_latency[0];
5414 audio_info->audio_latency = drm_connector->audio_latency[0];
5417 /* TODO: For DP, video and audio latency should be calculated from DPCD caps */
5422 copy_crtc_timing_for_drm_display_mode(const struct drm_display_mode *src_mode,
5423 struct drm_display_mode *dst_mode)
5425 dst_mode->crtc_hdisplay = src_mode->crtc_hdisplay;
5426 dst_mode->crtc_vdisplay = src_mode->crtc_vdisplay;
5427 dst_mode->crtc_clock = src_mode->crtc_clock;
5428 dst_mode->crtc_hblank_start = src_mode->crtc_hblank_start;
5429 dst_mode->crtc_hblank_end = src_mode->crtc_hblank_end;
5430 dst_mode->crtc_hsync_start = src_mode->crtc_hsync_start;
5431 dst_mode->crtc_hsync_end = src_mode->crtc_hsync_end;
5432 dst_mode->crtc_htotal = src_mode->crtc_htotal;
5433 dst_mode->crtc_hskew = src_mode->crtc_hskew;
5434 dst_mode->crtc_vblank_start = src_mode->crtc_vblank_start;
5435 dst_mode->crtc_vblank_end = src_mode->crtc_vblank_end;
5436 dst_mode->crtc_vsync_start = src_mode->crtc_vsync_start;
5437 dst_mode->crtc_vsync_end = src_mode->crtc_vsync_end;
5438 dst_mode->crtc_vtotal = src_mode->crtc_vtotal;
5442 decide_crtc_timing_for_drm_display_mode(struct drm_display_mode *drm_mode,
5443 const struct drm_display_mode *native_mode,
5446 if (scale_enabled) {
5447 copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
5448 } else if (native_mode->clock == drm_mode->clock &&
5449 native_mode->htotal == drm_mode->htotal &&
5450 native_mode->vtotal == drm_mode->vtotal) {
5451 copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
5453 /* no scaling and no amdgpu-inserted mode: nothing to patch */
5457 static struct dc_sink *
5458 create_fake_sink(struct amdgpu_dm_connector *aconnector)
5460 struct dc_sink_init_data sink_init_data = { 0 };
5461 struct dc_sink *sink = NULL;
5462 sink_init_data.link = aconnector->dc_link;
5463 sink_init_data.sink_signal = aconnector->dc_link->connector_signal;
5465 sink = dc_sink_create(&sink_init_data);
5467 DRM_ERROR("Failed to create sink!\n");
5470 sink->sink_signal = SIGNAL_TYPE_VIRTUAL;
5475 static void set_multisync_trigger_params(
5476 struct dc_stream_state *stream)
5478 struct dc_stream_state *master = NULL;
5480 if (stream->triggered_crtc_reset.enabled) {
5481 master = stream->triggered_crtc_reset.event_source;
5482 stream->triggered_crtc_reset.event =
5483 master->timing.flags.VSYNC_POSITIVE_POLARITY ?
5484 CRTC_EVENT_VSYNC_RISING : CRTC_EVENT_VSYNC_FALLING;
5485 stream->triggered_crtc_reset.delay = TRIGGER_DELAY_NEXT_PIXEL;
5489 static void set_master_stream(struct dc_stream_state *stream_set[],
5492 int j, highest_rfr = 0, master_stream = 0;
5494 for (j = 0; j < stream_count; j++) {
5495 if (stream_set[j] && stream_set[j]->triggered_crtc_reset.enabled) {
5496 int refresh_rate = 0;
5498 refresh_rate = (stream_set[j]->timing.pix_clk_100hz*100)/
5499 (stream_set[j]->timing.h_total*stream_set[j]->timing.v_total);
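/*
 * pix_clk_100hz * 100 yields Hz; dividing by the pixels per frame
 * (h_total * v_total) gives the refresh rate in Hz.
 */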
5500 if (refresh_rate > highest_rfr) {
5501 highest_rfr = refresh_rate;
5506 for (j = 0; j < stream_count; j++) {
5508 stream_set[j]->triggered_crtc_reset.event_source = stream_set[master_stream];
5512 static void dm_enable_per_frame_crtc_master_sync(struct dc_state *context)
5515 struct dc_stream_state *stream;
5517 if (context->stream_count < 2)
5519 for (i = 0; i < context->stream_count ; i++) {
5520 if (!context->streams[i])
5523 * TODO: add a function to read AMD VSDB bits and set
5524 * crtc_sync_master.multi_sync_enabled flag
5525 * For now it's set to false
5529 set_master_stream(context->streams, context->stream_count);
5531 for (i = 0; i < context->stream_count ; i++) {
5532 stream = context->streams[i];
5537 set_multisync_trigger_params(stream);
5541 static void update_dsc_caps(struct amdgpu_dm_connector *aconnector,
5542 struct dc_sink *sink, struct dc_stream_state *stream,
5543 struct dsc_dec_dpcd_caps *dsc_caps)
5545 stream->timing.flags.DSC = 0;
5547 if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT) {
5548 #if defined(CONFIG_DRM_AMD_DC_DCN)
5549 dc_dsc_parse_dsc_dpcd(aconnector->dc_link->ctx->dc,
5550 aconnector->dc_link->dpcd_caps.dsc_caps.dsc_basic_caps.raw,
5551 aconnector->dc_link->dpcd_caps.dsc_caps.dsc_branch_decoder_caps.raw,
5557 static void apply_dsc_policy_for_stream(struct amdgpu_dm_connector *aconnector,
5558 struct dc_sink *sink, struct dc_stream_state *stream,
5559 struct dsc_dec_dpcd_caps *dsc_caps)
5561 struct drm_connector *drm_connector = &aconnector->base;
5562 uint32_t link_bandwidth_kbps;
5564 link_bandwidth_kbps = dc_link_bandwidth_kbps(aconnector->dc_link,
5565 dc_link_get_link_cap(aconnector->dc_link));
5566 #if defined(CONFIG_DRM_AMD_DC_DCN)
5567 /* Set DSC policy according to dsc_clock_en */
5568 dc_dsc_policy_set_enable_dsc_when_not_needed(
5569 aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE);
5571 if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT) {
5573 if (dc_dsc_compute_config(aconnector->dc_link->ctx->dc->res_pool->dscs[0],
5575 aconnector->dc_link->ctx->dc->debug.dsc_min_slice_height_override,
5577 link_bandwidth_kbps,
5579 &stream->timing.dsc_cfg)) {
5580 stream->timing.flags.DSC = 1;
5581 DRM_DEBUG_DRIVER("%s: [%s] DSC is selected from SST RX\n", __func__, drm_connector->name);
5585 /* Overwrite the stream flag if DSC is enabled through debugfs */
5586 if (aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE)
5587 stream->timing.flags.DSC = 1;
5589 if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_h)
5590 stream->timing.dsc_cfg.num_slices_h = aconnector->dsc_settings.dsc_num_slices_h;
5592 if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_v)
5593 stream->timing.dsc_cfg.num_slices_v = aconnector->dsc_settings.dsc_num_slices_v;
5595 if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_bits_per_pixel)
5596 stream->timing.dsc_cfg.bits_per_pixel = aconnector->dsc_settings.dsc_bits_per_pixel;
5600 static struct drm_display_mode *
5601 get_highest_refresh_rate_mode(struct amdgpu_dm_connector *aconnector,
5602 bool use_probed_modes)
5604 struct drm_display_mode *m, *m_pref = NULL;
5605 u16 current_refresh, highest_refresh;
5606 struct list_head *list_head = use_probed_modes ?
5607 &aconnector->base.probed_modes :
5608 &aconnector->base.modes;
5610 if (aconnector->freesync_vid_base.clock != 0)
5611 return &aconnector->freesync_vid_base;
5613 /* Find the preferred mode */
5614 list_for_each_entry (m, list_head, head) {
5615 if (m->type & DRM_MODE_TYPE_PREFERRED) {
5622 /* Probably an EDID with no preferred mode. Fall back to the first entry */
5623 m_pref = list_first_entry_or_null(
5624 &aconnector->base.modes, struct drm_display_mode, head);
5626 DRM_DEBUG_DRIVER("No preferred mode found in EDID\n");
5631 highest_refresh = drm_mode_vrefresh(m_pref);
5634 * Find the mode with the highest refresh rate at the same resolution.
5635 * For some monitors, the preferred mode is not the one with the
5636 * highest supported refresh rate.
5638 list_for_each_entry (m, list_head, head) {
5639 current_refresh = drm_mode_vrefresh(m);
5641 if (m->hdisplay == m_pref->hdisplay &&
5642 m->vdisplay == m_pref->vdisplay &&
5643 highest_refresh < current_refresh) {
5644 highest_refresh = current_refresh;
5649 aconnector->freesync_vid_base = *m_pref;
5653 static bool is_freesync_video_mode(const struct drm_display_mode *mode,
5654 struct amdgpu_dm_connector *aconnector)
5656 struct drm_display_mode *high_mode;
5659 high_mode = get_highest_refresh_rate_mode(aconnector, false);
5660 if (!high_mode || !mode)
5663 timing_diff = high_mode->vtotal - mode->vtotal;
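/*
 * A FreeSync video mode must match the base mode in everything except
 * vertical blanking: only vtotal and the vsync position may differ,
 * and both by exactly timing_diff lines, as checked below.
 */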
5665 if (high_mode->clock == 0 || high_mode->clock != mode->clock ||
5666 high_mode->hdisplay != mode->hdisplay ||
5667 high_mode->vdisplay != mode->vdisplay ||
5668 high_mode->hsync_start != mode->hsync_start ||
5669 high_mode->hsync_end != mode->hsync_end ||
5670 high_mode->htotal != mode->htotal ||
5671 high_mode->hskew != mode->hskew ||
5672 high_mode->vscan != mode->vscan ||
5673 high_mode->vsync_start - mode->vsync_start != timing_diff ||
5674 high_mode->vsync_end - mode->vsync_end != timing_diff)
5680 static struct dc_stream_state *
5681 create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
5682 const struct drm_display_mode *drm_mode,
5683 const struct dm_connector_state *dm_state,
5684 const struct dc_stream_state *old_stream,
5687 struct drm_display_mode *preferred_mode = NULL;
5688 struct drm_connector *drm_connector;
5689 const struct drm_connector_state *con_state =
5690 dm_state ? &dm_state->base : NULL;
5691 struct dc_stream_state *stream = NULL;
5692 struct drm_display_mode mode = *drm_mode;
5693 struct drm_display_mode saved_mode;
5694 struct drm_display_mode *freesync_mode = NULL;
5695 bool native_mode_found = false;
5696 bool recalculate_timing = false;
5697 bool scale = dm_state ? (dm_state->scaling != RMX_OFF) : false;
5699 int preferred_refresh = 0;
5700 #if defined(CONFIG_DRM_AMD_DC_DCN)
5701 struct dsc_dec_dpcd_caps dsc_caps;
5703 struct dc_sink *sink = NULL;
5705 memset(&saved_mode, 0, sizeof(saved_mode));
5707 if (aconnector == NULL) {
5708 DRM_ERROR("aconnector is NULL!\n");
5712 drm_connector = &aconnector->base;
5714 if (!aconnector->dc_sink) {
5715 sink = create_fake_sink(aconnector);
5719 sink = aconnector->dc_sink;
5720 dc_sink_retain(sink);
5723 stream = dc_create_stream_for_sink(sink);
5725 if (stream == NULL) {
5726 DRM_ERROR("Failed to create stream for sink!\n");
5730 stream->dm_stream_context = aconnector;
5732 stream->timing.flags.LTE_340MCSC_SCRAMBLE =
5733 drm_connector->display_info.hdmi.scdc.scrambling.low_rates;
5735 list_for_each_entry(preferred_mode, &aconnector->base.modes, head) {
5736 /* Search for preferred mode */
5737 if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED) {
5738 native_mode_found = true;
5742 if (!native_mode_found)
5743 preferred_mode = list_first_entry_or_null(
5744 &aconnector->base.modes,
5745 struct drm_display_mode,
5748 mode_refresh = drm_mode_vrefresh(&mode);
5750 if (preferred_mode == NULL) {
5752 * This may not be an error: the use case is when we have no
5753 * usermode calls to reset and set mode upon hotplug. In this
5754 * case, we call set mode ourselves to restore the previous mode,
5755 * and the mode list may not be filled in yet.
5757 DRM_DEBUG_DRIVER("No preferred mode found\n");
5759 recalculate_timing = amdgpu_freesync_vid_mode &&
5760 is_freesync_video_mode(&mode, aconnector);
5761 if (recalculate_timing) {
5762 freesync_mode = get_highest_refresh_rate_mode(aconnector, false);
5764 mode = *freesync_mode;
5766 decide_crtc_timing_for_drm_display_mode(
5767 &mode, preferred_mode, scale);
5769 preferred_refresh = drm_mode_vrefresh(preferred_mode);
5773 if (recalculate_timing)
5774 drm_mode_set_crtcinfo(&saved_mode, 0);
5776 drm_mode_set_crtcinfo(&mode, 0);
5779 * If scaling is enabled and the refresh rate didn't change,
5780 * we copy the VIC and polarities of the old timings
5782 if (!scale || mode_refresh != preferred_refresh)
5783 fill_stream_properties_from_drm_display_mode(
5784 stream, &mode, &aconnector->base, con_state, NULL,
5787 fill_stream_properties_from_drm_display_mode(
5788 stream, &mode, &aconnector->base, con_state, old_stream,
5791 #if defined(CONFIG_DRM_AMD_DC_DCN)
5792 /* SST DSC determination policy */
5793 update_dsc_caps(aconnector, sink, stream, &dsc_caps);
5794 if (aconnector->dsc_settings.dsc_force_enable != DSC_CLK_FORCE_DISABLE && dsc_caps.is_dsc_supported)
5795 apply_dsc_policy_for_stream(aconnector, sink, stream, &dsc_caps);
5798 update_stream_scaling_settings(&mode, dm_state, stream);
5801 &stream->audio_info,
5805 update_stream_signal(stream, sink);
5807 if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
5808 mod_build_hf_vsif_infopacket(stream, &stream->vsp_infopacket);
5810 if (stream->link->psr_settings.psr_feature_enabled) {
5812 // Decide whether the stream supports VSC SDP colorimetry
5813 // before building the VSC infopacket
5815 stream->use_vsc_sdp_for_colorimetry = false;
5816 if (aconnector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
5817 stream->use_vsc_sdp_for_colorimetry =
5818 aconnector->dc_sink->is_vsc_sdp_colorimetry_supported;
5820 if (stream->link->dpcd_caps.dprx_feature.bits.VSC_SDP_COLORIMETRY_SUPPORTED)
5821 stream->use_vsc_sdp_for_colorimetry = true;
5823 mod_build_vsc_infopacket(stream, &stream->vsc_infopacket);
5826 dc_sink_release(sink);
5831 static void amdgpu_dm_crtc_destroy(struct drm_crtc *crtc)
5833 drm_crtc_cleanup(crtc);
5837 static void dm_crtc_destroy_state(struct drm_crtc *crtc,
5838 struct drm_crtc_state *state)
5840 struct dm_crtc_state *cur = to_dm_crtc_state(state);
5842 /* TODO: destroy dc_stream objects once the stream object is flattened */
5844 dc_stream_release(cur->stream);
5847 __drm_atomic_helper_crtc_destroy_state(state);
5853 static void dm_crtc_reset_state(struct drm_crtc *crtc)
5855 struct dm_crtc_state *state;
5858 dm_crtc_destroy_state(crtc, crtc->state);
5860 state = kzalloc(sizeof(*state), GFP_KERNEL);
5861 if (WARN_ON(!state))
5864 __drm_atomic_helper_crtc_reset(crtc, &state->base);
5867 static struct drm_crtc_state *
5868 dm_crtc_duplicate_state(struct drm_crtc *crtc)
5870 struct dm_crtc_state *state, *cur;
5872 cur = to_dm_crtc_state(crtc->state);
5874 if (WARN_ON(!crtc->state))
5877 state = kzalloc(sizeof(*state), GFP_KERNEL);
5881 __drm_atomic_helper_crtc_duplicate_state(crtc, &state->base);
5884 state->stream = cur->stream;
5885 dc_stream_retain(state->stream);
5888 state->active_planes = cur->active_planes;
5889 state->vrr_infopacket = cur->vrr_infopacket;
5890 state->abm_level = cur->abm_level;
5891 state->vrr_supported = cur->vrr_supported;
5892 state->freesync_config = cur->freesync_config;
5893 state->cm_has_degamma = cur->cm_has_degamma;
5894 state->cm_is_degamma_srgb = cur->cm_is_degamma_srgb;
5895 /* TODO: duplicate dc_stream once the stream object is flattened */
5897 return &state->base;
5900 #ifdef CONFIG_DRM_AMD_SECURE_DISPLAY
5901 static int amdgpu_dm_crtc_late_register(struct drm_crtc *crtc)
5903 crtc_debugfs_init(crtc);
5909 static inline int dm_set_vupdate_irq(struct drm_crtc *crtc, bool enable)
5911 enum dc_irq_source irq_source;
5912 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
5913 struct amdgpu_device *adev = drm_to_adev(crtc->dev);
5916 irq_source = IRQ_TYPE_VUPDATE + acrtc->otg_inst;
5918 rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
5920 DRM_DEBUG_VBL("crtc %d - vupdate irq %sabling: r=%d\n",
5921 acrtc->crtc_id, enable ? "en" : "dis", rc);
5925 static inline int dm_set_vblank(struct drm_crtc *crtc, bool enable)
5927 enum dc_irq_source irq_source;
5928 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
5929 struct amdgpu_device *adev = drm_to_adev(crtc->dev);
5930 struct dm_crtc_state *acrtc_state = to_dm_crtc_state(crtc->state);
5931 #if defined(CONFIG_DRM_AMD_DC_DCN)
5932 struct amdgpu_display_manager *dm = &adev->dm;
5933 unsigned long flags;
5938 /* vblank irq on -> Only need vupdate irq in vrr mode */
5939 if (amdgpu_dm_vrr_active(acrtc_state))
5940 rc = dm_set_vupdate_irq(crtc, true);
5942 /* vblank irq off -> vupdate irq off */
5943 rc = dm_set_vupdate_irq(crtc, false);
5949 irq_source = IRQ_TYPE_VBLANK + acrtc->otg_inst;
5951 if (!dc_interrupt_set(adev->dm.dc, irq_source, enable))
5954 if (amdgpu_in_reset(adev))
5957 #if defined(CONFIG_DRM_AMD_DC_DCN)
5958 spin_lock_irqsave(&dm->vblank_lock, flags);
5959 dm->vblank_workqueue->dm = dm;
5960 dm->vblank_workqueue->otg_inst = acrtc->otg_inst;
5961 dm->vblank_workqueue->enable = enable;
5962 spin_unlock_irqrestore(&dm->vblank_lock, flags);
5963 schedule_work(&dm->vblank_workqueue->mall_work);
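/*
 * The MALL state change itself runs later in process context via the
 * vblank workqueue's mall_work handler, using the parameters stashed
 * above under vblank_lock.
 */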
5969 static int dm_enable_vblank(struct drm_crtc *crtc)
5971 return dm_set_vblank(crtc, true);
5974 static void dm_disable_vblank(struct drm_crtc *crtc)
5976 dm_set_vblank(crtc, false);
5979 /* Implemented only the options currently available for the driver */
5980 static const struct drm_crtc_funcs amdgpu_dm_crtc_funcs = {
5981 .reset = dm_crtc_reset_state,
5982 .destroy = amdgpu_dm_crtc_destroy,
5983 .set_config = drm_atomic_helper_set_config,
5984 .page_flip = drm_atomic_helper_page_flip,
5985 .atomic_duplicate_state = dm_crtc_duplicate_state,
5986 .atomic_destroy_state = dm_crtc_destroy_state,
5987 .set_crc_source = amdgpu_dm_crtc_set_crc_source,
5988 .verify_crc_source = amdgpu_dm_crtc_verify_crc_source,
5989 .get_crc_sources = amdgpu_dm_crtc_get_crc_sources,
5990 .get_vblank_counter = amdgpu_get_vblank_counter_kms,
5991 .enable_vblank = dm_enable_vblank,
5992 .disable_vblank = dm_disable_vblank,
5993 .get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
5994 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
5995 .late_register = amdgpu_dm_crtc_late_register,
5999 static enum drm_connector_status
6000 amdgpu_dm_connector_detect(struct drm_connector *connector, bool force)
6003 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
6007 * 1. This interface is NOT called in context of HPD irq.
6008 * 2. This interface *is called* in context of user-mode ioctl, which
6009 * makes it a bad place for *any* MST-related activity.
6012 if (aconnector->base.force == DRM_FORCE_UNSPECIFIED &&
6013 !aconnector->fake_enable)
6014 connected = (aconnector->dc_sink != NULL);
6016 connected = (aconnector->base.force == DRM_FORCE_ON);
6018 update_subconnector_property(aconnector);
6020 return (connected ? connector_status_connected :
6021 connector_status_disconnected);
6024 int amdgpu_dm_connector_atomic_set_property(struct drm_connector *connector,
6025 struct drm_connector_state *connector_state,
6026 struct drm_property *property,
6029 struct drm_device *dev = connector->dev;
6030 struct amdgpu_device *adev = drm_to_adev(dev);
6031 struct dm_connector_state *dm_old_state =
6032 to_dm_connector_state(connector->state);
6033 struct dm_connector_state *dm_new_state =
6034 to_dm_connector_state(connector_state);
6038 if (property == dev->mode_config.scaling_mode_property) {
6039 enum amdgpu_rmx_type rmx_type;
6042 case DRM_MODE_SCALE_CENTER:
6043 rmx_type = RMX_CENTER;
6045 case DRM_MODE_SCALE_ASPECT:
6046 rmx_type = RMX_ASPECT;
6048 case DRM_MODE_SCALE_FULLSCREEN:
6049 rmx_type = RMX_FULL;
6051 case DRM_MODE_SCALE_NONE:
6057 if (dm_old_state->scaling == rmx_type)
6060 dm_new_state->scaling = rmx_type;
6062 } else if (property == adev->mode_info.underscan_hborder_property) {
6063 dm_new_state->underscan_hborder = val;
6065 } else if (property == adev->mode_info.underscan_vborder_property) {
6066 dm_new_state->underscan_vborder = val;
6068 } else if (property == adev->mode_info.underscan_property) {
6069 dm_new_state->underscan_enable = val;
6071 } else if (property == adev->mode_info.abm_level_property) {
6072 dm_new_state->abm_level = val;
6079 int amdgpu_dm_connector_atomic_get_property(struct drm_connector *connector,
6080 const struct drm_connector_state *state,
6081 struct drm_property *property,
6084 struct drm_device *dev = connector->dev;
6085 struct amdgpu_device *adev = drm_to_adev(dev);
6086 struct dm_connector_state *dm_state =
6087 to_dm_connector_state(state);
6090 if (property == dev->mode_config.scaling_mode_property) {
6091 switch (dm_state->scaling) {
6093 *val = DRM_MODE_SCALE_CENTER;
6096 *val = DRM_MODE_SCALE_ASPECT;
6099 *val = DRM_MODE_SCALE_FULLSCREEN;
6103 *val = DRM_MODE_SCALE_NONE;
6107 } else if (property == adev->mode_info.underscan_hborder_property) {
6108 *val = dm_state->underscan_hborder;
6110 } else if (property == adev->mode_info.underscan_vborder_property) {
6111 *val = dm_state->underscan_vborder;
6113 } else if (property == adev->mode_info.underscan_property) {
6114 *val = dm_state->underscan_enable;
6116 } else if (property == adev->mode_info.abm_level_property) {
6117 *val = dm_state->abm_level;
6124 static void amdgpu_dm_connector_unregister(struct drm_connector *connector)
6126 struct amdgpu_dm_connector *amdgpu_dm_connector = to_amdgpu_dm_connector(connector);
6128 drm_dp_aux_unregister(&amdgpu_dm_connector->dm_dp_aux.aux);
6131 static void amdgpu_dm_connector_destroy(struct drm_connector *connector)
6133 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
6134 const struct dc_link *link = aconnector->dc_link;
6135 struct amdgpu_device *adev = drm_to_adev(connector->dev);
6136 struct amdgpu_display_manager *dm = &adev->dm;
6139 * Call only if mst_mgr was initialized before, since it's not done
6140 * for all connector types.
6142 if (aconnector->mst_mgr.dev)
6143 drm_dp_mst_topology_mgr_destroy(&aconnector->mst_mgr);
6145 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
6146 defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
6148 if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
6149 link->type != dc_connection_none &&
6150 dm->backlight_dev) {
6151 backlight_device_unregister(dm->backlight_dev);
6152 dm->backlight_dev = NULL;
6156 if (aconnector->dc_em_sink)
6157 dc_sink_release(aconnector->dc_em_sink);
6158 aconnector->dc_em_sink = NULL;
6159 if (aconnector->dc_sink)
6160 dc_sink_release(aconnector->dc_sink);
6161 aconnector->dc_sink = NULL;
6163 drm_dp_cec_unregister_connector(&aconnector->dm_dp_aux.aux);
6164 drm_connector_unregister(connector);
6165 drm_connector_cleanup(connector);
6166 if (aconnector->i2c) {
6167 i2c_del_adapter(&aconnector->i2c->base);
6168 kfree(aconnector->i2c);
6170 kfree(aconnector->dm_dp_aux.aux.name);
6175 void amdgpu_dm_connector_funcs_reset(struct drm_connector *connector)
6177 struct dm_connector_state *state =
6178 to_dm_connector_state(connector->state);
6180 if (connector->state)
6181 __drm_atomic_helper_connector_destroy_state(connector->state);
6185 state = kzalloc(sizeof(*state), GFP_KERNEL);
6188 state->scaling = RMX_OFF;
6189 state->underscan_enable = false;
6190 state->underscan_hborder = 0;
6191 state->underscan_vborder = 0;
6192 state->base.max_requested_bpc = 8;
6193 state->vcpi_slots = 0;
6195 if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
6196 state->abm_level = amdgpu_dm_abm_level;
6198 __drm_atomic_helper_connector_reset(connector, &state->base);
6202 struct drm_connector_state *
6203 amdgpu_dm_connector_atomic_duplicate_state(struct drm_connector *connector)
6205 struct dm_connector_state *state =
6206 to_dm_connector_state(connector->state);
6208 struct dm_connector_state *new_state =
6209 kmemdup(state, sizeof(*state), GFP_KERNEL);
6214 __drm_atomic_helper_connector_duplicate_state(connector, &new_state->base);
6216 new_state->freesync_capable = state->freesync_capable;
6217 new_state->abm_level = state->abm_level;
6218 new_state->scaling = state->scaling;
6219 new_state->underscan_enable = state->underscan_enable;
6220 new_state->underscan_hborder = state->underscan_hborder;
6221 new_state->underscan_vborder = state->underscan_vborder;
6222 new_state->vcpi_slots = state->vcpi_slots;
6223 new_state->pbn = state->pbn;
6224 return &new_state->base;
6228 amdgpu_dm_connector_late_register(struct drm_connector *connector)
6230 struct amdgpu_dm_connector *amdgpu_dm_connector =
6231 to_amdgpu_dm_connector(connector);
6234 if ((connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort) ||
6235 (connector->connector_type == DRM_MODE_CONNECTOR_eDP)) {
6236 amdgpu_dm_connector->dm_dp_aux.aux.dev = connector->kdev;
6237 r = drm_dp_aux_register(&amdgpu_dm_connector->dm_dp_aux.aux);
6242 #if defined(CONFIG_DEBUG_FS)
6243 connector_debugfs_init(amdgpu_dm_connector);
6249 static const struct drm_connector_funcs amdgpu_dm_connector_funcs = {
6250 .reset = amdgpu_dm_connector_funcs_reset,
6251 .detect = amdgpu_dm_connector_detect,
6252 .fill_modes = drm_helper_probe_single_connector_modes,
6253 .destroy = amdgpu_dm_connector_destroy,
6254 .atomic_duplicate_state = amdgpu_dm_connector_atomic_duplicate_state,
6255 .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
6256 .atomic_set_property = amdgpu_dm_connector_atomic_set_property,
6257 .atomic_get_property = amdgpu_dm_connector_atomic_get_property,
6258 .late_register = amdgpu_dm_connector_late_register,
6259 .early_unregister = amdgpu_dm_connector_unregister
6262 static int get_modes(struct drm_connector *connector)
6264 return amdgpu_dm_connector_get_modes(connector);
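/*
 * Create an emulated (virtual-signal) DC sink from the EDID blob attached
 * to the connector, so that a forced-on connector can be driven without a
 * detected display. Without an override EDID the connector is forced OFF.
 */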
6267 static void create_eml_sink(struct amdgpu_dm_connector *aconnector)
6269 struct dc_sink_init_data init_params = {
6270 .link = aconnector->dc_link,
6271 .sink_signal = SIGNAL_TYPE_VIRTUAL
6275 if (!aconnector->base.edid_blob_ptr) {
6276 DRM_ERROR("No EDID firmware found on connector: %s ,forcing to OFF!\n",
6277 aconnector->base.name);
6279 aconnector->base.force = DRM_FORCE_OFF;
6280 aconnector->base.override_edid = false;
6284 edid = (struct edid *) aconnector->base.edid_blob_ptr->data;
6286 aconnector->edid = edid;
6288 aconnector->dc_em_sink = dc_link_add_remote_sink(
6289 aconnector->dc_link,
6291 (edid->extensions + 1) * EDID_LENGTH,
6294 if (aconnector->base.force == DRM_FORCE_ON) {
6295 aconnector->dc_sink = aconnector->dc_link->local_sink ?
6296 aconnector->dc_link->local_sink :
6297 aconnector->dc_em_sink;
6298 dc_sink_retain(aconnector->dc_sink);
6302 static void handle_edid_mgmt(struct amdgpu_dm_connector *aconnector)
6304 struct dc_link *link = (struct dc_link *)aconnector->dc_link;
	 * In case of a headless boot with force on for a DP-managed connector,
	 * those settings have to be != 0 to get the initial modeset.
6310 if (link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT) {
6311 link->verified_link_cap.lane_count = LANE_COUNT_FOUR;
6312 link->verified_link_cap.link_rate = LINK_RATE_HIGH2;
6316 aconnector->base.override_edid = true;
6317 create_eml_sink(aconnector);
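/*
 * Build a DC stream for the sink and have DC validate it. On validation
 * failure, retry with a lower bpc (stepping down by 2 until 6), and as a
 * last resort retry once with YCbCr420 output forced.
 */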
6320 static struct dc_stream_state *
6321 create_validate_stream_for_sink(struct amdgpu_dm_connector *aconnector,
6322 const struct drm_display_mode *drm_mode,
6323 const struct dm_connector_state *dm_state,
6324 const struct dc_stream_state *old_stream)
6326 struct drm_connector *connector = &aconnector->base;
6327 struct amdgpu_device *adev = drm_to_adev(connector->dev);
6328 struct dc_stream_state *stream;
6329 const struct drm_connector_state *drm_state = dm_state ? &dm_state->base : NULL;
6330 int requested_bpc = drm_state ? drm_state->max_requested_bpc : 8;
6331 enum dc_status dc_result = DC_OK;
6334 stream = create_stream_for_sink(aconnector, drm_mode,
6335 dm_state, old_stream,
6337 if (stream == NULL) {
6338 DRM_ERROR("Failed to create stream for sink!\n");
6342 dc_result = dc_validate_stream(adev->dm.dc, stream);
6344 if (dc_result != DC_OK) {
6345 DRM_DEBUG_KMS("Mode %dx%d (clk %d) failed DC validation with error %d (%s)\n",
6350 dc_status_to_str(dc_result));
6352 dc_stream_release(stream);
6354 requested_bpc -= 2; /* lower bpc to retry validation */
6357 } while (stream == NULL && requested_bpc >= 6);
6359 if (dc_result == DC_FAIL_ENC_VALIDATE && !aconnector->force_yuv420_output) {
6360 DRM_DEBUG_KMS("Retry forcing YCbCr420 encoding\n");
6362 aconnector->force_yuv420_output = true;
6363 stream = create_validate_stream_for_sink(aconnector, drm_mode,
6364 dm_state, old_stream);
6365 aconnector->force_yuv420_output = false;
6371 enum drm_mode_status amdgpu_dm_connector_mode_valid(struct drm_connector *connector,
6372 struct drm_display_mode *mode)
6374 int result = MODE_ERROR;
6375 struct dc_sink *dc_sink;
6376 /* TODO: Unhardcode stream count */
6377 struct dc_stream_state *stream;
6378 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
6380 if ((mode->flags & DRM_MODE_FLAG_INTERLACE) ||
6381 (mode->flags & DRM_MODE_FLAG_DBLSCAN))
	 * Only run this the first time mode_valid is called to initialize
6388 if (aconnector->base.force != DRM_FORCE_UNSPECIFIED &&
6389 !aconnector->dc_em_sink)
6390 handle_edid_mgmt(aconnector);
6392 dc_sink = to_amdgpu_dm_connector(connector)->dc_sink;
6394 if (dc_sink == NULL && aconnector->base.force != DRM_FORCE_ON_DIGITAL &&
6395 aconnector->base.force != DRM_FORCE_ON) {
6396 DRM_ERROR("dc_sink is NULL!\n");
6400 stream = create_validate_stream_for_sink(aconnector, mode, NULL, NULL);
6402 dc_stream_release(stream);
	/* TODO: error handling */
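/*
 * Pack the connector's HDR output metadata into a DC info packet: an HDMI
 * Dynamic Range and Mastering infoframe for HDMI, or the equivalent SDP
 * for DP/eDP. The payload is the fixed 26-byte static metadata block
 * packed from the DRM infoframe.
 */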
6411 static int fill_hdr_info_packet(const struct drm_connector_state *state,
6412 struct dc_info_packet *out)
6414 struct hdmi_drm_infoframe frame;
6415 unsigned char buf[30]; /* 26 + 4 */
6419 memset(out, 0, sizeof(*out));
6421 if (!state->hdr_output_metadata)
6424 ret = drm_hdmi_infoframe_set_hdr_metadata(&frame, state);
6428 len = hdmi_drm_infoframe_pack_only(&frame, buf, sizeof(buf));
6432 /* Static metadata is a fixed 26 bytes + 4 byte header. */
6436 /* Prepare the infopacket for DC. */
6437 switch (state->connector->connector_type) {
6438 case DRM_MODE_CONNECTOR_HDMIA:
6439 out->hb0 = 0x87; /* type */
6440 out->hb1 = 0x01; /* version */
6441 out->hb2 = 0x1A; /* length */
6442 out->sb[0] = buf[3]; /* checksum */
6446 case DRM_MODE_CONNECTOR_DisplayPort:
6447 case DRM_MODE_CONNECTOR_eDP:
6448 out->hb0 = 0x00; /* sdp id, zero */
6449 out->hb1 = 0x87; /* type */
6450 out->hb2 = 0x1D; /* payload len - 1 */
6451 out->hb3 = (0x13 << 2); /* sdp version */
6452 out->sb[0] = 0x01; /* version */
6453 out->sb[1] = 0x1A; /* length */
6461 memcpy(&out->sb[i], &buf[4], 26);
6464 print_hex_dump(KERN_DEBUG, "HDR SB:", DUMP_PREFIX_NONE, 16, 1, out->sb,
6465 sizeof(out->sb), false);
6471 amdgpu_dm_connector_atomic_check(struct drm_connector *conn,
6472 struct drm_atomic_state *state)
6474 struct drm_connector_state *new_con_state =
6475 drm_atomic_get_new_connector_state(state, conn);
6476 struct drm_connector_state *old_con_state =
6477 drm_atomic_get_old_connector_state(state, conn);
6478 struct drm_crtc *crtc = new_con_state->crtc;
6479 struct drm_crtc_state *new_crtc_state;
6482 trace_amdgpu_dm_connector_atomic_check(new_con_state);
6487 if (!drm_connector_atomic_hdr_metadata_equal(old_con_state, new_con_state)) {
6488 struct dc_info_packet hdr_infopacket;
6490 ret = fill_hdr_info_packet(new_con_state, &hdr_infopacket);
6494 new_crtc_state = drm_atomic_get_crtc_state(state, crtc);
6495 if (IS_ERR(new_crtc_state))
6496 return PTR_ERR(new_crtc_state);
6499 * DC considers the stream backends changed if the
6500 * static metadata changes. Forcing the modeset also
6501 * gives a simple way for userspace to switch from
6502 * 8bpc to 10bpc when setting the metadata to enter
6505 * Changing the static metadata after it's been
6506 * set is permissible, however. So only force a
6507 * modeset if we're entering or exiting HDR.
6509 new_crtc_state->mode_changed =
6510 !old_con_state->hdr_output_metadata ||
6511 !new_con_state->hdr_output_metadata;
6517 static const struct drm_connector_helper_funcs
6518 amdgpu_dm_connector_helper_funcs = {
	 * If hotplugging a second, bigger display in FB console mode, the
	 * bigger resolution modes will be filtered by drm_mode_validate_size()
	 * and will be missing after the user starts lightdm. So we need to
	 * renew the mode list in the get_modes callback, not just return the
	 * mode count.
6525 .get_modes = get_modes,
6526 .mode_valid = amdgpu_dm_connector_mode_valid,
6527 .atomic_check = amdgpu_dm_connector_atomic_check,
6530 static void dm_crtc_helper_disable(struct drm_crtc *crtc)
6534 static int count_crtc_active_planes(struct drm_crtc_state *new_crtc_state)
6536 struct drm_atomic_state *state = new_crtc_state->state;
6537 struct drm_plane *plane;
6540 drm_for_each_plane_mask(plane, state->dev, new_crtc_state->plane_mask) {
6541 struct drm_plane_state *new_plane_state;
6543 /* Cursor planes are "fake". */
6544 if (plane->type == DRM_PLANE_TYPE_CURSOR)
6547 new_plane_state = drm_atomic_get_new_plane_state(state, plane);
6549 if (!new_plane_state) {
			 * The plane is enabled on the CRTC and hasn't changed
6552 * state. This means that it previously passed
6553 * validation and is therefore enabled.
6559 /* We need a framebuffer to be considered enabled. */
6560 num_active += (new_plane_state->fb != NULL);
6566 static void dm_update_crtc_active_planes(struct drm_crtc *crtc,
6567 struct drm_crtc_state *new_crtc_state)
6569 struct dm_crtc_state *dm_new_crtc_state =
6570 to_dm_crtc_state(new_crtc_state);
6572 dm_new_crtc_state->active_planes = 0;
6574 if (!dm_new_crtc_state->stream)
6577 dm_new_crtc_state->active_planes =
6578 count_crtc_active_planes(new_crtc_state);
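/*
 * CRTC atomic check: refresh the active plane count, require the primary
 * plane to be enabled whenever the CRTC is, and let DC validate the
 * stream when one is attached.
 */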
6581 static int dm_crtc_helper_atomic_check(struct drm_crtc *crtc,
6582 struct drm_atomic_state *state)
6584 struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state,
6586 struct amdgpu_device *adev = drm_to_adev(crtc->dev);
6587 struct dc *dc = adev->dm.dc;
6588 struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
6591 trace_amdgpu_dm_crtc_atomic_check(crtc_state);
6593 dm_update_crtc_active_planes(crtc, crtc_state);
6595 if (unlikely(!dm_crtc_state->stream &&
6596 modeset_required(crtc_state, NULL, dm_crtc_state->stream))) {
6602 * We require the primary plane to be enabled whenever the CRTC is, otherwise
6603 * drm_mode_cursor_universal may end up trying to enable the cursor plane while all other
6604 * planes are disabled, which is not supported by the hardware. And there is legacy
6605 * userspace which stops using the HW cursor altogether in response to the resulting EINVAL.
6607 if (crtc_state->enable &&
6608 !(crtc_state->plane_mask & drm_plane_mask(crtc->primary))) {
6609 DRM_DEBUG_ATOMIC("Can't enable a CRTC without enabling the primary plane\n");
6613 /* In some use cases, like reset, no stream is attached */
6614 if (!dm_crtc_state->stream)
6617 if (dc_validate_stream(dc, dm_crtc_state->stream) == DC_OK)
6620 DRM_DEBUG_ATOMIC("Failed DC stream validation\n");
6624 static bool dm_crtc_helper_mode_fixup(struct drm_crtc *crtc,
6625 const struct drm_display_mode *mode,
6626 struct drm_display_mode *adjusted_mode)
6631 static const struct drm_crtc_helper_funcs amdgpu_dm_crtc_helper_funcs = {
6632 .disable = dm_crtc_helper_disable,
6633 .atomic_check = dm_crtc_helper_atomic_check,
6634 .mode_fixup = dm_crtc_helper_mode_fixup,
6635 .get_scanout_position = amdgpu_crtc_get_scanout_position,
6638 static void dm_encoder_helper_disable(struct drm_encoder *encoder)
static int convert_dc_color_depth_into_bpc(enum dc_color_depth display_color_depth)
6645 switch (display_color_depth) {
6646 case COLOR_DEPTH_666:
6648 case COLOR_DEPTH_888:
6650 case COLOR_DEPTH_101010:
6652 case COLOR_DEPTH_121212:
6654 case COLOR_DEPTH_141414:
6656 case COLOR_DEPTH_161616:
6664 static int dm_encoder_helper_atomic_check(struct drm_encoder *encoder,
6665 struct drm_crtc_state *crtc_state,
6666 struct drm_connector_state *conn_state)
6668 struct drm_atomic_state *state = crtc_state->state;
6669 struct drm_connector *connector = conn_state->connector;
6670 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
6671 struct dm_connector_state *dm_new_connector_state = to_dm_connector_state(conn_state);
6672 const struct drm_display_mode *adjusted_mode = &crtc_state->adjusted_mode;
6673 struct drm_dp_mst_topology_mgr *mst_mgr;
6674 struct drm_dp_mst_port *mst_port;
6675 enum dc_color_depth color_depth;
6677 bool is_y420 = false;
6679 if (!aconnector->port || !aconnector->dc_sink)
6682 mst_port = aconnector->port;
6683 mst_mgr = &aconnector->mst_port->mst_mgr;
6685 if (!crtc_state->connectors_changed && !crtc_state->mode_changed)
6688 if (!state->duplicated) {
6689 int max_bpc = conn_state->max_requested_bpc;
6690 is_y420 = drm_mode_is_420_also(&connector->display_info, adjusted_mode) &&
6691 aconnector->force_yuv420_output;
6692 color_depth = convert_color_depth_from_display_info(connector,
6695 bpp = convert_dc_color_depth_into_bpc(color_depth) * 3;
6696 clock = adjusted_mode->clock;
6697 dm_new_connector_state->pbn = drm_dp_calc_pbn_mode(clock, bpp, false);
6699 dm_new_connector_state->vcpi_slots = drm_dp_atomic_find_vcpi_slots(state,
6702 dm_new_connector_state->pbn,
6703 dm_mst_get_pbn_divider(aconnector->dc_link));
6704 if (dm_new_connector_state->vcpi_slots < 0) {
6705 DRM_DEBUG_ATOMIC("failed finding vcpi slots: %d\n", (int)dm_new_connector_state->vcpi_slots);
6706 return dm_new_connector_state->vcpi_slots;
6711 const struct drm_encoder_helper_funcs amdgpu_dm_encoder_helper_funcs = {
6712 .disable = dm_encoder_helper_disable,
6713 .atomic_check = dm_encoder_helper_atomic_check
6716 #if defined(CONFIG_DRM_AMD_DC_DCN)
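/*
 * For each MST connector in the atomic state, find the DC stream backing
 * it and reconcile DSC: disable DSC on the port when the stream does not
 * use it, otherwise recompute PBN from the DSC target bits_per_pixel and
 * reserve VCPI slots accordingly.
 */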
6717 static int dm_update_mst_vcpi_slots_for_dsc(struct drm_atomic_state *state,
6718 struct dc_state *dc_state)
6720 struct dc_stream_state *stream = NULL;
6721 struct drm_connector *connector;
6722 struct drm_connector_state *new_con_state;
6723 struct amdgpu_dm_connector *aconnector;
6724 struct dm_connector_state *dm_conn_state;
6725 int i, j, clock, bpp;
6726 int vcpi, pbn_div, pbn = 0;
6728 for_each_new_connector_in_state(state, connector, new_con_state, i) {
6730 aconnector = to_amdgpu_dm_connector(connector);
6732 if (!aconnector->port)
6735 if (!new_con_state || !new_con_state->crtc)
6738 dm_conn_state = to_dm_connector_state(new_con_state);
6740 for (j = 0; j < dc_state->stream_count; j++) {
6741 stream = dc_state->streams[j];
6745 if ((struct amdgpu_dm_connector*)stream->dm_stream_context == aconnector)
6754 if (stream->timing.flags.DSC != 1) {
6755 drm_dp_mst_atomic_enable_dsc(state,
6763 pbn_div = dm_mst_get_pbn_divider(stream->link);
6764 bpp = stream->timing.dsc_cfg.bits_per_pixel;
6765 clock = stream->timing.pix_clk_100hz / 10;
6766 pbn = drm_dp_calc_pbn_mode(clock, bpp, true);
6767 vcpi = drm_dp_mst_atomic_enable_dsc(state,
6774 dm_conn_state->pbn = pbn;
6775 dm_conn_state->vcpi_slots = vcpi;
6781 static void dm_drm_plane_reset(struct drm_plane *plane)
6783 struct dm_plane_state *amdgpu_state = NULL;
6786 plane->funcs->atomic_destroy_state(plane, plane->state);
6788 amdgpu_state = kzalloc(sizeof(*amdgpu_state), GFP_KERNEL);
6789 WARN_ON(amdgpu_state == NULL);
6792 __drm_atomic_helper_plane_reset(plane, &amdgpu_state->base);
6795 static struct drm_plane_state *
6796 dm_drm_plane_duplicate_state(struct drm_plane *plane)
6798 struct dm_plane_state *dm_plane_state, *old_dm_plane_state;
6800 old_dm_plane_state = to_dm_plane_state(plane->state);
6801 dm_plane_state = kzalloc(sizeof(*dm_plane_state), GFP_KERNEL);
6802 if (!dm_plane_state)
6805 __drm_atomic_helper_plane_duplicate_state(plane, &dm_plane_state->base);
6807 if (old_dm_plane_state->dc_state) {
6808 dm_plane_state->dc_state = old_dm_plane_state->dc_state;
6809 dc_plane_state_retain(dm_plane_state->dc_state);
6812 return &dm_plane_state->base;
6815 static void dm_drm_plane_destroy_state(struct drm_plane *plane,
6816 struct drm_plane_state *state)
6818 struct dm_plane_state *dm_plane_state = to_dm_plane_state(state);
6820 if (dm_plane_state->dc_state)
6821 dc_plane_state_release(dm_plane_state->dc_state);
6823 drm_atomic_helper_plane_destroy_state(plane, state);
6826 static const struct drm_plane_funcs dm_plane_funcs = {
6827 .update_plane = drm_atomic_helper_update_plane,
6828 .disable_plane = drm_atomic_helper_disable_plane,
6829 .destroy = drm_primary_helper_destroy,
6830 .reset = dm_drm_plane_reset,
6831 .atomic_duplicate_state = dm_drm_plane_duplicate_state,
6832 .atomic_destroy_state = dm_drm_plane_destroy_state,
6833 .format_mod_supported = dm_plane_format_mod_supported,
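/*
 * prepare_fb: reserve and pin the BO backing the new framebuffer (VRAM
 * only for cursors, any supported domain otherwise), bind it into GART
 * and record the GPU address, then fill DC buffer attributes for newly
 * created planes.
 */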
6836 static int dm_plane_helper_prepare_fb(struct drm_plane *plane,
6837 struct drm_plane_state *new_state)
6839 struct amdgpu_framebuffer *afb;
6840 struct drm_gem_object *obj;
6841 struct amdgpu_device *adev;
6842 struct amdgpu_bo *rbo;
6843 struct dm_plane_state *dm_plane_state_new, *dm_plane_state_old;
6844 struct list_head list;
6845 struct ttm_validate_buffer tv;
6846 struct ww_acquire_ctx ticket;
6850 if (!new_state->fb) {
6851 DRM_DEBUG_KMS("No FB bound\n");
6855 afb = to_amdgpu_framebuffer(new_state->fb);
6856 obj = new_state->fb->obj[0];
6857 rbo = gem_to_amdgpu_bo(obj);
6858 adev = amdgpu_ttm_adev(rbo->tbo.bdev);
6859 INIT_LIST_HEAD(&list);
6863 list_add(&tv.head, &list);
6865 r = ttm_eu_reserve_buffers(&ticket, &list, false, NULL);
6867 dev_err(adev->dev, "fail to reserve bo (%d)\n", r);
6871 if (plane->type != DRM_PLANE_TYPE_CURSOR)
6872 domain = amdgpu_display_supported_domains(adev, rbo->flags);
6874 domain = AMDGPU_GEM_DOMAIN_VRAM;
6876 r = amdgpu_bo_pin(rbo, domain);
6877 if (unlikely(r != 0)) {
6878 if (r != -ERESTARTSYS)
6879 DRM_ERROR("Failed to pin framebuffer with error %d\n", r);
6880 ttm_eu_backoff_reservation(&ticket, &list);
6884 r = amdgpu_ttm_alloc_gart(&rbo->tbo);
6885 if (unlikely(r != 0)) {
6886 amdgpu_bo_unpin(rbo);
6887 ttm_eu_backoff_reservation(&ticket, &list);
6888 DRM_ERROR("%p bind failed\n", rbo);
6892 ttm_eu_backoff_reservation(&ticket, &list);
6894 afb->address = amdgpu_bo_gpu_offset(rbo);
6899 * We don't do surface updates on planes that have been newly created,
6900 * but we also don't have the afb->address during atomic check.
6902 * Fill in buffer attributes depending on the address here, but only on
6903 * newly created planes since they're not being used by DC yet and this
6904 * won't modify global state.
6906 dm_plane_state_old = to_dm_plane_state(plane->state);
6907 dm_plane_state_new = to_dm_plane_state(new_state);
6909 if (dm_plane_state_new->dc_state &&
6910 dm_plane_state_old->dc_state != dm_plane_state_new->dc_state) {
6911 struct dc_plane_state *plane_state =
6912 dm_plane_state_new->dc_state;
6913 bool force_disable_dcc = !plane_state->dcc.enable;
6915 fill_plane_buffer_attributes(
6916 adev, afb, plane_state->format, plane_state->rotation,
6918 &plane_state->tiling_info, &plane_state->plane_size,
6919 &plane_state->dcc, &plane_state->address,
6920 afb->tmz_surface, force_disable_dcc);
6926 static void dm_plane_helper_cleanup_fb(struct drm_plane *plane,
6927 struct drm_plane_state *old_state)
6929 struct amdgpu_bo *rbo;
6935 rbo = gem_to_amdgpu_bo(old_state->fb->obj[0]);
6936 r = amdgpu_bo_reserve(rbo, false);
6938 DRM_ERROR("failed to reserve rbo before unpin\n");
6942 amdgpu_bo_unpin(rbo);
6943 amdgpu_bo_unreserve(rbo);
6944 amdgpu_bo_unref(&rbo);
6947 static int dm_plane_helper_check_state(struct drm_plane_state *state,
6948 struct drm_crtc_state *new_crtc_state)
6950 struct drm_framebuffer *fb = state->fb;
6951 int min_downscale, max_upscale;
6953 int max_scale = INT_MAX;
6955 /* Plane enabled? Validate viewport and get scaling factors from plane caps. */
6956 if (fb && state->crtc) {
6957 /* Validate viewport to cover the case when only the position changes */
6958 if (state->plane->type != DRM_PLANE_TYPE_CURSOR) {
6959 int viewport_width = state->crtc_w;
6960 int viewport_height = state->crtc_h;
6962 if (state->crtc_x < 0)
6963 viewport_width += state->crtc_x;
6964 else if (state->crtc_x + state->crtc_w > new_crtc_state->mode.crtc_hdisplay)
6965 viewport_width = new_crtc_state->mode.crtc_hdisplay - state->crtc_x;
6967 if (state->crtc_y < 0)
6968 viewport_height += state->crtc_y;
6969 else if (state->crtc_y + state->crtc_h > new_crtc_state->mode.crtc_vdisplay)
6970 viewport_height = new_crtc_state->mode.crtc_vdisplay - state->crtc_y;
6972 if (viewport_width < 0 || viewport_height < 0) {
6973 DRM_DEBUG_ATOMIC("Plane completely outside of screen\n");
6975 } else if (viewport_width < MIN_VIEWPORT_SIZE*2) { /* x2 for width is because of pipe-split. */
6976 DRM_DEBUG_ATOMIC("Viewport width %d smaller than %d\n", viewport_width, MIN_VIEWPORT_SIZE*2);
6978 } else if (viewport_height < MIN_VIEWPORT_SIZE) {
6979 DRM_DEBUG_ATOMIC("Viewport height %d smaller than %d\n", viewport_height, MIN_VIEWPORT_SIZE);
6985 /* Get min/max allowed scaling factors from plane caps. */
6986 get_min_max_dc_plane_scaling(state->crtc->dev, fb,
6987 &min_downscale, &max_upscale);
6989 * Convert to drm convention: 16.16 fixed point, instead of dc's
6990 * 1.0 == 1000. Also drm scaling is src/dst instead of dc's
6991 * dst/src, so min_scale = 1.0 / max_upscale, etc.
6993 min_scale = (1000 << 16) / max_upscale;
6994 max_scale = (1000 << 16) / min_downscale;
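		/*
		 * Worked example: a DC max_upscale of 16000 (16x in
		 * 1.0 == 1000 units) gives min_scale =
		 * (1000 << 16) / 16000 = 4096, i.e. 1/16 in 16.16 fixed
		 * point.
		 */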
6997 return drm_atomic_helper_check_plane_state(
6998 state, new_crtc_state, min_scale, max_scale, true, true);
7001 static int dm_plane_atomic_check(struct drm_plane *plane,
7002 struct drm_atomic_state *state)
7004 struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state,
7006 struct amdgpu_device *adev = drm_to_adev(plane->dev);
7007 struct dc *dc = adev->dm.dc;
7008 struct dm_plane_state *dm_plane_state;
7009 struct dc_scaling_info scaling_info;
7010 struct drm_crtc_state *new_crtc_state;
7013 trace_amdgpu_dm_plane_atomic_check(new_plane_state);
7015 dm_plane_state = to_dm_plane_state(new_plane_state);
7017 if (!dm_plane_state->dc_state)
7021 drm_atomic_get_new_crtc_state(state,
7022 new_plane_state->crtc);
7023 if (!new_crtc_state)
7026 ret = dm_plane_helper_check_state(new_plane_state, new_crtc_state);
7030 ret = fill_dc_scaling_info(new_plane_state, &scaling_info);
7034 if (dc_validate_plane(dc, dm_plane_state->dc_state) == DC_OK)
7040 static int dm_plane_atomic_async_check(struct drm_plane *plane,
7041 struct drm_atomic_state *state)
7043 /* Only support async updates on cursor planes. */
7044 if (plane->type != DRM_PLANE_TYPE_CURSOR)
7050 static void dm_plane_atomic_async_update(struct drm_plane *plane,
7051 struct drm_atomic_state *state)
7053 struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state,
7055 struct drm_plane_state *old_state =
7056 drm_atomic_get_old_plane_state(state, plane);
7058 trace_amdgpu_dm_atomic_update_cursor(new_state);
7060 swap(plane->state->fb, new_state->fb);
7062 plane->state->src_x = new_state->src_x;
7063 plane->state->src_y = new_state->src_y;
7064 plane->state->src_w = new_state->src_w;
7065 plane->state->src_h = new_state->src_h;
7066 plane->state->crtc_x = new_state->crtc_x;
7067 plane->state->crtc_y = new_state->crtc_y;
7068 plane->state->crtc_w = new_state->crtc_w;
7069 plane->state->crtc_h = new_state->crtc_h;
7071 handle_cursor_update(plane, old_state);
7074 static const struct drm_plane_helper_funcs dm_plane_helper_funcs = {
7075 .prepare_fb = dm_plane_helper_prepare_fb,
7076 .cleanup_fb = dm_plane_helper_cleanup_fb,
7077 .atomic_check = dm_plane_atomic_check,
7078 .atomic_async_check = dm_plane_atomic_async_check,
7079 .atomic_async_update = dm_plane_atomic_async_update
7083 * TODO: these are currently initialized to rgb formats only.
7084 * For future use cases we should either initialize them dynamically based on
7085 * plane capabilities, or initialize this array to all formats, so internal drm
 * check will succeed, and let DC implement the proper checks.
7088 static const uint32_t rgb_formats[] = {
7089 DRM_FORMAT_XRGB8888,
7090 DRM_FORMAT_ARGB8888,
7091 DRM_FORMAT_RGBA8888,
7092 DRM_FORMAT_XRGB2101010,
7093 DRM_FORMAT_XBGR2101010,
7094 DRM_FORMAT_ARGB2101010,
7095 DRM_FORMAT_ABGR2101010,
7096 DRM_FORMAT_XBGR8888,
7097 DRM_FORMAT_ABGR8888,
7101 static const uint32_t overlay_formats[] = {
7102 DRM_FORMAT_XRGB8888,
7103 DRM_FORMAT_ARGB8888,
7104 DRM_FORMAT_RGBA8888,
7105 DRM_FORMAT_XBGR8888,
7106 DRM_FORMAT_ABGR8888,
7110 static const u32 cursor_formats[] = {
7114 static int get_plane_formats(const struct drm_plane *plane,
7115 const struct dc_plane_cap *plane_cap,
7116 uint32_t *formats, int max_formats)
7118 int i, num_formats = 0;
7121 * TODO: Query support for each group of formats directly from
7122 * DC plane caps. This will require adding more formats to the
7126 switch (plane->type) {
7127 case DRM_PLANE_TYPE_PRIMARY:
7128 for (i = 0; i < ARRAY_SIZE(rgb_formats); ++i) {
7129 if (num_formats >= max_formats)
7132 formats[num_formats++] = rgb_formats[i];
7135 if (plane_cap && plane_cap->pixel_format_support.nv12)
7136 formats[num_formats++] = DRM_FORMAT_NV12;
7137 if (plane_cap && plane_cap->pixel_format_support.p010)
7138 formats[num_formats++] = DRM_FORMAT_P010;
7139 if (plane_cap && plane_cap->pixel_format_support.fp16) {
7140 formats[num_formats++] = DRM_FORMAT_XRGB16161616F;
7141 formats[num_formats++] = DRM_FORMAT_ARGB16161616F;
7142 formats[num_formats++] = DRM_FORMAT_XBGR16161616F;
7143 formats[num_formats++] = DRM_FORMAT_ABGR16161616F;
7147 case DRM_PLANE_TYPE_OVERLAY:
7148 for (i = 0; i < ARRAY_SIZE(overlay_formats); ++i) {
7149 if (num_formats >= max_formats)
7152 formats[num_formats++] = overlay_formats[i];
7156 case DRM_PLANE_TYPE_CURSOR:
7157 for (i = 0; i < ARRAY_SIZE(cursor_formats); ++i) {
7158 if (num_formats >= max_formats)
7161 formats[num_formats++] = cursor_formats[i];
7169 static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
7170 struct drm_plane *plane,
7171 unsigned long possible_crtcs,
7172 const struct dc_plane_cap *plane_cap)
7174 uint32_t formats[32];
7177 unsigned int supported_rotations;
7178 uint64_t *modifiers = NULL;
7180 num_formats = get_plane_formats(plane, plane_cap, formats,
7181 ARRAY_SIZE(formats));
7183 res = get_plane_modifiers(dm->adev, plane->type, &modifiers);
7187 res = drm_universal_plane_init(adev_to_drm(dm->adev), plane, possible_crtcs,
7188 &dm_plane_funcs, formats, num_formats,
7189 modifiers, plane->type, NULL);
7194 if (plane->type == DRM_PLANE_TYPE_OVERLAY &&
7195 plane_cap && plane_cap->per_pixel_alpha) {
7196 unsigned int blend_caps = BIT(DRM_MODE_BLEND_PIXEL_NONE) |
7197 BIT(DRM_MODE_BLEND_PREMULTI);
7199 drm_plane_create_alpha_property(plane);
7200 drm_plane_create_blend_mode_property(plane, blend_caps);
7203 if (plane->type == DRM_PLANE_TYPE_PRIMARY &&
7205 (plane_cap->pixel_format_support.nv12 ||
7206 plane_cap->pixel_format_support.p010)) {
7207 /* This only affects YUV formats. */
7208 drm_plane_create_color_properties(
7210 BIT(DRM_COLOR_YCBCR_BT601) |
7211 BIT(DRM_COLOR_YCBCR_BT709) |
7212 BIT(DRM_COLOR_YCBCR_BT2020),
7213 BIT(DRM_COLOR_YCBCR_LIMITED_RANGE) |
7214 BIT(DRM_COLOR_YCBCR_FULL_RANGE),
7215 DRM_COLOR_YCBCR_BT709, DRM_COLOR_YCBCR_LIMITED_RANGE);
7218 supported_rotations =
7219 DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_90 |
7220 DRM_MODE_ROTATE_180 | DRM_MODE_ROTATE_270;
7222 if (dm->adev->asic_type >= CHIP_BONAIRE &&
7223 plane->type != DRM_PLANE_TYPE_CURSOR)
7224 drm_plane_create_rotation_property(plane, DRM_MODE_ROTATE_0,
7225 supported_rotations);
7227 drm_plane_helper_add(plane, &dm_plane_helper_funcs);
7229 /* Create (reset) the plane state */
7230 if (plane->funcs->reset)
7231 plane->funcs->reset(plane);
7236 static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
7237 struct drm_plane *plane,
7238 uint32_t crtc_index)
7240 struct amdgpu_crtc *acrtc = NULL;
7241 struct drm_plane *cursor_plane;
7245 cursor_plane = kzalloc(sizeof(*cursor_plane), GFP_KERNEL);
7249 cursor_plane->type = DRM_PLANE_TYPE_CURSOR;
7250 res = amdgpu_dm_plane_init(dm, cursor_plane, 0, NULL);
7252 acrtc = kzalloc(sizeof(struct amdgpu_crtc), GFP_KERNEL);
7256 res = drm_crtc_init_with_planes(
7261 &amdgpu_dm_crtc_funcs, NULL);
7266 drm_crtc_helper_add(&acrtc->base, &amdgpu_dm_crtc_helper_funcs);
7268 /* Create (reset) the plane state */
7269 if (acrtc->base.funcs->reset)
7270 acrtc->base.funcs->reset(&acrtc->base);
7272 acrtc->max_cursor_width = dm->adev->dm.dc->caps.max_cursor_size;
7273 acrtc->max_cursor_height = dm->adev->dm.dc->caps.max_cursor_size;
7275 acrtc->crtc_id = crtc_index;
7276 acrtc->base.enabled = false;
7277 acrtc->otg_inst = -1;
7279 dm->adev->mode_info.crtcs[crtc_index] = acrtc;
7280 drm_crtc_enable_color_mgmt(&acrtc->base, MAX_COLOR_LUT_ENTRIES,
7281 true, MAX_COLOR_LUT_ENTRIES);
7282 drm_mode_crtc_set_gamma_size(&acrtc->base, MAX_COLOR_LEGACY_LUT_ENTRIES);
7288 kfree(cursor_plane);
7293 static int to_drm_connector_type(enum signal_type st)
7296 case SIGNAL_TYPE_HDMI_TYPE_A:
7297 return DRM_MODE_CONNECTOR_HDMIA;
7298 case SIGNAL_TYPE_EDP:
7299 return DRM_MODE_CONNECTOR_eDP;
7300 case SIGNAL_TYPE_LVDS:
7301 return DRM_MODE_CONNECTOR_LVDS;
7302 case SIGNAL_TYPE_RGB:
7303 return DRM_MODE_CONNECTOR_VGA;
7304 case SIGNAL_TYPE_DISPLAY_PORT:
7305 case SIGNAL_TYPE_DISPLAY_PORT_MST:
7306 return DRM_MODE_CONNECTOR_DisplayPort;
7307 case SIGNAL_TYPE_DVI_DUAL_LINK:
7308 case SIGNAL_TYPE_DVI_SINGLE_LINK:
7309 return DRM_MODE_CONNECTOR_DVID;
7310 case SIGNAL_TYPE_VIRTUAL:
7311 return DRM_MODE_CONNECTOR_VIRTUAL;
7314 return DRM_MODE_CONNECTOR_Unknown;
7318 static struct drm_encoder *amdgpu_dm_connector_to_encoder(struct drm_connector *connector)
7320 struct drm_encoder *encoder;
7322 /* There is only one encoder per connector */
7323 drm_connector_for_each_possible_encoder(connector, encoder)
7329 static void amdgpu_dm_get_native_mode(struct drm_connector *connector)
7331 struct drm_encoder *encoder;
7332 struct amdgpu_encoder *amdgpu_encoder;
7334 encoder = amdgpu_dm_connector_to_encoder(connector);
7336 if (encoder == NULL)
7339 amdgpu_encoder = to_amdgpu_encoder(encoder);
7341 amdgpu_encoder->native_mode.clock = 0;
7343 if (!list_empty(&connector->probed_modes)) {
7344 struct drm_display_mode *preferred_mode = NULL;
7346 list_for_each_entry(preferred_mode,
7347 &connector->probed_modes,
7349 if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED)
7350 amdgpu_encoder->native_mode = *preferred_mode;
7358 static struct drm_display_mode *
7359 amdgpu_dm_create_common_mode(struct drm_encoder *encoder,
7361 int hdisplay, int vdisplay)
7363 struct drm_device *dev = encoder->dev;
7364 struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
7365 struct drm_display_mode *mode = NULL;
7366 struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
7368 mode = drm_mode_duplicate(dev, native_mode);
7373 mode->hdisplay = hdisplay;
7374 mode->vdisplay = vdisplay;
7375 mode->type &= ~DRM_MODE_TYPE_PREFERRED;
7376 strscpy(mode->name, name, DRM_DISPLAY_MODE_LEN);
7382 static void amdgpu_dm_connector_add_common_modes(struct drm_encoder *encoder,
7383 struct drm_connector *connector)
7385 struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
7386 struct drm_display_mode *mode = NULL;
7387 struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
7388 struct amdgpu_dm_connector *amdgpu_dm_connector =
7389 to_amdgpu_dm_connector(connector);
7393 char name[DRM_DISPLAY_MODE_LEN];
7396 } common_modes[] = {
7397 { "640x480", 640, 480},
7398 { "800x600", 800, 600},
7399 { "1024x768", 1024, 768},
7400 { "1280x720", 1280, 720},
7401 { "1280x800", 1280, 800},
7402 {"1280x1024", 1280, 1024},
7403 { "1440x900", 1440, 900},
7404 {"1680x1050", 1680, 1050},
7405 {"1600x1200", 1600, 1200},
7406 {"1920x1080", 1920, 1080},
7407 {"1920x1200", 1920, 1200}
7410 n = ARRAY_SIZE(common_modes);
7412 for (i = 0; i < n; i++) {
7413 struct drm_display_mode *curmode = NULL;
7414 bool mode_existed = false;
7416 if (common_modes[i].w > native_mode->hdisplay ||
7417 common_modes[i].h > native_mode->vdisplay ||
7418 (common_modes[i].w == native_mode->hdisplay &&
7419 common_modes[i].h == native_mode->vdisplay))
7422 list_for_each_entry(curmode, &connector->probed_modes, head) {
7423 if (common_modes[i].w == curmode->hdisplay &&
7424 common_modes[i].h == curmode->vdisplay) {
7425 mode_existed = true;
7433 mode = amdgpu_dm_create_common_mode(encoder,
7434 common_modes[i].name, common_modes[i].w,
7436 drm_mode_probed_add(connector, mode);
7437 amdgpu_dm_connector->num_modes++;
7441 static void amdgpu_dm_connector_ddc_get_modes(struct drm_connector *connector,
7444 struct amdgpu_dm_connector *amdgpu_dm_connector =
7445 to_amdgpu_dm_connector(connector);
7448 /* empty probed_modes */
7449 INIT_LIST_HEAD(&connector->probed_modes);
7450 amdgpu_dm_connector->num_modes =
7451 drm_add_edid_modes(connector, edid);
		/* Sort the probed modes before calling
		 * amdgpu_dm_get_native_mode(), since an EDID can have
		 * more than one preferred mode. Modes that appear later
		 * in the probed mode list could be of higher, preferred
		 * resolution. For example, a 3840x2160 preferred timing
		 * in the base EDID and a 4096x2160 preferred resolution
		 * in a DID extension block later.
7461 drm_mode_sort(&connector->probed_modes);
7462 amdgpu_dm_get_native_mode(connector);
7464 /* Freesync capabilities are reset by calling
		 * drm_add_edid_modes() and need to be restored here.
7468 amdgpu_dm_update_freesync_caps(connector, edid);
7470 amdgpu_dm_connector->num_modes = 0;
7474 static bool is_duplicate_mode(struct amdgpu_dm_connector *aconnector,
7475 struct drm_display_mode *mode)
7477 struct drm_display_mode *m;
7479 list_for_each_entry (m, &aconnector->base.probed_modes, head) {
7480 if (drm_mode_equal(m, mode))
7487 static uint add_fs_modes(struct amdgpu_dm_connector *aconnector)
7489 const struct drm_display_mode *m;
7490 struct drm_display_mode *new_mode;
7492 uint32_t new_modes_count = 0;
7494 /* Standard FPS values
7503 * 60 - Commonly used
7504 * 48,72,96 - Multiples of 24
7506 const uint32_t common_rates[] = { 23976, 24000, 25000, 29970, 30000,
7507 48000, 50000, 60000, 72000, 96000 };
	 * Find the mode with the highest refresh rate and the same resolution
	 * as the preferred mode. Some monitors report a preferred mode
	 * with a lower refresh rate than the highest supported.
7515 m = get_highest_refresh_rate_mode(aconnector, true);
7519 for (i = 0; i < ARRAY_SIZE(common_rates); i++) {
7520 uint64_t target_vtotal, target_vtotal_diff;
7523 if (drm_mode_vrefresh(m) * 1000 < common_rates[i])
7526 if (common_rates[i] < aconnector->min_vfreq * 1000 ||
7527 common_rates[i] > aconnector->max_vfreq * 1000)
7530 num = (unsigned long long)m->clock * 1000 * 1000;
7531 den = common_rates[i] * (unsigned long long)m->htotal;
7532 target_vtotal = div_u64(num, den);
7533 target_vtotal_diff = target_vtotal - m->vtotal;
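		/*
		 * Example with the common 1080p60 CEA timing (clock
		 * 148500 kHz, htotal 2200, vtotal 1125): retargeting 48 Hz
		 * gives target_vtotal =
		 * 148500 * 1000 * 1000 / (48000 * 2200) ~= 1406, i.e.
		 * roughly 281 extra lines of vertical front porch.
		 */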
7535 /* Check for illegal modes */
7536 if (m->vsync_start + target_vtotal_diff < m->vdisplay ||
7537 m->vsync_end + target_vtotal_diff < m->vsync_start ||
7538 m->vtotal + target_vtotal_diff < m->vsync_end)
7541 new_mode = drm_mode_duplicate(aconnector->base.dev, m);
7545 new_mode->vtotal += (u16)target_vtotal_diff;
7546 new_mode->vsync_start += (u16)target_vtotal_diff;
7547 new_mode->vsync_end += (u16)target_vtotal_diff;
7548 new_mode->type &= ~DRM_MODE_TYPE_PREFERRED;
7549 new_mode->type |= DRM_MODE_TYPE_DRIVER;
7551 if (!is_duplicate_mode(aconnector, new_mode)) {
7552 drm_mode_probed_add(&aconnector->base, new_mode);
7553 new_modes_count += 1;
7555 drm_mode_destroy(aconnector->base.dev, new_mode);
7558 return new_modes_count;
7561 static void amdgpu_dm_connector_add_freesync_modes(struct drm_connector *connector,
7564 struct amdgpu_dm_connector *amdgpu_dm_connector =
7565 to_amdgpu_dm_connector(connector);
7567 if (!(amdgpu_freesync_vid_mode && edid))
7570 if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10)
7571 amdgpu_dm_connector->num_modes +=
7572 add_fs_modes(amdgpu_dm_connector);
7575 static int amdgpu_dm_connector_get_modes(struct drm_connector *connector)
7577 struct amdgpu_dm_connector *amdgpu_dm_connector =
7578 to_amdgpu_dm_connector(connector);
7579 struct drm_encoder *encoder;
7580 struct edid *edid = amdgpu_dm_connector->edid;
7582 encoder = amdgpu_dm_connector_to_encoder(connector);
7584 if (!drm_edid_is_valid(edid)) {
7585 amdgpu_dm_connector->num_modes =
7586 drm_add_modes_noedid(connector, 640, 480);
7588 amdgpu_dm_connector_ddc_get_modes(connector, edid);
7589 amdgpu_dm_connector_add_common_modes(encoder, connector);
7590 amdgpu_dm_connector_add_freesync_modes(connector, edid);
7592 amdgpu_dm_fbc_init(connector);
7594 return amdgpu_dm_connector->num_modes;
7597 void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
7598 struct amdgpu_dm_connector *aconnector,
7600 struct dc_link *link,
7603 struct amdgpu_device *adev = drm_to_adev(dm->ddev);
7606 * Some of the properties below require access to state, like bpc.
7607 * Allocate some default initial connector state with our reset helper.
7609 if (aconnector->base.funcs->reset)
7610 aconnector->base.funcs->reset(&aconnector->base);
7612 aconnector->connector_id = link_index;
7613 aconnector->dc_link = link;
7614 aconnector->base.interlace_allowed = false;
7615 aconnector->base.doublescan_allowed = false;
7616 aconnector->base.stereo_allowed = false;
7617 aconnector->base.dpms = DRM_MODE_DPMS_OFF;
7618 aconnector->hpd.hpd = AMDGPU_HPD_NONE; /* not used */
7619 aconnector->audio_inst = -1;
7620 mutex_init(&aconnector->hpd_lock);
	 * Configure HPD hot plug support: connector->polled defaults to 0,
	 * which means HPD hot plug is not supported.
7626 switch (connector_type) {
7627 case DRM_MODE_CONNECTOR_HDMIA:
7628 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
7629 aconnector->base.ycbcr_420_allowed =
7630 link->link_enc->features.hdmi_ycbcr420_supported ? true : false;
7632 case DRM_MODE_CONNECTOR_DisplayPort:
7633 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
7634 aconnector->base.ycbcr_420_allowed =
7635 link->link_enc->features.dp_ycbcr420_supported ? true : false;
7637 case DRM_MODE_CONNECTOR_DVID:
7638 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
7644 drm_object_attach_property(&aconnector->base.base,
7645 dm->ddev->mode_config.scaling_mode_property,
7646 DRM_MODE_SCALE_NONE);
7648 drm_object_attach_property(&aconnector->base.base,
7649 adev->mode_info.underscan_property,
7651 drm_object_attach_property(&aconnector->base.base,
7652 adev->mode_info.underscan_hborder_property,
7654 drm_object_attach_property(&aconnector->base.base,
7655 adev->mode_info.underscan_vborder_property,
7658 if (!aconnector->mst_port)
7659 drm_connector_attach_max_bpc_property(&aconnector->base, 8, 16);
7661 /* This defaults to the max in the range, but we want 8bpc for non-edp. */
7662 aconnector->base.state->max_bpc = (connector_type == DRM_MODE_CONNECTOR_eDP) ? 16 : 8;
7663 aconnector->base.state->max_requested_bpc = aconnector->base.state->max_bpc;
7665 if (connector_type == DRM_MODE_CONNECTOR_eDP &&
7666 (dc_is_dmcu_initialized(adev->dm.dc) || adev->dm.dc->ctx->dmub_srv)) {
7667 drm_object_attach_property(&aconnector->base.base,
7668 adev->mode_info.abm_level_property, 0);
7671 if (connector_type == DRM_MODE_CONNECTOR_HDMIA ||
7672 connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
7673 connector_type == DRM_MODE_CONNECTOR_eDP) {
7674 drm_connector_attach_hdr_output_metadata_property(&aconnector->base);
7676 if (!aconnector->mst_port)
7677 drm_connector_attach_vrr_capable_property(&aconnector->base);
7679 #ifdef CONFIG_DRM_AMD_DC_HDCP
7680 if (adev->dm.hdcp_workqueue)
7681 drm_connector_attach_content_protection_property(&aconnector->base, true);
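/*
 * DM i2c transfer hook: translate the i2c_msg array into a DC
 * i2c_command and submit it over the link's DDC channel.
 */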
7686 static int amdgpu_dm_i2c_xfer(struct i2c_adapter *i2c_adap,
7687 struct i2c_msg *msgs, int num)
7689 struct amdgpu_i2c_adapter *i2c = i2c_get_adapdata(i2c_adap);
7690 struct ddc_service *ddc_service = i2c->ddc_service;
7691 struct i2c_command cmd;
7695 cmd.payloads = kcalloc(num, sizeof(struct i2c_payload), GFP_KERNEL);
7700 cmd.number_of_payloads = num;
7701 cmd.engine = I2C_COMMAND_ENGINE_DEFAULT;
7704 for (i = 0; i < num; i++) {
7705 cmd.payloads[i].write = !(msgs[i].flags & I2C_M_RD);
7706 cmd.payloads[i].address = msgs[i].addr;
7707 cmd.payloads[i].length = msgs[i].len;
7708 cmd.payloads[i].data = msgs[i].buf;
7712 ddc_service->ctx->dc,
7713 ddc_service->ddc_pin->hw_info.ddc_channel,
7717 kfree(cmd.payloads);
7721 static u32 amdgpu_dm_i2c_func(struct i2c_adapter *adap)
7723 return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
7726 static const struct i2c_algorithm amdgpu_dm_i2c_algo = {
7727 .master_xfer = amdgpu_dm_i2c_xfer,
7728 .functionality = amdgpu_dm_i2c_func,
7731 static struct amdgpu_i2c_adapter *
7732 create_i2c(struct ddc_service *ddc_service,
7736 struct amdgpu_device *adev = ddc_service->ctx->driver_context;
7737 struct amdgpu_i2c_adapter *i2c;
7739 i2c = kzalloc(sizeof(struct amdgpu_i2c_adapter), GFP_KERNEL);
7742 i2c->base.owner = THIS_MODULE;
7743 i2c->base.class = I2C_CLASS_DDC;
7744 i2c->base.dev.parent = &adev->pdev->dev;
7745 i2c->base.algo = &amdgpu_dm_i2c_algo;
7746 snprintf(i2c->base.name, sizeof(i2c->base.name), "AMDGPU DM i2c hw bus %d", link_index);
7747 i2c_set_adapdata(&i2c->base, i2c);
7748 i2c->ddc_service = ddc_service;
7749 i2c->ddc_service->ddc_pin->hw_info.ddc_channel = link_index;
7756 * Note: this function assumes that dc_link_detect() was called for the
7757 * dc_link which will be represented by this aconnector.
7759 static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
7760 struct amdgpu_dm_connector *aconnector,
7761 uint32_t link_index,
7762 struct amdgpu_encoder *aencoder)
7766 struct dc *dc = dm->dc;
7767 struct dc_link *link = dc_get_link_at_index(dc, link_index);
7768 struct amdgpu_i2c_adapter *i2c;
7770 link->priv = aconnector;
7772 DRM_DEBUG_DRIVER("%s()\n", __func__);
7774 i2c = create_i2c(link->ddc, link->link_index, &res);
7776 DRM_ERROR("Failed to create i2c adapter data\n");
7780 aconnector->i2c = i2c;
7781 res = i2c_add_adapter(&i2c->base);
7784 DRM_ERROR("Failed to register hw i2c %d\n", link->link_index);
7788 connector_type = to_drm_connector_type(link->connector_signal);
7790 res = drm_connector_init_with_ddc(
7793 &amdgpu_dm_connector_funcs,
7798 DRM_ERROR("connector_init failed\n");
7799 aconnector->connector_id = -1;
7803 drm_connector_helper_add(
7805 &amdgpu_dm_connector_helper_funcs);
7807 amdgpu_dm_connector_init_helper(
7814 drm_connector_attach_encoder(
7815 &aconnector->base, &aencoder->base);
7817 if (connector_type == DRM_MODE_CONNECTOR_DisplayPort
7818 || connector_type == DRM_MODE_CONNECTOR_eDP)
7819 amdgpu_dm_initialize_dp_connector(dm, aconnector, link->link_index);
7824 aconnector->i2c = NULL;
7829 int amdgpu_dm_get_encoder_crtc_mask(struct amdgpu_device *adev)
7831 switch (adev->mode_info.num_crtc) {
7848 static int amdgpu_dm_encoder_init(struct drm_device *dev,
7849 struct amdgpu_encoder *aencoder,
7850 uint32_t link_index)
7852 struct amdgpu_device *adev = drm_to_adev(dev);
7854 int res = drm_encoder_init(dev,
7856 &amdgpu_dm_encoder_funcs,
7857 DRM_MODE_ENCODER_TMDS,
7860 aencoder->base.possible_crtcs = amdgpu_dm_get_encoder_crtc_mask(adev);
7863 aencoder->encoder_id = link_index;
7865 aencoder->encoder_id = -1;
7867 drm_encoder_helper_add(&aencoder->base, &amdgpu_dm_encoder_helper_funcs);
7872 static void manage_dm_interrupts(struct amdgpu_device *adev,
7873 struct amdgpu_crtc *acrtc,
7877 * We have no guarantee that the frontend index maps to the same
7878 * backend index - some even map to more than one.
7880 * TODO: Use a different interrupt or check DC itself for the mapping.
7883 amdgpu_display_crtc_idx_to_irq_type(
7888 drm_crtc_vblank_on(&acrtc->base);
7891 &adev->pageflip_irq,
7893 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
7900 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
7908 &adev->pageflip_irq,
7910 drm_crtc_vblank_off(&acrtc->base);
7914 static void dm_update_pflip_irq_state(struct amdgpu_device *adev,
7915 struct amdgpu_crtc *acrtc)
7918 amdgpu_display_crtc_idx_to_irq_type(adev, acrtc->crtc_id);
7921 * This reads the current state for the IRQ and force reapplies
7922 * the setting to hardware.
7924 amdgpu_irq_update(adev, &adev->pageflip_irq, irq_type);
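/*
 * Return true when the scaling mode or the underscan configuration
 * changed in a way that requires the stream to be reprogrammed.
 */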
7928 is_scaling_state_different(const struct dm_connector_state *dm_state,
7929 const struct dm_connector_state *old_dm_state)
7931 if (dm_state->scaling != old_dm_state->scaling)
7933 if (!dm_state->underscan_enable && old_dm_state->underscan_enable) {
7934 if (old_dm_state->underscan_hborder != 0 && old_dm_state->underscan_vborder != 0)
7936 } else if (dm_state->underscan_enable && !old_dm_state->underscan_enable) {
7937 if (dm_state->underscan_hborder != 0 && dm_state->underscan_vborder != 0)
7939 } else if (dm_state->underscan_hborder != old_dm_state->underscan_hborder ||
7940 dm_state->underscan_vborder != old_dm_state->underscan_vborder)
7945 #ifdef CONFIG_DRM_AMD_DC_HDCP
7946 static bool is_content_protection_different(struct drm_connector_state *state,
7947 const struct drm_connector_state *old_state,
7948 const struct drm_connector *connector, struct hdcp_workqueue *hdcp_w)
7950 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
7951 struct dm_connector_state *dm_con_state = to_dm_connector_state(connector->state);
7953 /* Handle: Type0/1 change */
7954 if (old_state->hdcp_content_type != state->hdcp_content_type &&
7955 state->content_protection != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
7956 state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
	/* CP is being re-enabled, ignore this
7962 * Handles: ENABLED -> DESIRED
7964 if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED &&
7965 state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) {
7966 state->content_protection = DRM_MODE_CONTENT_PROTECTION_ENABLED;
7970 /* S3 resume case, since old state will always be 0 (UNDESIRED) and the restored state will be ENABLED
7972 * Handles: UNDESIRED -> ENABLED
7974 if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_UNDESIRED &&
7975 state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
7976 state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
	/* Check if something is connected or enabled; otherwise we would start
	 * HDCP with nothing connected/enabled (hot-plug, headless S3, DPMS).
7981 * Handles: DESIRED -> DESIRED (Special case)
7983 if (dm_con_state->update_hdcp && state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED &&
7984 connector->dpms == DRM_MODE_DPMS_ON && aconnector->dc_sink != NULL) {
7985 dm_con_state->update_hdcp = false;
7990 * Handles: UNDESIRED -> UNDESIRED
7991 * DESIRED -> DESIRED
7992 * ENABLED -> ENABLED
7994 if (old_state->content_protection == state->content_protection)
7998 * Handles: UNDESIRED -> DESIRED
7999 * DESIRED -> UNDESIRED
8000 * ENABLED -> UNDESIRED
8002 if (state->content_protection != DRM_MODE_CONTENT_PROTECTION_ENABLED)
8006 * Handles: DESIRED -> ENABLED
8012 static void remove_stream(struct amdgpu_device *adev,
8013 struct amdgpu_crtc *acrtc,
8014 struct dc_stream_state *stream)
8016 /* this is the update mode case */
8018 acrtc->otg_inst = -1;
8019 acrtc->enabled = false;
8022 static int get_cursor_position(struct drm_plane *plane, struct drm_crtc *crtc,
8023 struct dc_cursor_position *position)
8025 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
8027 int xorigin = 0, yorigin = 0;
8029 if (!crtc || !plane->state->fb)
8032 if ((plane->state->crtc_w > amdgpu_crtc->max_cursor_width) ||
8033 (plane->state->crtc_h > amdgpu_crtc->max_cursor_height)) {
8034 DRM_ERROR("%s: bad cursor width or height %d x %d\n",
8036 plane->state->crtc_w,
8037 plane->state->crtc_h);
8041 x = plane->state->crtc_x;
8042 y = plane->state->crtc_y;
8044 if (x <= -amdgpu_crtc->max_cursor_width ||
8045 y <= -amdgpu_crtc->max_cursor_height)
8049 xorigin = min(-x, amdgpu_crtc->max_cursor_width - 1);
8053 yorigin = min(-y, amdgpu_crtc->max_cursor_height - 1);
8056 position->enable = true;
8057 position->translate_by_source = true;
8060 position->x_hotspot = xorigin;
8061 position->y_hotspot = yorigin;
8066 static void handle_cursor_update(struct drm_plane *plane,
8067 struct drm_plane_state *old_plane_state)
8069 struct amdgpu_device *adev = drm_to_adev(plane->dev);
8070 struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(plane->state->fb);
8071 struct drm_crtc *crtc = afb ? plane->state->crtc : old_plane_state->crtc;
8072 struct dm_crtc_state *crtc_state = crtc ? to_dm_crtc_state(crtc->state) : NULL;
8073 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
8074 uint64_t address = afb ? afb->address : 0;
8075 struct dc_cursor_position position = {0};
8076 struct dc_cursor_attributes attributes;
8079 if (!plane->state->fb && !old_plane_state->fb)
8082 DC_LOG_CURSOR("%s: crtc_id=%d with size %d to %d\n",
8084 amdgpu_crtc->crtc_id,
8085 plane->state->crtc_w,
8086 plane->state->crtc_h);
8088 ret = get_cursor_position(plane, crtc, &position);
8092 if (!position.enable) {
8093 /* turn off cursor */
8094 if (crtc_state && crtc_state->stream) {
8095 mutex_lock(&adev->dm.dc_lock);
8096 dc_stream_set_cursor_position(crtc_state->stream,
8098 mutex_unlock(&adev->dm.dc_lock);
8103 amdgpu_crtc->cursor_width = plane->state->crtc_w;
8104 amdgpu_crtc->cursor_height = plane->state->crtc_h;
8106 memset(&attributes, 0, sizeof(attributes));
8107 attributes.address.high_part = upper_32_bits(address);
8108 attributes.address.low_part = lower_32_bits(address);
8109 attributes.width = plane->state->crtc_w;
8110 attributes.height = plane->state->crtc_h;
8111 attributes.color_format = CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA;
8112 attributes.rotation_angle = 0;
8113 attributes.attribute_flags.value = 0;
8115 attributes.pitch = afb->base.pitches[0] / afb->base.format->cpp[0];
8117 if (crtc_state->stream) {
8118 mutex_lock(&adev->dm.dc_lock);
8119 if (!dc_stream_set_cursor_attributes(crtc_state->stream,
8121 DRM_ERROR("DC failed to set cursor attributes\n");
8123 if (!dc_stream_set_cursor_position(crtc_state->stream,
8125 DRM_ERROR("DC failed to set cursor position\n");
8126 mutex_unlock(&adev->dm.dc_lock);
8130 static void prepare_flip_isr(struct amdgpu_crtc *acrtc)
8133 assert_spin_locked(&acrtc->base.dev->event_lock);
8134 WARN_ON(acrtc->event);
8136 acrtc->event = acrtc->base.state->event;
8138 /* Set the flip status */
8139 acrtc->pflip_status = AMDGPU_FLIP_SUBMITTED;
8141 /* Mark this event as consumed */
8142 acrtc->base.state->event = NULL;
8144 DC_LOG_PFLIP("crtc:%d, pflip_stat:AMDGPU_FLIP_SUBMITTED\n",
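/*
 * Per-flip VRR bookkeeping: refresh the VRR parameters and rebuild the
 * VRR infopacket under the event lock, so the vblank/pflip IRQ handlers
 * always observe a consistent freesync state.
 */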
8148 static void update_freesync_state_on_stream(
8149 struct amdgpu_display_manager *dm,
8150 struct dm_crtc_state *new_crtc_state,
8151 struct dc_stream_state *new_stream,
8152 struct dc_plane_state *surface,
8153 u32 flip_timestamp_in_us)
8155 struct mod_vrr_params vrr_params;
8156 struct dc_info_packet vrr_infopacket = {0};
8157 struct amdgpu_device *adev = dm->adev;
8158 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_crtc_state->base.crtc);
8159 unsigned long flags;
8160 bool pack_sdp_v1_3 = false;
8166 * TODO: Determine why min/max totals and vrefresh can be 0 here.
8167 * For now it's sufficient to just guard against these conditions.
8170 if (!new_stream->timing.h_total || !new_stream->timing.v_total)
8173 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
8174 vrr_params = acrtc->dm_irq_params.vrr_params;
8177 mod_freesync_handle_preflip(
8178 dm->freesync_module,
8181 flip_timestamp_in_us,
8184 if (adev->family < AMDGPU_FAMILY_AI &&
8185 amdgpu_dm_vrr_active(new_crtc_state)) {
8186 mod_freesync_handle_v_update(dm->freesync_module,
8187 new_stream, &vrr_params);
8189 /* Need to call this before the frame ends. */
8190 dc_stream_adjust_vmin_vmax(dm->dc,
8191 new_crtc_state->stream,
8192 &vrr_params.adjust);
8196 mod_freesync_build_vrr_infopacket(
8197 dm->freesync_module,
8201 TRANSFER_FUNC_UNKNOWN,
8205 new_crtc_state->freesync_timing_changed |=
8206 (memcmp(&acrtc->dm_irq_params.vrr_params.adjust,
8208 sizeof(vrr_params.adjust)) != 0);
8210 new_crtc_state->freesync_vrr_info_changed |=
8211 (memcmp(&new_crtc_state->vrr_infopacket,
8213 sizeof(vrr_infopacket)) != 0);
8215 acrtc->dm_irq_params.vrr_params = vrr_params;
8216 new_crtc_state->vrr_infopacket = vrr_infopacket;
8218 new_stream->adjust = acrtc->dm_irq_params.vrr_params.adjust;
8219 new_stream->vrr_infopacket = vrr_infopacket;
8221 if (new_crtc_state->freesync_vrr_info_changed)
8222 DRM_DEBUG_KMS("VRR packet update: crtc=%u enabled=%d state=%d",
8223 new_crtc_state->base.crtc->base.id,
8224 (int)new_crtc_state->base.vrr_enabled,
8225 (int)vrr_params.state);
8227 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
8230 static void update_stream_irq_parameters(
8231 struct amdgpu_display_manager *dm,
8232 struct dm_crtc_state *new_crtc_state)
8234 struct dc_stream_state *new_stream = new_crtc_state->stream;
8235 struct mod_vrr_params vrr_params;
8236 struct mod_freesync_config config = new_crtc_state->freesync_config;
8237 struct amdgpu_device *adev = dm->adev;
8238 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_crtc_state->base.crtc);
8239 unsigned long flags;
8245 * TODO: Determine why min/max totals and vrefresh can be 0 here.
8246 * For now it's sufficient to just guard against these conditions.
8248 if (!new_stream->timing.h_total || !new_stream->timing.v_total)
8251 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
8252 vrr_params = acrtc->dm_irq_params.vrr_params;
8254 if (new_crtc_state->vrr_supported &&
8255 config.min_refresh_in_uhz &&
8256 config.max_refresh_in_uhz) {
8258 * if freesync compatible mode was set, config.state will be set
8261 if (config.state == VRR_STATE_ACTIVE_FIXED && config.fixed_refresh_in_uhz &&
8262 (!drm_atomic_crtc_needs_modeset(&new_crtc_state->base) ||
8263 new_crtc_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED)) {
8264 vrr_params.max_refresh_in_uhz = config.max_refresh_in_uhz;
8265 vrr_params.min_refresh_in_uhz = config.min_refresh_in_uhz;
8266 vrr_params.fixed_refresh_in_uhz = config.fixed_refresh_in_uhz;
8267 vrr_params.state = VRR_STATE_ACTIVE_FIXED;
8269 config.state = new_crtc_state->base.vrr_enabled ?
8270 VRR_STATE_ACTIVE_VARIABLE :
8274 config.state = VRR_STATE_UNSUPPORTED;
8277 mod_freesync_build_vrr_params(dm->freesync_module,
8279 &config, &vrr_params);
8281 new_crtc_state->freesync_timing_changed |=
8282 (memcmp(&acrtc->dm_irq_params.vrr_params.adjust,
8283 &vrr_params.adjust, sizeof(vrr_params.adjust)) != 0);
8285 new_crtc_state->freesync_config = config;
8286 /* Copy state for access from DM IRQ handler */
8287 acrtc->dm_irq_params.freesync_config = config;
8288 acrtc->dm_irq_params.active_planes = new_crtc_state->active_planes;
8289 acrtc->dm_irq_params.vrr_params = vrr_params;
8290 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
8293 static void amdgpu_dm_handle_vrr_transition(struct dm_crtc_state *old_state,
8294 struct dm_crtc_state *new_state)
8296 bool old_vrr_active = amdgpu_dm_vrr_active(old_state);
8297 bool new_vrr_active = amdgpu_dm_vrr_active(new_state);
8299 if (!old_vrr_active && new_vrr_active) {
8300 /* Transition VRR inactive -> active:
8301 * While VRR is active, we must not disable the vblank irq, as a
8302 * re-enable after a disable would compute bogus vblank/pflip
8303 * timestamps if it happened to land inside the display front porch.
8305 * We also need the vupdate irq for the actual core vblank handling at end of vblank.
8308 dm_set_vupdate_irq(new_state->base.crtc, true);
8309 drm_crtc_vblank_get(new_state->base.crtc);
8310 DRM_DEBUG_DRIVER("%s: crtc=%u VRR off->on: Get vblank ref\n",
8311 __func__, new_state->base.crtc->base.id);
8312 } else if (old_vrr_active && !new_vrr_active) {
8313 /* Transition VRR active -> inactive:
8314 * Allow vblank irq disable again for fixed refresh rate.
8316 dm_set_vupdate_irq(new_state->base.crtc, false);
8317 drm_crtc_vblank_put(new_state->base.crtc);
8318 DRM_DEBUG_DRIVER("%s: crtc=%u VRR on->off: Drop vblank ref\n",
8319 __func__, new_state->base.crtc->base.id);
8323 static void amdgpu_dm_commit_cursors(struct drm_atomic_state *state)
8325 struct drm_plane *plane;
8326 struct drm_plane_state *old_plane_state;
8330 * TODO: Make this per-stream so we don't issue redundant updates for
8331 * commits with multiple streams.
8333 for_each_old_plane_in_state(state, plane, old_plane_state, i)
8334 if (plane->type == DRM_PLANE_TYPE_CURSOR)
8335 handle_cursor_update(plane, old_plane_state);
8338 static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
8339 struct dc_state *dc_state,
8340 struct drm_device *dev,
8341 struct amdgpu_display_manager *dm,
8342 struct drm_crtc *pcrtc,
8343 bool wait_for_vblank)
8346 uint64_t timestamp_ns;
8347 struct drm_plane *plane;
8348 struct drm_plane_state *old_plane_state, *new_plane_state;
8349 struct amdgpu_crtc *acrtc_attach = to_amdgpu_crtc(pcrtc);
8350 struct drm_crtc_state *new_pcrtc_state =
8351 drm_atomic_get_new_crtc_state(state, pcrtc);
8352 struct dm_crtc_state *acrtc_state = to_dm_crtc_state(new_pcrtc_state);
8353 struct dm_crtc_state *dm_old_crtc_state =
8354 to_dm_crtc_state(drm_atomic_get_old_crtc_state(state, pcrtc));
8355 int planes_count = 0, vpos, hpos;
8357 unsigned long flags;
8358 struct amdgpu_bo *abo;
8359 uint32_t target_vblank, last_flip_vblank;
8360 bool vrr_active = amdgpu_dm_vrr_active(acrtc_state);
8361 bool pflip_present = false;
8363 struct dc_surface_update surface_updates[MAX_SURFACES];
8364 struct dc_plane_info plane_infos[MAX_SURFACES];
8365 struct dc_scaling_info scaling_infos[MAX_SURFACES];
8366 struct dc_flip_addrs flip_addrs[MAX_SURFACES];
8367 struct dc_stream_update stream_update;
8370 bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
8373 dm_error("Failed to allocate update bundle\n");
8378 * Disable the cursor first if we're disabling all the planes.
8379 * It would otherwise remain on the screen after the planes are re-enabled.
8382 if (acrtc_state->active_planes == 0)
8383 amdgpu_dm_commit_cursors(state);
8385 /* update planes when needed */
8386 for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
8387 struct drm_crtc *crtc = new_plane_state->crtc;
8388 struct drm_crtc_state *new_crtc_state;
8389 struct drm_framebuffer *fb = new_plane_state->fb;
8390 struct amdgpu_framebuffer *afb = (struct amdgpu_framebuffer *)fb;
8391 bool plane_needs_flip;
8392 struct dc_plane_state *dc_plane;
8393 struct dm_plane_state *dm_new_plane_state = to_dm_plane_state(new_plane_state);
8395 /* Cursor plane is handled after stream updates */
8396 if (plane->type == DRM_PLANE_TYPE_CURSOR)
8399 if (!fb || !crtc || pcrtc != crtc)
8402 new_crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
8403 if (!new_crtc_state->active)
8406 dc_plane = dm_new_plane_state->dc_state;
8408 bundle->surface_updates[planes_count].surface = dc_plane;
8409 if (new_pcrtc_state->color_mgmt_changed) {
8410 bundle->surface_updates[planes_count].gamma = dc_plane->gamma_correction;
8411 bundle->surface_updates[planes_count].in_transfer_func = dc_plane->in_transfer_func;
8412 bundle->surface_updates[planes_count].gamut_remap_matrix = &dc_plane->gamut_remap_matrix;
8415 fill_dc_scaling_info(new_plane_state,
8416 &bundle->scaling_infos[planes_count]);
8418 bundle->surface_updates[planes_count].scaling_info =
8419 &bundle->scaling_infos[planes_count];
8421 plane_needs_flip = old_plane_state->fb && new_plane_state->fb;
8423 pflip_present = pflip_present || plane_needs_flip;
8425 if (!plane_needs_flip) {
8430 abo = gem_to_amdgpu_bo(fb->obj[0]);
8433 * Wait for all fences on this FB. Do a limited wait to avoid
8434 * deadlock during GPU reset, when this fence will not signal
8435 * but we hold the reservation lock for the BO.
8437 r = dma_resv_wait_timeout_rcu(abo->tbo.base.resv, true,
8439 msecs_to_jiffies(5000));
8440 if (unlikely(r <= 0))
8441 DRM_ERROR("Waiting for fences timed out!");
8443 fill_dc_plane_info_and_addr(
8444 dm->adev, new_plane_state,
8446 &bundle->plane_infos[planes_count],
8447 &bundle->flip_addrs[planes_count].address,
8448 afb->tmz_surface, false);
8450 DRM_DEBUG_ATOMIC("plane: id=%d dcc_en=%d\n",
8451 new_plane_state->plane->index,
8452 bundle->plane_infos[planes_count].dcc.enable);
8454 bundle->surface_updates[planes_count].plane_info =
8455 &bundle->plane_infos[planes_count];
8458 * Only allow immediate flips for fast updates that don't
8459 * change FB pitch, DCC state, rotation or mirroring.
8461 bundle->flip_addrs[planes_count].flip_immediate =
8462 crtc->state->async_flip &&
8463 acrtc_state->update_type == UPDATE_TYPE_FAST;
8465 timestamp_ns = ktime_get_ns();
8466 bundle->flip_addrs[planes_count].flip_timestamp_in_us = div_u64(timestamp_ns, 1000);
8467 bundle->surface_updates[planes_count].flip_addr = &bundle->flip_addrs[planes_count];
8468 bundle->surface_updates[planes_count].surface = dc_plane;
8470 if (!bundle->surface_updates[planes_count].surface) {
8471 DRM_ERROR("No surface for CRTC: id=%d\n",
8472 acrtc_attach->crtc_id);
8476 if (plane == pcrtc->primary)
8477 update_freesync_state_on_stream(
8480 acrtc_state->stream,
8482 bundle->flip_addrs[planes_count].flip_timestamp_in_us);
8484 DRM_DEBUG_ATOMIC("%s Flipping to hi: 0x%x, low: 0x%x\n",
8486 bundle->flip_addrs[planes_count].address.grph.addr.high_part,
8487 bundle->flip_addrs[planes_count].address.grph.addr.low_part);
8493 if (pflip_present) {
8495 /* Use old throttling in non-vrr fixed refresh rate mode
8496 * to keep flip scheduling based on target vblank counts
8497 * working in a backwards compatible way, e.g., for
8498 * clients using the GLX_OML_sync_control extension or
8499 * DRI3/Present extension with defined target_msc.
8501 last_flip_vblank = amdgpu_get_vblank_counter_kms(pcrtc);
8504 /* For variable refresh rate mode only:
8505 * Get vblank of last completed flip to avoid > 1 vrr
8506 * flips per video frame by use of throttling, but allow
8507 * flip programming anywhere in the possibly large
8508 * variable vrr vblank interval for fine-grained flip
8509 * timing control and more opportunity to avoid stutter
8510 * on late submission of flips.
8512 spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
8513 last_flip_vblank = acrtc_attach->dm_irq_params.last_flip_vblank;
8514 spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
8517 target_vblank = last_flip_vblank + wait_for_vblank;
8520 * Wait until we're out of the vertical blank period before the one
8521 * targeted by the flip
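 * Note: the comparison below is done as a signed 32-bit difference,
 * (int)(target_vblank - current counter), so the throttle keeps
 * working when the vblank counter wraps around.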
8523 while ((acrtc_attach->enabled &&
8524 (amdgpu_display_get_crtc_scanoutpos(dm->ddev, acrtc_attach->crtc_id,
8525 0, &vpos, &hpos, NULL,
8526 NULL, &pcrtc->hwmode)
8527 & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK)) ==
8528 (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK) &&
8529 (int)(target_vblank -
8530 amdgpu_get_vblank_counter_kms(pcrtc)) > 0)) {
8531 usleep_range(1000, 1100);
8535 * Prepare the flip event for the pageflip interrupt to handle.
8537 * This only works in the case where we've already turned on the
8538 * appropriate hardware blocks (e.g. HUBP) so in the transition case
8539 * from 0 -> n planes we have to skip a hardware generated event
8540 * and rely on sending it from software.
8542 if (acrtc_attach->base.state->event &&
8543 acrtc_state->active_planes > 0) {
8544 drm_crtc_vblank_get(pcrtc);
8546 spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
8548 WARN_ON(acrtc_attach->pflip_status != AMDGPU_FLIP_NONE);
8549 prepare_flip_isr(acrtc_attach);
8551 spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
8554 if (acrtc_state->stream) {
8555 if (acrtc_state->freesync_vrr_info_changed)
8556 bundle->stream_update.vrr_infopacket =
8557 &acrtc_state->stream->vrr_infopacket;
8561 /* Update the planes if changed or disable if we don't have any. */
8562 if ((planes_count || acrtc_state->active_planes == 0) &&
8563 acrtc_state->stream) {
8564 bundle->stream_update.stream = acrtc_state->stream;
8565 if (new_pcrtc_state->mode_changed) {
8566 bundle->stream_update.src = acrtc_state->stream->src;
8567 bundle->stream_update.dst = acrtc_state->stream->dst;
8570 if (new_pcrtc_state->color_mgmt_changed) {
8572 * TODO: This isn't fully correct since we've actually
8573 * already modified the stream in place.
8575 bundle->stream_update.gamut_remap =
8576 &acrtc_state->stream->gamut_remap_matrix;
8577 bundle->stream_update.output_csc_transform =
8578 &acrtc_state->stream->csc_color_matrix;
8579 bundle->stream_update.out_transfer_func =
8580 acrtc_state->stream->out_transfer_func;
8583 acrtc_state->stream->abm_level = acrtc_state->abm_level;
8584 if (acrtc_state->abm_level != dm_old_crtc_state->abm_level)
8585 bundle->stream_update.abm_level = &acrtc_state->abm_level;
8588 * If FreeSync state on the stream has changed then we need to
8589 * re-adjust the min/max bounds now that DC doesn't handle this
8590 * as part of commit.
8592 if (is_dc_timing_adjust_needed(dm_old_crtc_state, acrtc_state)) {
8593 spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
8594 dc_stream_adjust_vmin_vmax(
8595 dm->dc, acrtc_state->stream,
8596 &acrtc_attach->dm_irq_params.vrr_params.adjust);
8597 spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
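/*
 * PSR cannot stay active while DC programs a full (non-fast) update:
 * it is disabled here for update types above UPDATE_TYPE_FAST and,
 * further below, set up or re-enabled again once only fast updates
 * are flowing on the stream.
 */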
8599 mutex_lock(&dm->dc_lock);
8600 if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
8601 acrtc_state->stream->link->psr_settings.psr_allow_active)
8602 amdgpu_dm_psr_disable(acrtc_state->stream);
8604 dc_commit_updates_for_stream(dm->dc,
8605 bundle->surface_updates,
8607 acrtc_state->stream,
8608 &bundle->stream_update,
8612 * Enable or disable the interrupts on the backend.
8614 * Most pipes are put into power gating when unused.
8616 * When power gating is enabled on a pipe we lose the
8617 * interrupt enablement state when power gating is disabled.
8619 * So we need to update the IRQ control state in hardware
8620 * whenever the pipe turns on (since it could be previously
8621 * power gated) or off (since some pipes can't be power gated on some ASICs).
8624 if (dm_old_crtc_state->active_planes != acrtc_state->active_planes)
8625 dm_update_pflip_irq_state(drm_to_adev(dev),
8628 if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
8629 acrtc_state->stream->link->psr_settings.psr_version != DC_PSR_VERSION_UNSUPPORTED &&
8630 !acrtc_state->stream->link->psr_settings.psr_feature_enabled)
8631 amdgpu_dm_link_setup_psr(acrtc_state->stream);
8632 else if ((acrtc_state->update_type == UPDATE_TYPE_FAST) &&
8633 acrtc_state->stream->link->psr_settings.psr_feature_enabled &&
8634 !acrtc_state->stream->link->psr_settings.psr_allow_active) {
8635 amdgpu_dm_psr_enable(acrtc_state->stream);
8638 mutex_unlock(&dm->dc_lock);
8642 * Update cursor state *after* programming all the planes.
8643 * This avoids redundant programming in the case where we're going
8644 * to be disabling a single plane - those pipes are being disabled.
8646 if (acrtc_state->active_planes)
8647 amdgpu_dm_commit_cursors(state);
8653 static void amdgpu_dm_commit_audio(struct drm_device *dev,
8654 struct drm_atomic_state *state)
8656 struct amdgpu_device *adev = drm_to_adev(dev);
8657 struct amdgpu_dm_connector *aconnector;
8658 struct drm_connector *connector;
8659 struct drm_connector_state *old_con_state, *new_con_state;
8660 struct drm_crtc_state *new_crtc_state;
8661 struct dm_crtc_state *new_dm_crtc_state;
8662 const struct dc_stream_status *status;
8665 /* Notify audio device removals. */
8666 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
8667 if (old_con_state->crtc != new_con_state->crtc) {
8668 /* CRTC changes require notification. */
8672 if (!new_con_state->crtc)
8675 new_crtc_state = drm_atomic_get_new_crtc_state(
8676 state, new_con_state->crtc);
8678 if (!new_crtc_state)
8681 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
8685 aconnector = to_amdgpu_dm_connector(connector);
8687 mutex_lock(&adev->dm.audio_lock);
8688 inst = aconnector->audio_inst;
8689 aconnector->audio_inst = -1;
8690 mutex_unlock(&adev->dm.audio_lock);
8692 amdgpu_dm_audio_eld_notify(adev, inst);
8695 /* Notify audio device additions. */
8696 for_each_new_connector_in_state(state, connector, new_con_state, i) {
8697 if (!new_con_state->crtc)
8700 new_crtc_state = drm_atomic_get_new_crtc_state(
8701 state, new_con_state->crtc);
8703 if (!new_crtc_state)
8706 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
8709 new_dm_crtc_state = to_dm_crtc_state(new_crtc_state);
8710 if (!new_dm_crtc_state->stream)
8713 status = dc_stream_get_status(new_dm_crtc_state->stream);
8717 aconnector = to_amdgpu_dm_connector(connector);
8719 mutex_lock(&adev->dm.audio_lock);
8720 inst = status->audio_inst;
8721 aconnector->audio_inst = inst;
8722 mutex_unlock(&adev->dm.audio_lock);
8724 amdgpu_dm_audio_eld_notify(adev, inst);
8729 * amdgpu_dm_crtc_copy_transient_flags - copy mirrored flags from DRM to DC
8730 * @crtc_state: the DRM CRTC state
8731 * @stream_state: the DC stream state.
8733 * Copy the mirrored transient state flags from DRM to DC. It is used to bring
8734 * a dc_stream_state's flags in sync with a drm_crtc_state's flags.
8736 static void amdgpu_dm_crtc_copy_transient_flags(struct drm_crtc_state *crtc_state,
8737 struct dc_stream_state *stream_state)
8739 stream_state->mode_changed = drm_atomic_crtc_needs_modeset(crtc_state);
8743 * amdgpu_dm_atomic_commit_tail() - AMDgpu DM's commit tail implementation.
8744 * @state: The atomic state to commit
8746 * This will tell DC to commit the constructed DC state from atomic_check,
8747 * programming the hardware. Any failure here implies a hardware failure, since
8748 * atomic check should have filtered anything non-kosher.
8750 static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
8752 struct drm_device *dev = state->dev;
8753 struct amdgpu_device *adev = drm_to_adev(dev);
8754 struct amdgpu_display_manager *dm = &adev->dm;
8755 struct dm_atomic_state *dm_state;
8756 struct dc_state *dc_state = NULL, *dc_state_temp = NULL;
8758 struct drm_crtc *crtc;
8759 struct drm_crtc_state *old_crtc_state, *new_crtc_state;
8760 unsigned long flags;
8761 bool wait_for_vblank = true;
8762 struct drm_connector *connector;
8763 struct drm_connector_state *old_con_state, *new_con_state;
8764 struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
8765 int crtc_disable_count = 0;
8766 bool mode_set_reset_required = false;
8768 trace_amdgpu_dm_atomic_commit_tail_begin(state);
8770 drm_atomic_helper_update_legacy_modeset_state(dev, state);
8772 dm_state = dm_atomic_get_new_state(state);
8773 if (dm_state && dm_state->context) {
8774 dc_state = dm_state->context;
8776 /* No state changes, retain current state. */
8777 dc_state_temp = dc_create_state(dm->dc);
8778 ASSERT(dc_state_temp);
8779 dc_state = dc_state_temp;
8780 dc_resource_state_copy_construct_current(dm->dc, dc_state);
8783 for_each_oldnew_crtc_in_state (state, crtc, old_crtc_state,
8784 new_crtc_state, i) {
8785 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
8787 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
8789 if (old_crtc_state->active &&
8790 (!new_crtc_state->active ||
8791 drm_atomic_crtc_needs_modeset(new_crtc_state))) {
8792 manage_dm_interrupts(adev, acrtc, false);
8793 dc_stream_release(dm_old_crtc_state->stream);
8797 drm_atomic_helper_calc_timestamping_constants(state);
8799 /* update changed items */
8800 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
8801 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
8803 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8804 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
8807 "amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
8808 "planes_changed:%d, mode_changed:%d,active_changed:%d,"
8809 "connectors_changed:%d\n",
8811 new_crtc_state->enable,
8812 new_crtc_state->active,
8813 new_crtc_state->planes_changed,
8814 new_crtc_state->mode_changed,
8815 new_crtc_state->active_changed,
8816 new_crtc_state->connectors_changed);
8818 /* Disable cursor if disabling crtc */
8819 if (old_crtc_state->active && !new_crtc_state->active) {
8820 struct dc_cursor_position position;
8822 memset(&position, 0, sizeof(position));
8823 mutex_lock(&dm->dc_lock);
8824 dc_stream_set_cursor_position(dm_old_crtc_state->stream, &position);
8825 mutex_unlock(&dm->dc_lock);
8828 /* Copy all transient state flags into dc state */
8829 if (dm_new_crtc_state->stream) {
8830 amdgpu_dm_crtc_copy_transient_flags(&dm_new_crtc_state->base,
8831 dm_new_crtc_state->stream);
8834 /* handles headless hotplug case, updating new_state and
8835 * aconnector as needed
8838 if (modeset_required(new_crtc_state, dm_new_crtc_state->stream, dm_old_crtc_state->stream)) {
8840 DRM_DEBUG_ATOMIC("Atomic commit: SET crtc id %d: [%p]\n", acrtc->crtc_id, acrtc);
8842 if (!dm_new_crtc_state->stream) {
8844 * this could happen because of issues with
8845 * userspace notification delivery.
8846 * In this case userspace tries to set a mode on
8847 * a display which is in fact disconnected.
8848 * dc_sink is NULL on the aconnector in this case.
8849 * We expect a mode-reset to come soon.
8851 * This can also happen when an unplug occurs
8852 * during the resume sequence.
8854 * In this case, we want to pretend we still
8855 * have a sink to keep the pipe running so that
8856 * hw state is consistent with the sw state
8858 DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
8859 __func__, acrtc->base.base.id);
8863 if (dm_old_crtc_state->stream)
8864 remove_stream(adev, acrtc, dm_old_crtc_state->stream);
8866 pm_runtime_get_noresume(dev->dev);
8868 acrtc->enabled = true;
8869 acrtc->hw_mode = new_crtc_state->mode;
8870 crtc->hwmode = new_crtc_state->mode;
8871 mode_set_reset_required = true;
8872 } else if (modereset_required(new_crtc_state)) {
8873 DRM_DEBUG_ATOMIC("Atomic commit: RESET. crtc id %d:[%p]\n", acrtc->crtc_id, acrtc);
8874 /* i.e. reset mode */
8875 if (dm_old_crtc_state->stream)
8876 remove_stream(adev, acrtc, dm_old_crtc_state->stream);
8878 mode_set_reset_required = true;
8880 } /* for_each_crtc_in_state() */
8883 /* if there was a mode set or reset, disable eDP PSR */
8884 if (mode_set_reset_required)
8885 amdgpu_dm_psr_disable_all(dm);
8887 dm_enable_per_frame_crtc_master_sync(dc_state);
8888 mutex_lock(&dm->dc_lock);
8889 WARN_ON(!dc_commit_state(dm->dc, dc_state));
8890 #if defined(CONFIG_DRM_AMD_DC_DCN)
8891 /* Allow idle optimization when vblank count is 0 for display off */
8892 if (dm->active_vblank_irq_count == 0)
8893 dc_allow_idle_optimizations(dm->dc, true);
8895 mutex_unlock(&dm->dc_lock);
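/*
 * Note: dc_commit_state() above is the expensive path that programs the
 * full stream topology; property-only changes are sent through
 * dc_commit_updates_for_stream() later in this function and from
 * amdgpu_dm_commit_planes() instead.
 */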
8898 for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
8899 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
8901 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8903 if (dm_new_crtc_state->stream != NULL) {
8904 const struct dc_stream_status *status =
8905 dc_stream_get_status(dm_new_crtc_state->stream);
8908 status = dc_stream_get_status_from_state(dc_state,
8909 dm_new_crtc_state->stream);
8911 DC_ERR("got no status for stream %p on acrtc%p\n", dm_new_crtc_state->stream, acrtc);
8913 acrtc->otg_inst = status->primary_otg_inst;
8916 #ifdef CONFIG_DRM_AMD_DC_HDCP
8917 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
8918 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
8919 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
8920 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
8922 new_crtc_state = NULL;
8925 new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
8927 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8929 if (dm_new_crtc_state && dm_new_crtc_state->stream == NULL &&
8930 connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED) {
8931 hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
8932 new_con_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
8933 dm_new_con_state->update_hdcp = true;
8937 if (is_content_protection_different(new_con_state, old_con_state, connector, adev->dm.hdcp_workqueue))
8938 hdcp_update_display(
8939 adev->dm.hdcp_workqueue, aconnector->dc_link->link_index, aconnector,
8940 new_con_state->hdcp_content_type,
8941 new_con_state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED);
8945 /* Handle connector state changes */
8946 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
8947 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
8948 struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
8949 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
8950 struct dc_surface_update dummy_updates[MAX_SURFACES];
8951 struct dc_stream_update stream_update;
8952 struct dc_info_packet hdr_packet;
8953 struct dc_stream_status *status = NULL;
8954 bool abm_changed, hdr_changed, scaling_changed;
8956 memset(&dummy_updates, 0, sizeof(dummy_updates));
8957 memset(&stream_update, 0, sizeof(stream_update));
8960 new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
8961 old_crtc_state = drm_atomic_get_old_crtc_state(state, &acrtc->base);
8964 /* Skip any modesets/resets */
8965 if (!acrtc || drm_atomic_crtc_needs_modeset(new_crtc_state))
8968 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8969 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
8971 scaling_changed = is_scaling_state_different(dm_new_con_state,
8974 abm_changed = dm_new_crtc_state->abm_level !=
8975 dm_old_crtc_state->abm_level;
8978 !drm_connector_atomic_hdr_metadata_equal(old_con_state, new_con_state);
8980 if (!scaling_changed && !abm_changed && !hdr_changed)
8983 stream_update.stream = dm_new_crtc_state->stream;
8984 if (scaling_changed) {
8985 update_stream_scaling_settings(&dm_new_con_state->base.crtc->mode,
8986 dm_new_con_state, dm_new_crtc_state->stream);
8988 stream_update.src = dm_new_crtc_state->stream->src;
8989 stream_update.dst = dm_new_crtc_state->stream->dst;
8993 dm_new_crtc_state->stream->abm_level = dm_new_crtc_state->abm_level;
8995 stream_update.abm_level = &dm_new_crtc_state->abm_level;
8999 fill_hdr_info_packet(new_con_state, &hdr_packet);
9000 stream_update.hdr_static_metadata = &hdr_packet;
9003 status = dc_stream_get_status(dm_new_crtc_state->stream);
9005 WARN_ON(!status->plane_count);
9008 * TODO: DC refuses to perform stream updates without a dc_surface_update.
9009 * Here we create an empty update on each plane.
9010 * To fix this, DC should permit updating only stream properties.
9012 for (j = 0; j < status->plane_count; j++)
9013 dummy_updates[j].surface = status->plane_states[0];
9016 mutex_lock(&dm->dc_lock);
9017 dc_commit_updates_for_stream(dm->dc,
9019 status->plane_count,
9020 dm_new_crtc_state->stream,
9023 mutex_unlock(&dm->dc_lock);
9026 /* Count number of newly disabled CRTCs for dropping PM refs later. */
9027 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
9028 new_crtc_state, i) {
9029 if (old_crtc_state->active && !new_crtc_state->active)
9030 crtc_disable_count++;
9032 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9033 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9035 /* Update the freesync config on the crtc state and the params used by the irq handler */
9036 update_stream_irq_parameters(dm, dm_new_crtc_state);
9038 /* Handle vrr on->off / off->on transitions */
9039 amdgpu_dm_handle_vrr_transition(dm_old_crtc_state,
9044 * Enable interrupts for CRTCs that are newly enabled or went through
9045 * a modeset. This was intentionally deferred until after the front end
9046 * state was modified to wait until the OTG was on and so the IRQ
9047 * handlers didn't access stale or invalid state.
9049 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
9050 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
9051 #ifdef CONFIG_DEBUG_FS
9052 bool configure_crc = false;
9053 enum amdgpu_dm_pipe_crc_source cur_crc_src;
9054 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
9055 struct crc_rd_work *crc_rd_wrk = dm->crc_rd_wrk;
9057 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
9058 cur_crc_src = acrtc->dm_irq_params.crc_src;
9059 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
9061 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9063 if (new_crtc_state->active &&
9064 (!old_crtc_state->active ||
9065 drm_atomic_crtc_needs_modeset(new_crtc_state))) {
9066 dc_stream_retain(dm_new_crtc_state->stream);
9067 acrtc->dm_irq_params.stream = dm_new_crtc_state->stream;
9068 manage_dm_interrupts(adev, acrtc, true);
9070 #ifdef CONFIG_DEBUG_FS
9072 * Frontend may have changed so reapply the CRC capture
9073 * settings for the stream.
9075 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9077 if (amdgpu_dm_is_valid_crc_source(cur_crc_src)) {
9078 configure_crc = true;
9079 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
9080 if (amdgpu_dm_crc_window_is_activated(crtc)) {
9081 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
9082 acrtc->dm_irq_params.crc_window.update_win = true;
9083 acrtc->dm_irq_params.crc_window.skip_frame_cnt = 2;
9084 spin_lock_irq(&crc_rd_wrk->crc_rd_work_lock);
9085 crc_rd_wrk->crtc = crtc;
9086 spin_unlock_irq(&crc_rd_wrk->crc_rd_work_lock);
9087 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
9093 if (amdgpu_dm_crtc_configure_crc_source(
9094 crtc, dm_new_crtc_state, cur_crc_src))
9095 DRM_DEBUG_DRIVER("Failed to configure crc source");
9100 for_each_new_crtc_in_state(state, crtc, new_crtc_state, j)
9101 if (new_crtc_state->async_flip)
9102 wait_for_vblank = false;
9104 /* update planes when needed per crtc */
9105 for_each_new_crtc_in_state(state, crtc, new_crtc_state, j) {
9106 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9108 if (dm_new_crtc_state->stream)
9109 amdgpu_dm_commit_planes(state, dc_state, dev,
9110 dm, crtc, wait_for_vblank);
9113 /* Update audio instances for each connector. */
9114 amdgpu_dm_commit_audio(dev, state);
9116 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) || \
9117 defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
9118 /* restore the backlight level */
9119 if (dm->backlight_dev)
9120 amdgpu_dm_backlight_set_level(dm, dm->brightness[0]);
9123 * send a vblank event for all events not handled in flip and
9124 * mark the event as consumed for drm_atomic_helper_commit_hw_done()
9126 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
9127 for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
9129 if (new_crtc_state->event)
9130 drm_send_event_locked(dev, &new_crtc_state->event->base);
9132 new_crtc_state->event = NULL;
9134 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
9136 /* Signal HW programming completion */
9137 drm_atomic_helper_commit_hw_done(state);
9139 if (wait_for_vblank)
9140 drm_atomic_helper_wait_for_flip_done(dev, state);
9142 drm_atomic_helper_cleanup_planes(dev, state);
9144 /* return the stolen vga memory back to VRAM */
9145 if (!adev->mman.keep_stolen_vga_memory)
9146 amdgpu_bo_free_kernel(&adev->mman.stolen_vga_memory, NULL, NULL);
9147 amdgpu_bo_free_kernel(&adev->mman.stolen_extended_memory, NULL, NULL);
9150 * Finally, drop a runtime PM reference for each newly disabled CRTC,
9151 * so we can put the GPU into runtime suspend if we're not driving any screens anymore.
9154 for (i = 0; i < crtc_disable_count; i++)
9155 pm_runtime_put_autosuspend(dev->dev);
9156 pm_runtime_mark_last_busy(dev->dev);
9159 dc_release_state(dc_state_temp);
9163 static int dm_force_atomic_commit(struct drm_connector *connector)
9166 struct drm_device *ddev = connector->dev;
9167 struct drm_atomic_state *state = drm_atomic_state_alloc(ddev);
9168 struct amdgpu_crtc *disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
9169 struct drm_plane *plane = disconnected_acrtc->base.primary;
9170 struct drm_connector_state *conn_state;
9171 struct drm_crtc_state *crtc_state;
9172 struct drm_plane_state *plane_state;
9177 state->acquire_ctx = ddev->mode_config.acquire_ctx;
9179 /* Construct an atomic state to restore previous display setting */
9182 * Attach connectors to drm_atomic_state
9184 conn_state = drm_atomic_get_connector_state(state, connector);
9186 ret = PTR_ERR_OR_ZERO(conn_state);
9190 /* Attach crtc to drm_atomic_state*/
9191 crtc_state = drm_atomic_get_crtc_state(state, &disconnected_acrtc->base);
9193 ret = PTR_ERR_OR_ZERO(crtc_state);
9197 /* force a restore */
9198 crtc_state->mode_changed = true;
9200 /* Attach plane to drm_atomic_state */
9201 plane_state = drm_atomic_get_plane_state(state, plane);
9203 ret = PTR_ERR_OR_ZERO(plane_state);
9207 /* Call commit internally with the state we just constructed */
9208 ret = drm_atomic_commit(state);
9211 drm_atomic_state_put(state);
9213 DRM_ERROR("Restoring old state failed with %i\n", ret);
9219 * This function handles all cases when a set mode does not come upon hotplug.
9220 * This includes when a display is unplugged and then plugged back into the
9221 * same port, and when running without usermode desktop manager support.
9223 void dm_restore_drm_connector_state(struct drm_device *dev,
9224 struct drm_connector *connector)
9226 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
9227 struct amdgpu_crtc *disconnected_acrtc;
9228 struct dm_crtc_state *acrtc_state;
9230 if (!aconnector->dc_sink || !connector->state || !connector->encoder)
9233 disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
9234 if (!disconnected_acrtc)
9237 acrtc_state = to_dm_crtc_state(disconnected_acrtc->base.state);
9238 if (!acrtc_state->stream)
9242 * If the previous sink is not released and different from the current,
9243 * we deduce we are in a state where we cannot rely on a usermode call
9244 * to turn on the display, so we do it here.
9246 if (acrtc_state->stream->sink != aconnector->dc_sink)
9247 dm_force_atomic_commit(&aconnector->base);
9251 * Grabs all modesetting locks to serialize against any blocking commits,
9252 * and waits for completion of all non-blocking commits.
9254 static int do_aquire_global_lock(struct drm_device *dev,
9255 struct drm_atomic_state *state)
9257 struct drm_crtc *crtc;
9258 struct drm_crtc_commit *commit;
9262 * Adding all modeset locks to acquire_ctx will
9263 * ensure that when the framework releases it, the
9264 * extra locks we are locking here will get released too.
9266 ret = drm_modeset_lock_all_ctx(dev, state->acquire_ctx);
9270 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
9271 spin_lock(&crtc->commit_lock);
9272 commit = list_first_entry_or_null(&crtc->commit_list,
9273 struct drm_crtc_commit, commit_entry);
9275 drm_crtc_commit_get(commit);
9276 spin_unlock(&crtc->commit_lock);
9282 * Make sure all pending HW programming has completed and page flips are done.
9285 ret = wait_for_completion_interruptible_timeout(&commit->hw_done, 10*HZ);
9288 ret = wait_for_completion_interruptible_timeout(
9289 &commit->flip_done, 10*HZ);
9292 DRM_ERROR("[CRTC:%d:%s] hw_done or flip_done "
9293 "timed out\n", crtc->base.id, crtc->name);
9295 drm_crtc_commit_put(commit);
9298 return ret < 0 ? ret : 0;
9301 static void get_freesync_config_for_crtc(
9302 struct dm_crtc_state *new_crtc_state,
9303 struct dm_connector_state *new_con_state)
9305 struct mod_freesync_config config = {0};
9306 struct amdgpu_dm_connector *aconnector =
9307 to_amdgpu_dm_connector(new_con_state->base.connector);
9308 struct drm_display_mode *mode = &new_crtc_state->base.mode;
9309 int vrefresh = drm_mode_vrefresh(mode);
9310 bool fs_vid_mode = false;
9312 new_crtc_state->vrr_supported = new_con_state->freesync_capable &&
9313 vrefresh >= aconnector->min_vfreq &&
9314 vrefresh <= aconnector->max_vfreq;
9316 if (new_crtc_state->vrr_supported) {
9317 new_crtc_state->stream->ignore_msa_timing_param = true;
9318 fs_vid_mode = new_crtc_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED;
9320 config.min_refresh_in_uhz = aconnector->min_vfreq * 1000000;
9321 config.max_refresh_in_uhz = aconnector->max_vfreq * 1000000;
9322 config.vsif_supported = true;
9326 config.state = VRR_STATE_ACTIVE_FIXED;
9327 config.fixed_refresh_in_uhz = new_crtc_state->freesync_config.fixed_refresh_in_uhz;
9329 } else if (new_crtc_state->base.vrr_enabled) {
9330 config.state = VRR_STATE_ACTIVE_VARIABLE;
9332 config.state = VRR_STATE_INACTIVE;
9336 new_crtc_state->freesync_config = config;
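/*
 * Note: DC tracks the refresh range in micro-Hz (the _in_uhz fields),
 * hence the * 1000000 conversions above from the connector's Hz limits.
 */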
9339 static void reset_freesync_config_for_crtc(
9340 struct dm_crtc_state *new_crtc_state)
9342 new_crtc_state->vrr_supported = false;
9344 memset(&new_crtc_state->vrr_infopacket, 0,
9345 sizeof(new_crtc_state->vrr_infopacket));
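/*
 * A freesync video mode switch only varies the vertical front porch:
 * pixel clock, the horizontal timing and the vsync pulse width all stay
 * the same while vtotal (and with it vsync_start/vsync_end) moves. The
 * helper below returns true exactly for that kind of mode change, which
 * is why it requires the vertical fields to differ.
 */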
9349 is_timing_unchanged_for_freesync(struct drm_crtc_state *old_crtc_state,
9350 struct drm_crtc_state *new_crtc_state)
9352 struct drm_display_mode old_mode, new_mode;
9354 if (!old_crtc_state || !new_crtc_state)
9357 old_mode = old_crtc_state->mode;
9358 new_mode = new_crtc_state->mode;
9360 if (old_mode.clock == new_mode.clock &&
9361 old_mode.hdisplay == new_mode.hdisplay &&
9362 old_mode.vdisplay == new_mode.vdisplay &&
9363 old_mode.htotal == new_mode.htotal &&
9364 old_mode.vtotal != new_mode.vtotal &&
9365 old_mode.hsync_start == new_mode.hsync_start &&
9366 old_mode.vsync_start != new_mode.vsync_start &&
9367 old_mode.hsync_end == new_mode.hsync_end &&
9368 old_mode.vsync_end != new_mode.vsync_end &&
9369 old_mode.hskew == new_mode.hskew &&
9370 old_mode.vscan == new_mode.vscan &&
9371 (old_mode.vsync_end - old_mode.vsync_start) ==
9372 (new_mode.vsync_end - new_mode.vsync_start))
9378 static void set_freesync_fixed_config(struct dm_crtc_state *dm_new_crtc_state)
{
9379 uint64_t num, den, res;
9380 struct drm_crtc_state *new_crtc_state = &dm_new_crtc_state->base;
9382 dm_new_crtc_state->freesync_config.state = VRR_STATE_ACTIVE_FIXED;
9384 num = (unsigned long long)new_crtc_state->mode.clock * 1000 * 1000000;
9385 den = (unsigned long long)new_crtc_state->mode.htotal *
9386 (unsigned long long)new_crtc_state->mode.vtotal;
9388 res = div_u64(num, den);
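/*
 * Worked example: a 1920x1080@60 CEA mode has clock = 148500 kHz,
 * htotal = 2200 and vtotal = 1125, so
 * 148500 * 1000 * 1000000 / (2200 * 1125) = 60000000 uHz, i.e. 60 Hz.
 */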
9389 dm_new_crtc_state->freesync_config.fixed_refresh_in_uhz = res;
9392 static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
9393 struct drm_atomic_state *state,
9394 struct drm_crtc *crtc,
9395 struct drm_crtc_state *old_crtc_state,
9396 struct drm_crtc_state *new_crtc_state,
9398 bool *lock_and_validation_needed)
9400 struct dm_atomic_state *dm_state = NULL;
9401 struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
9402 struct dc_stream_state *new_stream;
9406 * TODO Move this code into dm_crtc_atomic_check once we get rid of dc_validation_set
9407 * and update changed items
9409 struct amdgpu_crtc *acrtc = NULL;
9410 struct amdgpu_dm_connector *aconnector = NULL;
9411 struct drm_connector_state *drm_new_conn_state = NULL, *drm_old_conn_state = NULL;
9412 struct dm_connector_state *dm_new_conn_state = NULL, *dm_old_conn_state = NULL;
9416 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9417 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9418 acrtc = to_amdgpu_crtc(crtc);
9419 aconnector = amdgpu_dm_find_first_crtc_matching_connector(state, crtc);
9421 /* TODO This hack should go away */
9422 if (aconnector && enable) {
9423 /* Make sure fake sink is created in plug-in scenario */
9424 drm_new_conn_state = drm_atomic_get_new_connector_state(state,
9426 drm_old_conn_state = drm_atomic_get_old_connector_state(state,
9429 if (IS_ERR(drm_new_conn_state)) {
9430 ret = PTR_ERR_OR_ZERO(drm_new_conn_state);
9434 dm_new_conn_state = to_dm_connector_state(drm_new_conn_state);
9435 dm_old_conn_state = to_dm_connector_state(drm_old_conn_state);
9437 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
9440 new_stream = create_validate_stream_for_sink(aconnector,
9441 &new_crtc_state->mode,
9443 dm_old_crtc_state->stream);
9446 * we can have no stream on ACTION_SET if a display
9447 * was disconnected during S3. In this case it is not an
9448 * error: the OS will be updated after detection, and
9449 * will do the right thing on the next atomic commit.
9453 DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
9454 __func__, acrtc->base.base.id);
9460 * TODO: Check VSDB bits to decide whether this should
9461 * be enabled or not.
9463 new_stream->triggered_crtc_reset.enabled =
9464 dm->force_timing_sync;
9466 dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
9468 ret = fill_hdr_info_packet(drm_new_conn_state,
9469 &new_stream->hdr_static_metadata);
9474 * If we already removed the old stream from the context
9475 * (and set the new stream to NULL) then we can't reuse
9476 * the old stream even if the stream and scaling are unchanged.
9477 * We'd hit the BUG_ON and end up with a black screen.
9479 * TODO: Refactor this function to allow this check to work
9480 * in all conditions.
9482 if (amdgpu_freesync_vid_mode &&
9483 dm_new_crtc_state->stream &&
9484 is_timing_unchanged_for_freesync(new_crtc_state, old_crtc_state))
9487 if (dm_new_crtc_state->stream &&
9488 dc_is_stream_unchanged(new_stream, dm_old_crtc_state->stream) &&
9489 dc_is_stream_scaling_unchanged(new_stream, dm_old_crtc_state->stream)) {
9490 new_crtc_state->mode_changed = false;
9491 DRM_DEBUG_DRIVER("Mode change not required, setting mode_changed to %d",
9492 new_crtc_state->mode_changed);
9496 /* mode_changed flag may get updated above, need to check again */
9497 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
9501 "amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
9502 "planes_changed:%d, mode_changed:%d,active_changed:%d,"
9503 "connectors_changed:%d\n",
9505 new_crtc_state->enable,
9506 new_crtc_state->active,
9507 new_crtc_state->planes_changed,
9508 new_crtc_state->mode_changed,
9509 new_crtc_state->active_changed,
9510 new_crtc_state->connectors_changed);
9512 /* Remove stream for any changed/disabled CRTC */
9515 if (!dm_old_crtc_state->stream)
9518 if (amdgpu_freesync_vid_mode && dm_new_crtc_state->stream &&
9519 is_timing_unchanged_for_freesync(new_crtc_state,
9521 new_crtc_state->mode_changed = false;
9523 "Mode change not required for front porch change, "
9524 "setting mode_changed to %d",
9525 new_crtc_state->mode_changed);
9527 set_freesync_fixed_config(dm_new_crtc_state);
9530 } else if (amdgpu_freesync_vid_mode && aconnector &&
9531 is_freesync_video_mode(&new_crtc_state->mode,
9533 set_freesync_fixed_config(dm_new_crtc_state);
9536 ret = dm_atomic_get_state(state, &dm_state);
9540 DRM_DEBUG_DRIVER("Disabling DRM crtc: %d\n",
9543 /* i.e. reset mode */
9544 if (dc_remove_stream_from_ctx(
9547 dm_old_crtc_state->stream) != DC_OK) {
9552 dc_stream_release(dm_old_crtc_state->stream);
9553 dm_new_crtc_state->stream = NULL;
9555 reset_freesync_config_for_crtc(dm_new_crtc_state);
9557 *lock_and_validation_needed = true;
9559 } else { /* Add stream for any updated/enabled CRTC */
9561 * Quick fix to prevent a NULL pointer on new_stream when
9562 * added MST connectors are not found in the existing crtc_state in chained mode.
9563 * TODO: need to dig out the root cause of this
9565 if (!aconnector || (!aconnector->dc_sink && aconnector->mst_port))
9568 if (modereset_required(new_crtc_state))
9571 if (modeset_required(new_crtc_state, new_stream,
9572 dm_old_crtc_state->stream)) {
9574 WARN_ON(dm_new_crtc_state->stream);
9576 ret = dm_atomic_get_state(state, &dm_state);
9580 dm_new_crtc_state->stream = new_stream;
9582 dc_stream_retain(new_stream);
9584 DRM_DEBUG_ATOMIC("Enabling DRM crtc: %d\n",
9587 if (dc_add_stream_to_ctx(
9590 dm_new_crtc_state->stream) != DC_OK) {
9595 *lock_and_validation_needed = true;
9600 /* Release extra reference */
9602 dc_stream_release(new_stream);
9605 * We want to do dc stream updates that do not require a
9606 * full modeset below.
9608 if (!(enable && aconnector && new_crtc_state->active))
9611 * Given above conditions, the dc state cannot be NULL because:
9612 * 1. We're in the process of enabling CRTCs (the stream has just been
9613 * added to the dc context, or is already on the context)
9614 * 2. Has a valid connector attached, and
9615 * 3. Is currently active and enabled.
9616 * => The dc stream state currently exists.
9618 BUG_ON(dm_new_crtc_state->stream == NULL);
9620 /* Scaling or underscan settings */
9621 if (is_scaling_state_different(dm_old_conn_state, dm_new_conn_state))
9622 update_stream_scaling_settings(
9623 &new_crtc_state->mode, dm_new_conn_state, dm_new_crtc_state->stream);
9626 dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
9629 * Color management settings. We also update color properties
9630 * when a modeset is needed, to ensure they get reprogrammed.
9632 if (dm_new_crtc_state->base.color_mgmt_changed ||
9633 drm_atomic_crtc_needs_modeset(new_crtc_state)) {
9634 ret = amdgpu_dm_update_crtc_color_mgmt(dm_new_crtc_state);
9639 /* Update Freesync settings. */
9640 get_freesync_config_for_crtc(dm_new_crtc_state,
9647 dc_stream_release(new_stream);
9651 static bool should_reset_plane(struct drm_atomic_state *state,
9652 struct drm_plane *plane,
9653 struct drm_plane_state *old_plane_state,
9654 struct drm_plane_state *new_plane_state)
9656 struct drm_plane *other;
9657 struct drm_plane_state *old_other_state, *new_other_state;
9658 struct drm_crtc_state *new_crtc_state;
9662 * TODO: Remove this hack once the checks below are sufficient
9663 * to determine when we need to reset all the planes on the stream.
9666 if (state->allow_modeset)
9669 /* Exit early if we know that we're adding or removing the plane. */
9670 if (old_plane_state->crtc != new_plane_state->crtc)
9673 /* old crtc == new_crtc == NULL, plane not in context. */
9674 if (!new_plane_state->crtc)
9678 drm_atomic_get_new_crtc_state(state, new_plane_state->crtc);
9680 if (!new_crtc_state)
9683 /* CRTC Degamma changes currently require us to recreate planes. */
9684 if (new_crtc_state->color_mgmt_changed)
9687 if (drm_atomic_crtc_needs_modeset(new_crtc_state))
9691 * If there are any new primary or overlay planes being added or
9692 * removed then the z-order can potentially change. To ensure
9693 * correct z-order and pipe acquisition the current DC architecture
9694 * requires us to remove and recreate all existing planes.
9696 * TODO: Come up with a more elegant solution for this.
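 *
 * The per-plane checks below cover the remaining cases: scaling,
 * rotation, blending mode, alpha, colorspace, pixel format and
 * tiling/DCC changes all force a reset, since each of these can change
 * DC's bandwidth or pipe-split requirements.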
9698 for_each_oldnew_plane_in_state(state, other, old_other_state, new_other_state, i) {
9699 struct amdgpu_framebuffer *old_afb, *new_afb;
9700 if (other->type == DRM_PLANE_TYPE_CURSOR)
9703 if (old_other_state->crtc != new_plane_state->crtc &&
9704 new_other_state->crtc != new_plane_state->crtc)
9707 if (old_other_state->crtc != new_other_state->crtc)
9710 /* Src/dst size and scaling updates. */
9711 if (old_other_state->src_w != new_other_state->src_w ||
9712 old_other_state->src_h != new_other_state->src_h ||
9713 old_other_state->crtc_w != new_other_state->crtc_w ||
9714 old_other_state->crtc_h != new_other_state->crtc_h)
9717 /* Rotation / mirroring updates. */
9718 if (old_other_state->rotation != new_other_state->rotation)
9721 /* Blending updates. */
9722 if (old_other_state->pixel_blend_mode !=
9723 new_other_state->pixel_blend_mode)
9726 /* Alpha updates. */
9727 if (old_other_state->alpha != new_other_state->alpha)
9730 /* Colorspace changes. */
9731 if (old_other_state->color_range != new_other_state->color_range ||
9732 old_other_state->color_encoding != new_other_state->color_encoding)
9735 /* Framebuffer checks fall at the end. */
9736 if (!old_other_state->fb || !new_other_state->fb)
9739 /* Pixel format changes can require bandwidth updates. */
9740 if (old_other_state->fb->format != new_other_state->fb->format)
9743 old_afb = (struct amdgpu_framebuffer *)old_other_state->fb;
9744 new_afb = (struct amdgpu_framebuffer *)new_other_state->fb;
9746 /* Tiling and DCC changes also require bandwidth updates. */
9747 if (old_afb->tiling_flags != new_afb->tiling_flags ||
9748 old_afb->base.modifier != new_afb->base.modifier)
9755 static int dm_check_cursor_fb(struct amdgpu_crtc *new_acrtc,
9756 struct drm_plane_state *new_plane_state,
9757 struct drm_framebuffer *fb)
9759 struct amdgpu_device *adev = drm_to_adev(new_acrtc->base.dev);
9760 struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(fb);
9764 if (fb->width > new_acrtc->max_cursor_width ||
9765 fb->height > new_acrtc->max_cursor_height) {
9766 DRM_DEBUG_ATOMIC("Bad cursor FB size %dx%d\n",
9767 new_plane_state->fb->width,
9768 new_plane_state->fb->height);
9771 if (new_plane_state->src_w != fb->width << 16 ||
9772 new_plane_state->src_h != fb->height << 16) {
9773 DRM_DEBUG_ATOMIC("Cropping not supported for cursor plane\n");
9777 /* Pitch in pixels */
9778 pitch = fb->pitches[0] / fb->format->cpp[0];
9780 if (fb->width != pitch) {
9781 DRM_DEBUG_ATOMIC("Cursor FB width %d doesn't match pitch %d",
9790 /* FB pitch is supported by cursor plane */
9793 DRM_DEBUG_ATOMIC("Bad cursor FB pitch %d px\n", pitch);
9797 /* Core DRM takes care of checking FB modifiers, so we only need to
9798 * check tiling flags when the FB doesn't have a modifier. */
9799 if (!(fb->flags & DRM_MODE_FB_MODIFIERS)) {
9800 if (adev->family < AMDGPU_FAMILY_AI) {
9801 linear = AMDGPU_TILING_GET(afb->tiling_flags, ARRAY_MODE) != DC_ARRAY_2D_TILED_THIN1 &&
9802 AMDGPU_TILING_GET(afb->tiling_flags, ARRAY_MODE) != DC_ARRAY_1D_TILED_THIN1 &&
9803 AMDGPU_TILING_GET(afb->tiling_flags, MICRO_TILE_MODE) == 0;
9805 linear = AMDGPU_TILING_GET(afb->tiling_flags, SWIZZLE_MODE) == 0;
9808 DRM_DEBUG_ATOMIC("Cursor FB not linear");
9816 static int dm_update_plane_state(struct dc *dc,
9817 struct drm_atomic_state *state,
9818 struct drm_plane *plane,
9819 struct drm_plane_state *old_plane_state,
9820 struct drm_plane_state *new_plane_state,
9822 bool *lock_and_validation_needed)
9825 struct dm_atomic_state *dm_state = NULL;
9826 struct drm_crtc *new_plane_crtc, *old_plane_crtc;
9827 struct drm_crtc_state *old_crtc_state, *new_crtc_state;
9828 struct dm_crtc_state *dm_new_crtc_state, *dm_old_crtc_state;
9829 struct dm_plane_state *dm_new_plane_state, *dm_old_plane_state;
9830 struct amdgpu_crtc *new_acrtc;
9835 new_plane_crtc = new_plane_state->crtc;
9836 old_plane_crtc = old_plane_state->crtc;
9837 dm_new_plane_state = to_dm_plane_state(new_plane_state);
9838 dm_old_plane_state = to_dm_plane_state(old_plane_state);
9840 if (plane->type == DRM_PLANE_TYPE_CURSOR) {
9841 if (!enable || !new_plane_crtc ||
9842 drm_atomic_plane_disabling(plane->state, new_plane_state))
9845 new_acrtc = to_amdgpu_crtc(new_plane_crtc);
9847 if (new_plane_state->src_x != 0 || new_plane_state->src_y != 0) {
9848 DRM_DEBUG_ATOMIC("Cropping not supported for cursor plane\n");
9852 if (new_plane_state->fb) {
9853 ret = dm_check_cursor_fb(new_acrtc, new_plane_state,
9854 new_plane_state->fb);
9862 needs_reset = should_reset_plane(state, plane, old_plane_state,
9865 /* Remove any changed/removed planes */
9870 if (!old_plane_crtc)
9873 old_crtc_state = drm_atomic_get_old_crtc_state(
9874 state, old_plane_crtc);
9875 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9877 if (!dm_old_crtc_state->stream)
9880 DRM_DEBUG_ATOMIC("Disabling DRM plane: %d on DRM crtc %d\n",
9881 plane->base.id, old_plane_crtc->base.id);
9883 ret = dm_atomic_get_state(state, &dm_state);
9887 if (!dc_remove_plane_from_context(
9889 dm_old_crtc_state->stream,
9890 dm_old_plane_state->dc_state,
9891 dm_state->context)) {
9897 dc_plane_state_release(dm_old_plane_state->dc_state);
9898 dm_new_plane_state->dc_state = NULL;
9900 *lock_and_validation_needed = true;
9902 } else { /* Add new planes */
9903 struct dc_plane_state *dc_new_plane_state;
9905 if (drm_atomic_plane_disabling(plane->state, new_plane_state))
9908 if (!new_plane_crtc)
9911 new_crtc_state = drm_atomic_get_new_crtc_state(state, new_plane_crtc);
9912 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9914 if (!dm_new_crtc_state->stream)
9920 ret = dm_plane_helper_check_state(new_plane_state, new_crtc_state);
9924 WARN_ON(dm_new_plane_state->dc_state);
9926 dc_new_plane_state = dc_create_plane_state(dc);
9927 if (!dc_new_plane_state)
9930 DRM_DEBUG_ATOMIC("Enabling DRM plane: %d on DRM crtc %d\n",
9931 plane->base.id, new_plane_crtc->base.id);
9933 ret = fill_dc_plane_attributes(
9934 drm_to_adev(new_plane_crtc->dev),
9939 dc_plane_state_release(dc_new_plane_state);
9943 ret = dm_atomic_get_state(state, &dm_state);
9945 dc_plane_state_release(dc_new_plane_state);
9950 * Any atomic check errors that occur after this will
9951 * not need a release. The plane state will be attached
9952 * to the stream, and therefore part of the atomic
9953 * state. It'll be released when the atomic state is cleaned.
9956 if (!dc_add_plane_to_context(
9958 dm_new_crtc_state->stream,
9960 dm_state->context)) {
9962 dc_plane_state_release(dc_new_plane_state);
9966 dm_new_plane_state->dc_state = dc_new_plane_state;
9968 /* Tell DC to do a full surface update every time there
9969 * is a plane change. Inefficient, but works for now.
9971 dm_new_plane_state->dc_state->update_flags.bits.full_update = 1;
9973 *lock_and_validation_needed = true;
9980 static int dm_check_crtc_cursor(struct drm_atomic_state *state,
9981 struct drm_crtc *crtc,
9982 struct drm_crtc_state *new_crtc_state)
9984 struct drm_plane_state *new_cursor_state, *new_primary_state;
9985 int cursor_scale_w, cursor_scale_h, primary_scale_w, primary_scale_h;
9987 /* On DCE and DCN there is no dedicated hardware cursor plane. We get a
9988 * cursor per pipe but it's going to inherit the scaling and
9989 * positioning from the underlying pipe. Check that the cursor plane's
9990 * scaling matches the primary plane's. */
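/*
 * e.g. a 64x64 cursor FB scanned out at 64x64 gives a scale of 1000
 * (1:1) below; the check passes only if the primary plane is scaled by
 * the same ratio, since both ratios are computed in the same
 * fixed-point (x1000) form.
 */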
9992 new_cursor_state = drm_atomic_get_new_plane_state(state, crtc->cursor);
9993 new_primary_state = drm_atomic_get_new_plane_state(state, crtc->primary);
9994 if (!new_cursor_state || !new_primary_state ||
9995 !new_cursor_state->fb || !new_primary_state->fb) {
9999 cursor_scale_w = new_cursor_state->crtc_w * 1000 /
10000 (new_cursor_state->src_w >> 16);
10001 cursor_scale_h = new_cursor_state->crtc_h * 1000 /
10002 (new_cursor_state->src_h >> 16);
10004 primary_scale_w = new_primary_state->crtc_w * 1000 /
10005 (new_primary_state->src_w >> 16);
10006 primary_scale_h = new_primary_state->crtc_h * 1000 /
10007 (new_primary_state->src_h >> 16);
10009 if (cursor_scale_w != primary_scale_w ||
10010 cursor_scale_h != primary_scale_h) {
10011 DRM_DEBUG_ATOMIC("Cursor plane scaling doesn't match primary plane\n");
10018 #if defined(CONFIG_DRM_AMD_DC_DCN)
10019 static int add_affected_mst_dsc_crtcs(struct drm_atomic_state *state, struct drm_crtc *crtc)
10021 struct drm_connector *connector;
10022 struct drm_connector_state *conn_state;
10023 struct amdgpu_dm_connector *aconnector = NULL;
10025 for_each_new_connector_in_state(state, connector, conn_state, i) {
10026 if (conn_state->crtc != crtc)
10029 aconnector = to_amdgpu_dm_connector(connector);
10030 if (!aconnector->port || !aconnector->mst_port)
10039 return drm_dp_mst_add_affected_dsc_crtcs(state, &aconnector->mst_port->mst_mgr);
10043 static int validate_overlay(struct drm_atomic_state *state)
10046 struct drm_plane *plane;
10047 struct drm_plane_state *old_plane_state, *new_plane_state;
10048 struct drm_plane_state *primary_state, *cursor_state, *overlay_state = NULL;
10050 /* Check if primary plane is contained inside overlay */
10051 for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
10052 if (plane->type == DRM_PLANE_TYPE_OVERLAY) {
10053 if (drm_atomic_plane_disabling(plane->state, new_plane_state))
10056 overlay_state = new_plane_state;
10061 /* check if we're making changes to the overlay plane */
10062 if (!overlay_state)
10065 /* check if overlay plane is enabled */
10066 if (!overlay_state->crtc)
10069 /* find the primary plane for the CRTC that the overlay is enabled on */
10070 primary_state = drm_atomic_get_plane_state(state, overlay_state->crtc->primary);
10071 if (IS_ERR(primary_state))
10072 return PTR_ERR(primary_state);
10074 /* check if primary plane is enabled */
10075 if (!primary_state->crtc)
10078 /* check if cursor plane is enabled */
10079 cursor_state = drm_atomic_get_plane_state(state, overlay_state->crtc->cursor);
10080 if (IS_ERR(cursor_state))
10081 return PTR_ERR(cursor_state);
10083 if (drm_atomic_plane_disabling(plane->state, cursor_state))
10086 /* Perform the bounds check to ensure the overlay plane covers the primary */
10087 if (primary_state->crtc_x < overlay_state->crtc_x ||
10088 primary_state->crtc_y < overlay_state->crtc_y ||
10089 primary_state->crtc_x + primary_state->crtc_w > overlay_state->crtc_x + overlay_state->crtc_w ||
10090 primary_state->crtc_y + primary_state->crtc_h > overlay_state->crtc_y + overlay_state->crtc_h) {
10091 DRM_DEBUG_ATOMIC("Overlay plane is enabled with hardware cursor but does not fully cover primary plane\n");
10099 * amdgpu_dm_atomic_check() - Atomic check implementation for AMDgpu DM.
10100 * @dev: The DRM device
10101 * @state: The atomic state to commit
10103 * Validate that the given atomic state is programmable by DC into hardware.
10104 * This involves constructing a &struct dc_state reflecting the new hardware
10105 * state we wish to commit, then querying DC to see if it is programmable. It's
10106 * important not to modify the existing DC state. Otherwise, atomic_check
10107 * may unexpectedly commit hardware changes.
10109 * When validating the DC state, it's important that the right locks are
10110 * acquired. For full updates case which removes/adds/updates streams on one
10111 * CRTC while flipping on another CRTC, acquiring global lock will guarantee
10112 * that any such full update commit will wait for completion of any outstanding
10113 * flip using DRMs synchronization events.
10115 * Note that DM adds the affected connectors for all CRTCs in state, even when
10116 * might not seem necessary. This is because DC stream creation requires the
10117 * DC sink, which is tied to the DRM connector state. Cleaning this up should
10118 * be possible but non-trivial - a possible TODO item.
10120 * Return: -Error code if validation failed.
10122 static int amdgpu_dm_atomic_check(struct drm_device *dev,
10123 struct drm_atomic_state *state)
10125 struct amdgpu_device *adev = drm_to_adev(dev);
10126 struct dm_atomic_state *dm_state = NULL;
10127 struct dc *dc = adev->dm.dc;
10128 struct drm_connector *connector;
10129 struct drm_connector_state *old_con_state, *new_con_state;
10130 struct drm_crtc *crtc;
10131 struct drm_crtc_state *old_crtc_state, *new_crtc_state;
10132 struct drm_plane *plane;
10133 struct drm_plane_state *old_plane_state, *new_plane_state;
10134 enum dc_status status;
10136 bool lock_and_validation_needed = false;
10137 struct dm_crtc_state *dm_old_crtc_state;
10139 trace_amdgpu_dm_atomic_check_begin(state);
10141 ret = drm_atomic_helper_check_modeset(dev, state);
10145 /* Check connector changes */
10146 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
10147 struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
10148 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
10150 /* Skip connectors that are disabled or already part of a modeset. */
10151 if (!old_con_state->crtc && !new_con_state->crtc)
10154 if (!new_con_state->crtc)
10157 new_crtc_state = drm_atomic_get_crtc_state(state, new_con_state->crtc);
10158 if (IS_ERR(new_crtc_state)) {
10159 ret = PTR_ERR(new_crtc_state);
10163 if (dm_old_con_state->abm_level !=
10164 dm_new_con_state->abm_level)
10165 new_crtc_state->connectors_changed = true;
10168 #if defined(CONFIG_DRM_AMD_DC_DCN)
10169 if (dc_resource_is_dsc_encoding_supported(dc)) {
10170 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
10171 if (drm_atomic_crtc_needs_modeset(new_crtc_state)) {
10172 ret = add_affected_mst_dsc_crtcs(state, crtc);
10179 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
10180 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
10182 if (!drm_atomic_crtc_needs_modeset(new_crtc_state) &&
10183 !new_crtc_state->color_mgmt_changed &&
10184 old_crtc_state->vrr_enabled == new_crtc_state->vrr_enabled &&
10185 !dm_old_crtc_state->dsc_force_changed)
10188 if (!new_crtc_state->enable)
10191 ret = drm_atomic_add_affected_connectors(state, crtc);
10195 ret = drm_atomic_add_affected_planes(state, crtc);
10199 if (dm_old_crtc_state->dsc_force_changed)
10200 new_crtc_state->mode_changed = true;
10204 * Add all primary and overlay planes on the CRTC to the state
10205 * whenever a plane is enabled to maintain correct z-ordering
10206 * and to enable fast surface updates.
10208 drm_for_each_crtc(crtc, dev) {
10209 bool modified = false;
10211 for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
10212 if (plane->type == DRM_PLANE_TYPE_CURSOR)
10215 if (new_plane_state->crtc == crtc ||
10216 old_plane_state->crtc == crtc) {
10225 drm_for_each_plane_mask(plane, state->dev, crtc->state->plane_mask) {
10226 if (plane->type == DRM_PLANE_TYPE_CURSOR)
10230 drm_atomic_get_plane_state(state, plane);
10232 if (IS_ERR(new_plane_state)) {
10233 ret = PTR_ERR(new_plane_state);
10239 /* Remove existing planes if they are modified */
10240 for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
10241 ret = dm_update_plane_state(dc, state, plane,
10245 &lock_and_validation_needed);
10250 /* Disable all crtcs which require disable */
10251 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
10252 ret = dm_update_crtc_state(&adev->dm, state, crtc,
10256 &lock_and_validation_needed);
10261 /* Enable all crtcs which require enable */
10262 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
10263 ret = dm_update_crtc_state(&adev->dm, state, crtc,
10267 &lock_and_validation_needed);
10272 ret = validate_overlay(state);
10276 /* Add new/modified planes */
10277 for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
10278 ret = dm_update_plane_state(dc, state, plane,
10282 &lock_and_validation_needed);
10287 /* Run this here since we want to validate the streams we created */
10288 ret = drm_atomic_helper_check_planes(dev, state);
10292 /* Check cursor planes scaling */
10293 for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
10294 ret = dm_check_crtc_cursor(state, crtc, new_crtc_state);
10299 if (state->legacy_cursor_update) {
10301 * This is a fast cursor update coming from the plane update
10302 * helper, check if it can be done asynchronously for better
10305 state->async_update =
10306 !drm_atomic_helper_async_check(dev, state);
10309 * Skip the remaining global validation if this is an async
10310 * update. Cursor updates can be done without affecting
10311 * state or bandwidth calcs and this avoids the performance
10312 * penalty of locking the private state object and
10313 * allocating a new dc_state.
10315 if (state->async_update)
10319 /* Check scaling and underscan changes */
10320 /* TODO: Removed scaling-changes validation due to inability to commit
10321 * a new stream into the context w/o causing a full reset. Need to
10322 * decide how to handle this.
10324 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
10325 struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
10326 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
10327 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
10329 /* Skip any modesets/resets */
10330 if (!acrtc || drm_atomic_crtc_needs_modeset(
10331 drm_atomic_get_new_crtc_state(state, &acrtc->base)))
10334 /* Skip anything that is not a scaling or underscan change */
10335 if (!is_scaling_state_different(dm_new_con_state, dm_old_con_state))
10338 lock_and_validation_needed = true;
10342 * Streams and planes are reset when there are changes that affect
10343 * bandwidth. Anything that affects bandwidth needs to go through
10344 * DC global validation to ensure that the configuration can be applied to hardware.
10347 * We currently have to stall out here in atomic_check for outstanding
10348 * commits to finish in this case because our IRQ handlers reference
10349 * DRM state directly - we can end up disabling interrupts too early if we don't.
10352 * TODO: Remove this stall and drop DM state private objects.
10354 if (lock_and_validation_needed) {
10355 ret = dm_atomic_get_state(state, &dm_state);
10359 ret = do_aquire_global_lock(dev, state);
10363 #if defined(CONFIG_DRM_AMD_DC_DCN)
10364 if (!compute_mst_dsc_configs_for_state(state, dm_state->context))
10367 ret = dm_update_mst_vcpi_slots_for_dsc(state, dm_state->context);
10373 * Perform validation of MST topology in the state:
10374 * We need to perform MST atomic check before calling
10375 * dc_validate_global_state(), or there is a chance
10376 * to get stuck in an infinite loop and hang eventually.
10378 ret = drm_dp_mst_atomic_check(state);
10381 status = dc_validate_global_state(dc, dm_state->context, false);
10382 if (status != DC_OK) {
10383 DC_LOG_WARNING("DC global validation failure: %s (%d)",
10384 dc_status_to_str(status), status);
10390 * The commit is a fast update. Fast updates shouldn't change
10391 * the DC context or affect global validation, and they can have their
10392 * commit work done in parallel with other commits not touching
10393 * the same resource. If we have a new DC context as part of
10394 * the DM atomic state from validation we need to free it and
10395 * retain the existing one instead.
10397 * Furthermore, since the DM atomic state only contains the DC
10398 * context and can safely be annulled, we can free the state
10399 * and clear the associated private object now to free
10400 * some memory and avoid a possible use-after-free later.
10403 for (i = 0; i < state->num_private_objs; i++) {
10404 struct drm_private_obj *obj = state->private_objs[i].ptr;
10406 if (obj->funcs == adev->dm.atomic_obj.funcs) {
10407 int j = state->num_private_objs - 1;
10409 dm_atomic_destroy_state(obj,
10410 state->private_objs[i].state);
10412 /* If i is not at the end of the array then the
10413 * last element needs to be moved to where i was
10414 * before the array can safely be truncated.
10417 state->private_objs[i] =
10418 state->private_objs[j];
10420 state->private_objs[j].ptr = NULL;
10421 state->private_objs[j].state = NULL;
10422 state->private_objs[j].old_state = NULL;
10423 state->private_objs[j].new_state = NULL;
10425 state->num_private_objs = j;
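/* Only one DM atomic-state private object exists per device, so at
* most one entry is swapped out and the array shrinks by one.
*/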
10431 /* Store the overall update type for use later in atomic check. */
10432 for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
10433 struct dm_crtc_state *dm_new_crtc_state =
10434 to_dm_crtc_state(new_crtc_state);
10436 dm_new_crtc_state->update_type = lock_and_validation_needed ?
10441 /* Must be success */
10444 trace_amdgpu_dm_atomic_check_finish(state, ret);
10449 if (ret == -EDEADLK)
10450 DRM_DEBUG_DRIVER("Atomic check stopped to avoid deadlock.\n");
10451 else if (ret == -EINTR || ret == -EAGAIN || ret == -ERESTARTSYS)
10452 DRM_DEBUG_DRIVER("Atomic check stopped due to signal.\n");
10454 DRM_DEBUG_DRIVER("Atomic check failed with err: %d \n", ret);
10456 trace_amdgpu_dm_atomic_check_finish(state, ret);
10461 static bool is_dp_capable_without_timing_msa(struct dc *dc,
10462 struct amdgpu_dm_connector *amdgpu_dm_connector)
10465 bool capable = false;
10467 if (amdgpu_dm_connector->dc_link &&
10468 dm_helpers_dp_read_dpcd(
10470 amdgpu_dm_connector->dc_link,
10471 DP_DOWN_STREAM_PORT_COUNT,
10473 sizeof(dpcd_data))) {
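/* A sink that reports DP_MSA_TIMING_PAR_IGNORED accepts varying
* vertical timings, which FreeSync over DP relies on.
*/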
10474 capable = (dpcd_data & DP_MSA_TIMING_PAR_IGNORED) ? true : false;
10480 static bool parse_edid_cea(struct amdgpu_dm_connector *aconnector,
10481 uint8_t *edid_ext, int len,
10482 struct amdgpu_hdmi_vsdb_info *vsdb_info)
10485 struct amdgpu_device *adev = drm_to_adev(aconnector->base.dev);
10486 struct dc *dc = adev->dm.dc;
10488 /* send extension block to DMCU for parsing */
10489 for (i = 0; i < len; i += 8) {
10493 /* send 8 bytes at a time */
10494 if (!dc_edid_parser_send_cea(dc, i, len, &edid_ext[i], 8))
10498 /* EDID block sending completed, expect result */
10499 int version, min_rate, max_rate;
10501 res = dc_edid_parser_recv_amd_vsdb(dc, &version, &min_rate, &max_rate);
10503 /* amd vsdb found */
10504 vsdb_info->freesync_supported = 1;
10505 vsdb_info->amd_vsdb_version = version;
10506 vsdb_info->min_refresh_rate_hz = min_rate;
10507 vsdb_info->max_refresh_rate_hz = max_rate;
10515 res = dc_edid_parser_recv_cea_ack(dc, &offset);
10523 static int parse_hdmi_amd_vsdb(struct amdgpu_dm_connector *aconnector,
10524 struct edid *edid, struct amdgpu_hdmi_vsdb_info *vsdb_info)
10526 uint8_t *edid_ext = NULL;
10528 bool valid_vsdb_found = false;
10530 /*----- drm_find_cea_extension() -----*/
10531 /* No EDID or EDID extensions */
10532 if (edid == NULL || edid->extensions == 0)
10535 /* Find CEA extension */
10536 for (i = 0; i < edid->extensions; i++) {
10537 edid_ext = (uint8_t *)edid + EDID_LENGTH * (i + 1);
10538 if (edid_ext[0] == CEA_EXT)
10542 if (i == edid->extensions)
10545 /*----- cea_db_offsets() -----*/
10546 if (edid_ext[0] != CEA_EXT)
10549 valid_vsdb_found = parse_edid_cea(aconnector, edid_ext, EDID_LENGTH, vsdb_info);
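/* Return the index of the CEA extension block that carried a valid
* AMD VSDB, or -ENODEV when none was found.
*/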
10551 return valid_vsdb_found ? i : -ENODEV;
10554 void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,
10558 struct detailed_timing *timing;
10559 struct detailed_non_pixel *data;
10560 struct detailed_data_monitor_range *range;
10561 struct amdgpu_dm_connector *amdgpu_dm_connector =
10562 to_amdgpu_dm_connector(connector);
10563 struct dm_connector_state *dm_con_state = NULL;
10565 struct drm_device *dev = connector->dev;
10566 struct amdgpu_device *adev = drm_to_adev(dev);
10567 bool freesync_capable = false;
10568 struct amdgpu_hdmi_vsdb_info vsdb_info = {0};
10570 if (!connector->state) {
10571 DRM_ERROR("%s - Connector has no state", __func__);
10576 dm_con_state = to_dm_connector_state(connector->state);
10578 amdgpu_dm_connector->min_vfreq = 0;
10579 amdgpu_dm_connector->max_vfreq = 0;
10580 amdgpu_dm_connector->pixel_clock_mhz = 0;
10585 dm_con_state = to_dm_connector_state(connector->state);
10587 if (!amdgpu_dm_connector->dc_sink) {
10588 DRM_ERROR("dc_sink NULL, could not add free_sync module.\n");
10591 if (!adev->dm.freesync_module)
10595 if (amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT
10596 || amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_EDP) {
10597 bool edid_check_required = false;
10600 edid_check_required = is_dp_capable_without_timing_msa(
10602 amdgpu_dm_connector);
10605 if (edid_check_required && (edid->version > 1 ||
10606 (edid->version == 1 && edid->revision > 1))) {
10607 for (i = 0; i < 4; i++) {
10609 timing = &edid->detailed_timings[i];
10610 data = &timing->data.other_data;
10611 range = &data->data.range;
10613 /* Check if monitor has continuous frequency mode */
10615 if (data->type != EDID_DETAIL_MONITOR_RANGE)
10618 * Check for flag range limits only. If flag == 1 then
10619 * no additional timing information is provided.
10620 * Default GTF, GTF Secondary curve and CVT are not supported.
10623 if (range->flags != 1)
10626 amdgpu_dm_connector->min_vfreq = range->min_vfreq;
10627 amdgpu_dm_connector->max_vfreq = range->max_vfreq;
10628 amdgpu_dm_connector->pixel_clock_mhz =
10629 range->pixel_clock_mhz * 10;
10631 connector->display_info.monitor_range.min_vfreq = range->min_vfreq;
10632 connector->display_info.monitor_range.max_vfreq = range->max_vfreq;
10637 if (amdgpu_dm_connector->max_vfreq -
10638 amdgpu_dm_connector->min_vfreq > 10) {
10640 freesync_capable = true;
10643 } else if (edid && amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_HDMI_TYPE_A) {
10644 i = parse_hdmi_amd_vsdb(amdgpu_dm_connector, edid, &vsdb_info);
10645 if (i >= 0 && vsdb_info.freesync_supported) {
10646 timing = &edid->detailed_timings[i];
10647 data = &timing->data.other_data;
10649 amdgpu_dm_connector->min_vfreq = vsdb_info.min_refresh_rate_hz;
10650 amdgpu_dm_connector->max_vfreq = vsdb_info.max_refresh_rate_hz;
10651 if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10)
10652 freesync_capable = true;
10654 connector->display_info.monitor_range.min_vfreq = vsdb_info.min_refresh_rate_hz;
10655 connector->display_info.monitor_range.max_vfreq = vsdb_info.max_refresh_rate_hz;
10661 dm_con_state->freesync_capable = freesync_capable;
10663 if (connector->vrr_capable_property)
10664 drm_connector_set_vrr_capable_property(connector,
10668 static void amdgpu_dm_set_psr_caps(struct dc_link *link)
10670 uint8_t dpcd_data[EDP_PSR_RECEIVER_CAP_SIZE];
10672 if (!(link->connector_signal & SIGNAL_TYPE_EDP))
10674 if (link->type == dc_connection_none)
10676 if (dm_helpers_dp_read_dpcd(NULL, link, DP_PSR_SUPPORT,
10677 dpcd_data, sizeof(dpcd_data))) {
10678 link->dpcd_caps.psr_caps.psr_version = dpcd_data[0];
10680 if (dpcd_data[0] == 0) {
10681 link->psr_settings.psr_version = DC_PSR_VERSION_UNSUPPORTED;
10682 link->psr_settings.psr_feature_enabled = false;
10684 link->psr_settings.psr_version = DC_PSR_VERSION_1;
10685 link->psr_settings.psr_feature_enabled = true;
10688 DRM_INFO("PSR support:%d\n", link->psr_settings.psr_feature_enabled);
10693 * amdgpu_dm_link_setup_psr() - configure psr link
10694 * @stream: stream state
10696 * Return: true if success
10698 static bool amdgpu_dm_link_setup_psr(struct dc_stream_state *stream)
10700 struct dc_link *link = NULL;
10701 struct psr_config psr_config = {0};
10702 struct psr_context psr_context = {0};
10705 if (stream == NULL)
10708 link = stream->link;
10710 psr_config.psr_version = link->dpcd_caps.psr_caps.psr_version;
10712 if (psr_config.psr_version > 0) {
10713 psr_config.psr_exit_link_training_required = 0x1;
10714 psr_config.psr_frame_capture_indication_req = 0;
10715 psr_config.psr_rfb_setup_time = 0x37;
10716 psr_config.psr_sdp_transmit_line_num_deadline = 0x20;
10717 psr_config.allow_smu_optimizations = 0x0;
10719 ret = dc_link_setup_psr(link, stream, &psr_config, &psr_context);
10722 DRM_DEBUG_DRIVER("PSR link: %d\n", link->psr_settings.psr_feature_enabled);
10728 * amdgpu_dm_psr_enable() - enable psr f/w
10729 * @stream: stream state
10731 * Return: true if success
10733 bool amdgpu_dm_psr_enable(struct dc_stream_state *stream)
10735 struct dc_link *link = stream->link;
10736 unsigned int vsync_rate_hz = 0;
10737 struct dc_static_screen_params params = {0};
10738 /* Calculate the number of static frames before generating an interrupt to enter PSR. */
10741 /* Init fail-safe of 2 static frames */
10742 unsigned int num_frames_static = 2;
10744 DRM_DEBUG_DRIVER("Enabling psr...\n");
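/* refresh rate (Hz) = pixel clock (Hz) / (h_total * v_total) */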
10746 vsync_rate_hz = div64_u64(div64_u64((
10747 stream->timing.pix_clk_100hz * 100),
10748 stream->timing.v_total),
10749 stream->timing.h_total);
10752 /* Calculate number of frames such that at least 30 ms of time has passed. */
10755 if (vsync_rate_hz != 0) {
10756 unsigned int frame_time_microsec = 1000000 / vsync_rate_hz;
10757 num_frames_static = (30000 / frame_time_microsec) + 1;
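/* e.g. at 60 Hz: frame_time_microsec = 16666, so
* num_frames_static = 30000 / 16666 + 1 = 2 frames.
*/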
10760 params.triggers.cursor_update = true;
10761 params.triggers.overlay_update = true;
10762 params.triggers.surface_update = true;
10763 params.num_frames = num_frames_static;
10765 dc_stream_set_static_screen_params(link->ctx->dc,
10769 return dc_link_set_psr_allow_active(link, true, false, false);
10773 * amdgpu_dm_psr_disable() - disable psr f/w
10774 * @stream: stream state
10776 * Return: true if success
10778 static bool amdgpu_dm_psr_disable(struct dc_stream_state *stream)
10781 DRM_DEBUG_DRIVER("Disabling psr...\n");
10783 return dc_link_set_psr_allow_active(stream->link, false, true, false);
10787 * amdgpu_dm_psr_disable_all() - disable psr f/w
10788 * if psr is enabled on any stream
10790 * Return: true if success
10792 static bool amdgpu_dm_psr_disable_all(struct amdgpu_display_manager *dm)
10794 DRM_DEBUG_DRIVER("Disabling psr if psr is enabled on any stream\n");
10795 return dc_set_psr_allow_active(dm->dc, false);
10798 void amdgpu_dm_trigger_timing_sync(struct drm_device *dev)
10800 struct amdgpu_device *adev = drm_to_adev(dev);
10801 struct dc *dc = adev->dm.dc;
10804 mutex_lock(&adev->dm.dc_lock);
10805 if (dc->current_state) {
10806 for (i = 0; i < dc->current_state->stream_count; ++i)
10807 dc->current_state->streams[i]
10808 ->triggered_crtc_reset.enabled =
10809 adev->dm.force_timing_sync;
10811 dm_enable_per_frame_crtc_master_sync(dc->current_state);
10812 dc_trigger_sync(dc, dc->current_state);
10814 mutex_unlock(&adev->dm.dc_lock);
10817 void dm_write_reg_func(const struct dc_context *ctx, uint32_t address,
10818 uint32_t value, const char *func_name)
10820 #ifdef DM_CHECK_ADDR_0
10821 if (address == 0) {
10822 DC_ERR("invalid register write. address = 0");
10826 cgs_write_register(ctx->cgs_device, address, value);
10827 trace_amdgpu_dc_wreg(&ctx->perf_trace->write_count, address, value);
10830 uint32_t dm_read_reg_func(const struct dc_context *ctx, uint32_t address,
10831 const char *func_name)
10834 #ifdef DM_CHECK_ADDR_0
10835 if (address == 0) {
10836 DC_ERR("invalid register read; address = 0\n");
10841 if (ctx->dmub_srv &&
10842 ctx->dmub_srv->reg_helper_offload.gather_in_progress &&
10843 !ctx->dmub_srv->reg_helper_offload.should_burst_write) {
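/* A DMUB register-access gather is in progress; a direct read
* cannot be serviced until the gather completes.
*/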
10848 value = cgs_read_register(ctx->cgs_device, address);
10850 trace_amdgpu_dc_rreg(&ctx->perf_trace->read_count, address, value);
10855 int amdgpu_dm_process_dmub_aux_transfer_sync(struct dc_context *ctx, unsigned int linkIndex,
10856 struct aux_payload *payload, enum aux_return_code_type *operation_result)
10858 struct amdgpu_device *adev = ctx->driver_context;
10861 dc_process_dmub_aux_transfer_async(ctx->dc, linkIndex, payload);
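/* Wait up to 10 s for the DMUB notification path to signal
* dmub_aux_transfer_done with the AUX reply.
*/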
10862 ret = wait_for_completion_interruptible_timeout(&adev->dm.dmub_aux_transfer_done, 10*HZ);
10864 *operation_result = AUX_RET_ERROR_TIMEOUT;
10867 *operation_result = (enum aux_return_code_type)adev->dm.dmub_notify->result;
10869 if (adev->dm.dmub_notify->result == AUX_RET_SUCCESS) {
10870 (*payload->reply) = adev->dm.dmub_notify->aux_reply.command;
10872 /* For read case, copy data to payload */
10873 if (!payload->write && adev->dm.dmub_notify->aux_reply.length &&
10874 (*payload->reply == AUX_TRANSACTION_REPLY_AUX_ACK))
10875 memcpy(payload->data, adev->dm.dmub_notify->aux_reply.data,
10876 adev->dm.dmub_notify->aux_reply.length);
10879 return adev->dm.dmub_notify->aux_reply.length;