2 * Copyright 2015 Advanced Micro Devices, Inc.
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
26 /* The caprices of the preprocessor require that this be declared right here */
27 #define CREATE_TRACE_POINTS
29 #include "dm_services_types.h"
31 #include "dc_link_dp.h"
32 #include "link_enc_cfg.h"
33 #include "dc/inc/core_types.h"
34 #include "dal_asic_id.h"
35 #include "dmub/dmub_srv.h"
36 #include "dc/inc/hw/dmcu.h"
37 #include "dc/inc/hw/abm.h"
38 #include "dc/dc_dmub_srv.h"
39 #include "dc/dc_edid_parser.h"
40 #include "dc/dc_stat.h"
41 #include "amdgpu_dm_trace.h"
45 #include "amdgpu_display.h"
46 #include "amdgpu_ucode.h"
48 #include "amdgpu_dm.h"
49 #ifdef CONFIG_DRM_AMD_DC_HDCP
50 #include "amdgpu_dm_hdcp.h"
51 #include <drm/drm_hdcp.h>
53 #include "amdgpu_pm.h"
54 #include "amdgpu_atombios.h"
56 #include "amd_shared.h"
57 #include "amdgpu_dm_irq.h"
58 #include "dm_helpers.h"
59 #include "amdgpu_dm_mst_types.h"
60 #if defined(CONFIG_DEBUG_FS)
61 #include "amdgpu_dm_debugfs.h"
63 #include "amdgpu_dm_psr.h"
65 #include "ivsrcid/ivsrcid_vislands30.h"
67 #include "i2caux_interface.h"
68 #include <linux/module.h>
69 #include <linux/moduleparam.h>
70 #include <linux/types.h>
71 #include <linux/pm_runtime.h>
72 #include <linux/pci.h>
73 #include <linux/firmware.h>
74 #include <linux/component.h>
76 #include <drm/drm_atomic.h>
77 #include <drm/drm_atomic_uapi.h>
78 #include <drm/drm_atomic_helper.h>
79 #include <drm/dp/drm_dp_mst_helper.h>
80 #include <drm/drm_fb_helper.h>
81 #include <drm/drm_fourcc.h>
82 #include <drm/drm_edid.h>
83 #include <drm/drm_vblank.h>
84 #include <drm/drm_audio_component.h>
86 #if defined(CONFIG_DRM_AMD_DC_DCN)
87 #include "ivsrcid/dcn/irqsrcs_dcn_1_0.h"
89 #include "dcn/dcn_1_0_offset.h"
90 #include "dcn/dcn_1_0_sh_mask.h"
91 #include "soc15_hw_ip.h"
92 #include "vega10_ip_offset.h"
94 #include "soc15_common.h"
97 #include "modules/inc/mod_freesync.h"
98 #include "modules/power/power_helpers.h"
99 #include "modules/inc/mod_info_packet.h"
101 #define FIRMWARE_RENOIR_DMUB "amdgpu/renoir_dmcub.bin"
102 MODULE_FIRMWARE(FIRMWARE_RENOIR_DMUB);
103 #define FIRMWARE_SIENNA_CICHLID_DMUB "amdgpu/sienna_cichlid_dmcub.bin"
104 MODULE_FIRMWARE(FIRMWARE_SIENNA_CICHLID_DMUB);
105 #define FIRMWARE_NAVY_FLOUNDER_DMUB "amdgpu/navy_flounder_dmcub.bin"
106 MODULE_FIRMWARE(FIRMWARE_NAVY_FLOUNDER_DMUB);
107 #define FIRMWARE_GREEN_SARDINE_DMUB "amdgpu/green_sardine_dmcub.bin"
108 MODULE_FIRMWARE(FIRMWARE_GREEN_SARDINE_DMUB);
109 #define FIRMWARE_VANGOGH_DMUB "amdgpu/vangogh_dmcub.bin"
110 MODULE_FIRMWARE(FIRMWARE_VANGOGH_DMUB);
111 #define FIRMWARE_DIMGREY_CAVEFISH_DMUB "amdgpu/dimgrey_cavefish_dmcub.bin"
112 MODULE_FIRMWARE(FIRMWARE_DIMGREY_CAVEFISH_DMUB);
113 #define FIRMWARE_BEIGE_GOBY_DMUB "amdgpu/beige_goby_dmcub.bin"
114 MODULE_FIRMWARE(FIRMWARE_BEIGE_GOBY_DMUB);
115 #define FIRMWARE_YELLOW_CARP_DMUB "amdgpu/yellow_carp_dmcub.bin"
116 MODULE_FIRMWARE(FIRMWARE_YELLOW_CARP_DMUB);
118 #define FIRMWARE_RAVEN_DMCU "amdgpu/raven_dmcu.bin"
119 MODULE_FIRMWARE(FIRMWARE_RAVEN_DMCU);
121 #define FIRMWARE_NAVI12_DMCU "amdgpu/navi12_dmcu.bin"
122 MODULE_FIRMWARE(FIRMWARE_NAVI12_DMCU);
124 /* Number of bytes in PSP header for firmware. */
125 #define PSP_HEADER_BYTES 0x100
127 /* Number of bytes in PSP footer for firmware. */
128 #define PSP_FOOTER_BYTES 0x100
133 * The AMDgpu display manager, **amdgpu_dm** (or even simpler,
134 * **dm**) sits between DRM and DC. It acts as a liaison, converting DRM
135 * requests into DC requests, and DC responses into DRM responses.
137 * The root control structure is &struct amdgpu_display_manager.
140 /* basic init/fini API */
141 static int amdgpu_dm_init(struct amdgpu_device *adev);
142 static void amdgpu_dm_fini(struct amdgpu_device *adev);
143 static bool is_freesync_video_mode(const struct drm_display_mode *mode, struct amdgpu_dm_connector *aconnector);
145 static enum drm_mode_subconnector get_subconnector_type(struct dc_link *link)
147 switch (link->dpcd_caps.dongle_type) {
148 case DISPLAY_DONGLE_NONE:
149 return DRM_MODE_SUBCONNECTOR_Native;
150 case DISPLAY_DONGLE_DP_VGA_CONVERTER:
151 return DRM_MODE_SUBCONNECTOR_VGA;
152 case DISPLAY_DONGLE_DP_DVI_CONVERTER:
153 case DISPLAY_DONGLE_DP_DVI_DONGLE:
154 return DRM_MODE_SUBCONNECTOR_DVID;
155 case DISPLAY_DONGLE_DP_HDMI_CONVERTER:
156 case DISPLAY_DONGLE_DP_HDMI_DONGLE:
157 return DRM_MODE_SUBCONNECTOR_HDMIA;
158 case DISPLAY_DONGLE_DP_HDMI_MISMATCHED_DONGLE:
160 return DRM_MODE_SUBCONNECTOR_Unknown;
164 static void update_subconnector_property(struct amdgpu_dm_connector *aconnector)
166 struct dc_link *link = aconnector->dc_link;
167 struct drm_connector *connector = &aconnector->base;
168 enum drm_mode_subconnector subconnector = DRM_MODE_SUBCONNECTOR_Unknown;
170 if (connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort)
173 if (aconnector->dc_sink)
174 subconnector = get_subconnector_type(link);
176 drm_object_property_set_value(&connector->base,
177 connector->dev->mode_config.dp_subconnector_property,
182 * initializes drm_device display related structures, based on the information
183 * provided by DAL. The drm structures are: drm_crtc, drm_connector,
184 * drm_encoder, drm_mode_config
186 * Returns 0 on success
188 static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev);
189 /* removes and deallocates the drm structures, created by the above function */
190 static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm);
192 static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
193 struct drm_plane *plane,
194 unsigned long possible_crtcs,
195 const struct dc_plane_cap *plane_cap);
196 static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
197 struct drm_plane *plane,
198 uint32_t link_index);
199 static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
200 struct amdgpu_dm_connector *amdgpu_dm_connector,
202 struct amdgpu_encoder *amdgpu_encoder);
203 static int amdgpu_dm_encoder_init(struct drm_device *dev,
204 struct amdgpu_encoder *aencoder,
205 uint32_t link_index);
207 static int amdgpu_dm_connector_get_modes(struct drm_connector *connector);
209 static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state);
211 static int amdgpu_dm_atomic_check(struct drm_device *dev,
212 struct drm_atomic_state *state);
214 static void handle_cursor_update(struct drm_plane *plane,
215 struct drm_plane_state *old_plane_state);
217 static const struct drm_format_info *
218 amd_get_format_info(const struct drm_mode_fb_cmd2 *cmd);
220 static void handle_hpd_irq_helper(struct amdgpu_dm_connector *aconnector);
221 static void handle_hpd_rx_irq(void *param);
224 is_timing_unchanged_for_freesync(struct drm_crtc_state *old_crtc_state,
225 struct drm_crtc_state *new_crtc_state);
227 * dm_vblank_get_counter
230 * Get counter for number of vertical blanks
233 * struct amdgpu_device *adev - [in] desired amdgpu device
234 * int crtc - [in] which CRTC to get the counter from
237 * Counter for vertical blanks
239 static u32 dm_vblank_get_counter(struct amdgpu_device *adev, int crtc)
241 if (crtc >= adev->mode_info.num_crtc)
244 struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];
246 if (acrtc->dm_irq_params.stream == NULL) {
247 DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
252 return dc_stream_get_vblank_counter(acrtc->dm_irq_params.stream);
256 static int dm_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
257 u32 *vbl, u32 *position)
259 uint32_t v_blank_start, v_blank_end, h_position, v_position;
261 if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc))
264 struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];
266 if (acrtc->dm_irq_params.stream == NULL) {
267 DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
273 * TODO rework base driver to use values directly.
274 * for now parse it back into reg-format
276 dc_stream_get_scanoutpos(acrtc->dm_irq_params.stream,
282 *position = v_position | (h_position << 16);
283 *vbl = v_blank_start | (v_blank_end << 16);
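/*
 * Informative sketch (not part of the driver flow): callers of
 * dm_crtc_get_scanoutpos() are expected to unpack the register-style values
 * returned through *position and *vbl exactly inversely to the packing above:
 *
 *	v_pos         = position & 0xFFFF;
 *	h_pos         = position >> 16;
 *	v_blank_start = vbl & 0xFFFF;
 *	v_blank_end   = vbl >> 16;
 */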
289 static bool dm_is_idle(void *handle)
295 static int dm_wait_for_idle(void *handle)
301 static bool dm_check_soft_reset(void *handle)
306 static int dm_soft_reset(void *handle)
312 static struct amdgpu_crtc *
313 get_crtc_by_otg_inst(struct amdgpu_device *adev,
316 struct drm_device *dev = adev_to_drm(adev);
317 struct drm_crtc *crtc;
318 struct amdgpu_crtc *amdgpu_crtc;
320 if (WARN_ON(otg_inst == -1))
321 return adev->mode_info.crtcs[0];
323 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
324 amdgpu_crtc = to_amdgpu_crtc(crtc);
326 if (amdgpu_crtc->otg_inst == otg_inst)
333 static inline bool amdgpu_dm_vrr_active_irq(struct amdgpu_crtc *acrtc)
335 return acrtc->dm_irq_params.freesync_config.state ==
336 VRR_STATE_ACTIVE_VARIABLE ||
337 acrtc->dm_irq_params.freesync_config.state ==
338 VRR_STATE_ACTIVE_FIXED;
341 static inline bool amdgpu_dm_vrr_active(struct dm_crtc_state *dm_state)
343 return dm_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE ||
344 dm_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED;
347 static inline bool is_dc_timing_adjust_needed(struct dm_crtc_state *old_state,
348 struct dm_crtc_state *new_state)
350 if (new_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED)
352 else if (amdgpu_dm_vrr_active(old_state) != amdgpu_dm_vrr_active(new_state))
359 * dm_pflip_high_irq() - Handle pageflip interrupt
360 * @interrupt_params: used for determining the CRTC instance
362 * Handles the pageflip interrupt by notifying all interested parties
363 * that the pageflip has been completed.
365 static void dm_pflip_high_irq(void *interrupt_params)
367 struct amdgpu_crtc *amdgpu_crtc;
368 struct common_irq_params *irq_params = interrupt_params;
369 struct amdgpu_device *adev = irq_params->adev;
371 struct drm_pending_vblank_event *e;
372 uint32_t vpos, hpos, v_blank_start, v_blank_end;
375 amdgpu_crtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_PFLIP);
377 /* IRQ could occur when in initial stage */
378 /* TODO work and BO cleanup */
379 if (amdgpu_crtc == NULL) {
380 DC_LOG_PFLIP("CRTC is null, returning.\n");
384 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
386 if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED) {
387 DC_LOG_PFLIP("amdgpu_crtc->pflip_status = %d != AMDGPU_FLIP_SUBMITTED(%d) on crtc:%d[%p]\n",
388 amdgpu_crtc->pflip_status,
389 AMDGPU_FLIP_SUBMITTED,
390 amdgpu_crtc->crtc_id,
392 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
396 /* page flip completed. */
397 e = amdgpu_crtc->event;
398 amdgpu_crtc->event = NULL;
402 vrr_active = amdgpu_dm_vrr_active_irq(amdgpu_crtc);
404 /* Fixed refresh rate, or VRR scanout position outside front-porch? */
406 !dc_stream_get_scanoutpos(amdgpu_crtc->dm_irq_params.stream, &v_blank_start,
407 &v_blank_end, &hpos, &vpos) ||
408 (vpos < v_blank_start)) {
409 /* Update to correct count and vblank timestamp if racing with
410 * vblank irq. This also updates to the correct vblank timestamp
411 * even in VRR mode, as scanout is past the front-porch atm.
413 drm_crtc_accurate_vblank_count(&amdgpu_crtc->base);
415 /* Wake up userspace by sending the pageflip event with proper
416 * count and timestamp of vblank of flip completion.
419 drm_crtc_send_vblank_event(&amdgpu_crtc->base, e);
421 /* Event sent, so done with vblank for this flip */
422 drm_crtc_vblank_put(&amdgpu_crtc->base);
425 /* VRR active and inside front-porch: vblank count and
426 * timestamp for pageflip event will only be up to date after
427 * drm_crtc_handle_vblank() has been executed from late vblank
428 * irq handler after start of back-porch (vline 0). We queue the
429 * pageflip event for send-out by drm_crtc_handle_vblank() with
430 * updated timestamp and count, once it runs after us.
432 * We need to open-code this instead of using the helper
433 * drm_crtc_arm_vblank_event(), as that helper would
434 * call drm_crtc_accurate_vblank_count(), which we must
435 * not call in VRR mode while we are in front-porch!
438 /* sequence will be replaced by real count during send-out. */
439 e->sequence = drm_crtc_vblank_count(&amdgpu_crtc->base);
440 e->pipe = amdgpu_crtc->crtc_id;
442 list_add_tail(&e->base.link, &adev_to_drm(adev)->vblank_event_list);
446 /* Keep track of the vblank of this flip for flip throttling. We use the
447 * cooked hw counter, as that one is incremented at the start of this vblank
448 * of pageflip completion, so last_flip_vblank is the forbidden count
449 * for queueing new pageflips while vsync + VRR is enabled.
451 amdgpu_crtc->dm_irq_params.last_flip_vblank =
452 amdgpu_get_vblank_counter_kms(&amdgpu_crtc->base);
454 amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE;
455 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
457 DC_LOG_PFLIP("crtc:%d[%p], pflip_stat:AMDGPU_FLIP_NONE, vrr[%d]-fp %d\n",
458 amdgpu_crtc->crtc_id, amdgpu_crtc,
459 vrr_active, (int) !e);
462 static void dm_vupdate_high_irq(void *interrupt_params)
464 struct common_irq_params *irq_params = interrupt_params;
465 struct amdgpu_device *adev = irq_params->adev;
466 struct amdgpu_crtc *acrtc;
467 struct drm_device *drm_dev;
468 struct drm_vblank_crtc *vblank;
469 ktime_t frame_duration_ns, previous_timestamp;
473 acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VUPDATE);
476 vrr_active = amdgpu_dm_vrr_active_irq(acrtc);
477 drm_dev = acrtc->base.dev;
478 vblank = &drm_dev->vblank[acrtc->base.index];
479 previous_timestamp = atomic64_read(&irq_params->previous_timestamp);
480 frame_duration_ns = vblank->time - previous_timestamp;
482 if (frame_duration_ns > 0) {
483 trace_amdgpu_refresh_rate_track(acrtc->base.index,
485 ktime_divns(NSEC_PER_SEC, frame_duration_ns));
486 atomic64_set(&irq_params->previous_timestamp, vblank->time);
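/*
 * Worked example (informative): a frame duration of 16666666 ns gives
 * ktime_divns(NSEC_PER_SEC, 16666666) == 60, i.e. the trace event above
 * records the momentary refresh rate in Hz.
 */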
489 DC_LOG_VBLANK("crtc:%d, vupdate-vrr:%d\n",
493 /* Core vblank handling is done here after the end of the front-porch in
494 * vrr mode, as vblank timestamping only gives valid results
495 * now that it runs after the front-porch. This will also deliver
496 * any page-flip completion events that have been queued to us
497 * if a pageflip happened inside the front-porch.
500 drm_crtc_handle_vblank(&acrtc->base);
502 /* BTR processing for pre-DCE12 ASICs */
503 if (acrtc->dm_irq_params.stream &&
504 adev->family < AMDGPU_FAMILY_AI) {
505 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
506 mod_freesync_handle_v_update(
507 adev->dm.freesync_module,
508 acrtc->dm_irq_params.stream,
509 &acrtc->dm_irq_params.vrr_params);
511 dc_stream_adjust_vmin_vmax(
513 acrtc->dm_irq_params.stream,
514 &acrtc->dm_irq_params.vrr_params.adjust);
515 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
522 * dm_crtc_high_irq() - Handles CRTC interrupt
523 * @interrupt_params: used for determining the CRTC instance
525 * Handles the CRTC/VSYNC interrupt by notifying DRM's VBLANK event handler.
528 static void dm_crtc_high_irq(void *interrupt_params)
530 struct common_irq_params *irq_params = interrupt_params;
531 struct amdgpu_device *adev = irq_params->adev;
532 struct amdgpu_crtc *acrtc;
536 acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VBLANK);
540 vrr_active = amdgpu_dm_vrr_active_irq(acrtc);
542 DC_LOG_VBLANK("crtc:%d, vupdate-vrr:%d, planes:%d\n", acrtc->crtc_id,
543 vrr_active, acrtc->dm_irq_params.active_planes);
546 * Core vblank handling at the start of the front-porch is only possible
547 * in non-vrr mode, as only there does vblank timestamping give
548 * valid results while done in the front-porch. Otherwise defer it
549 * to dm_vupdate_high_irq after the end of the front-porch.
552 drm_crtc_handle_vblank(&acrtc->base);
555 * The following must happen at the start of vblank, for crc
556 * computation and below-the-range btr support in vrr mode.
558 amdgpu_dm_crtc_handle_crc_irq(&acrtc->base);
560 /* BTR updates need to happen before VUPDATE on Vega and above. */
561 if (adev->family < AMDGPU_FAMILY_AI)
564 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
566 if (acrtc->dm_irq_params.stream &&
567 acrtc->dm_irq_params.vrr_params.supported &&
568 acrtc->dm_irq_params.freesync_config.state ==
569 VRR_STATE_ACTIVE_VARIABLE) {
570 mod_freesync_handle_v_update(adev->dm.freesync_module,
571 acrtc->dm_irq_params.stream,
572 &acrtc->dm_irq_params.vrr_params);
574 dc_stream_adjust_vmin_vmax(adev->dm.dc, acrtc->dm_irq_params.stream,
575 &acrtc->dm_irq_params.vrr_params.adjust);
579 * If there aren't any active_planes then the DCN HUBP may be clock-gated.
580 * In that case, pageflip completion interrupts won't fire and pageflip
581 * completion events won't get delivered. Prevent this by sending
582 * pending pageflip events from here if a flip is still pending.
584 * If any planes are enabled, use dm_pflip_high_irq() instead, to
585 * avoid race conditions between flip programming and completion,
586 * which could cause too early flip completion events.
588 if (adev->family >= AMDGPU_FAMILY_RV &&
589 acrtc->pflip_status == AMDGPU_FLIP_SUBMITTED &&
590 acrtc->dm_irq_params.active_planes == 0) {
592 drm_crtc_send_vblank_event(&acrtc->base, acrtc->event);
594 drm_crtc_vblank_put(&acrtc->base);
596 acrtc->pflip_status = AMDGPU_FLIP_NONE;
599 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
602 #if defined(CONFIG_DRM_AMD_DC_DCN)
603 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
605 * dm_dcn_vertical_interrupt0_high_irq() - Handles OTG Vertical interrupt0 for
606 * DCN generation ASICs
607 * @interrupt_params: interrupt parameters
609 * Used to set the CRC window and read out the CRC value at the vertical line 0 position
611 static void dm_dcn_vertical_interrupt0_high_irq(void *interrupt_params)
613 struct common_irq_params *irq_params = interrupt_params;
614 struct amdgpu_device *adev = irq_params->adev;
615 struct amdgpu_crtc *acrtc;
617 acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VLINE0);
622 amdgpu_dm_crtc_handle_crc_window_irq(&acrtc->base);
624 #endif /* CONFIG_DRM_AMD_SECURE_DISPLAY */
627 * dmub_aux_setconfig_callback - Callback for AUX or SET_CONFIG command.
628 * @adev: amdgpu_device pointer
629 * @notify: dmub notification structure
631 * Dmub AUX or SET_CONFIG command completion processing callback
632 * Copies the dmub notification to DM, which is to be read by the AUX-command-
633 * issuing thread, and also signals the event to wake up that thread.
635 void dmub_aux_setconfig_callback(struct amdgpu_device *adev, struct dmub_notification *notify)
637 if (adev->dm.dmub_notify)
638 memcpy(adev->dm.dmub_notify, notify, sizeof(struct dmub_notification));
639 if (notify->type == DMUB_NOTIFICATION_AUX_REPLY)
640 complete(&adev->dm.dmub_aux_transfer_done);
644 * dmub_hpd_callback - DMUB HPD interrupt processing callback.
645 * @adev: amdgpu_device pointer
646 * @notify: dmub notification structure
648 * Dmub Hpd interrupt processing callback. Gets the display index through the
649 * link index and calls a helper to do the processing.
651 void dmub_hpd_callback(struct amdgpu_device *adev, struct dmub_notification *notify)
653 struct amdgpu_dm_connector *aconnector;
654 struct amdgpu_dm_connector *hpd_aconnector = NULL;
655 struct drm_connector *connector;
656 struct drm_connector_list_iter iter;
657 struct dc_link *link;
658 uint8_t link_index = 0;
659 struct drm_device *dev = adev->dm.ddev;
664 if (notify == NULL) {
665 DRM_ERROR("DMUB HPD callback notification was NULL");
669 if (notify->link_index >= adev->dm.dc->link_count) {
670 DRM_ERROR("DMUB HPD index (%u) is abnormal", notify->link_index);
674 link_index = notify->link_index;
675 link = adev->dm.dc->links[link_index];
677 drm_connector_list_iter_begin(dev, &iter);
678 drm_for_each_connector_iter(connector, &iter) {
679 aconnector = to_amdgpu_dm_connector(connector);
680 if (link && aconnector->dc_link == link) {
681 DRM_INFO("DMUB HPD callback: link_index=%u\n", link_index);
682 hpd_aconnector = aconnector;
686 drm_connector_list_iter_end(&iter);
688 if (hpd_aconnector) {
689 if (notify->type == DMUB_NOTIFICATION_HPD)
690 handle_hpd_irq_helper(hpd_aconnector);
691 else if (notify->type == DMUB_NOTIFICATION_HPD_IRQ)
692 handle_hpd_rx_irq(hpd_aconnector);
697 * register_dmub_notify_callback - Sets callback for DMUB notify
698 * @adev: amdgpu_device pointer
699 * @type: Type of dmub notification
700 * @callback: Dmub interrupt callback function
701 * @dmub_int_thread_offload: offload indicator
703 * API to register a dmub callback handler for a dmub notification
704 * Also sets an indicator of whether the callback processing is to be offloaded
705 * to the dmub interrupt handling thread.
706 * Return: true if successfully registered, false otherwise
708 bool register_dmub_notify_callback(struct amdgpu_device *adev, enum dmub_notification_type type,
709 dmub_notify_interrupt_callback_t callback, bool dmub_int_thread_offload)
711 if (callback != NULL && type < ARRAY_SIZE(adev->dm.dmub_thread_offload)) {
712 adev->dm.dmub_callback[type] = callback;
713 adev->dm.dmub_thread_offload[type] = dmub_int_thread_offload;
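/*
 * Usage sketch (illustrative; mirrors the registrations done later in
 * amdgpu_dm_init()): the last argument selects direct handling versus
 * offloading the notification to the delayed HPD workqueue.
 *
 *	if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_AUX_REPLY,
 *					   dmub_aux_setconfig_callback, false))
 *		DRM_ERROR("amdgpu: failed to register dmub aux callback");
 */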
720 static void dm_handle_hpd_work(struct work_struct *work)
722 struct dmub_hpd_work *dmub_hpd_wrk;
724 dmub_hpd_wrk = container_of(work, struct dmub_hpd_work, handle_hpd_work);
726 if (!dmub_hpd_wrk->dmub_notify) {
727 DRM_ERROR("dmub_hpd_wrk dmub_notify is NULL");
731 if (dmub_hpd_wrk->dmub_notify->type < ARRAY_SIZE(dmub_hpd_wrk->adev->dm.dmub_callback)) {
732 dmub_hpd_wrk->adev->dm.dmub_callback[dmub_hpd_wrk->dmub_notify->type](dmub_hpd_wrk->adev,
733 dmub_hpd_wrk->dmub_notify);
736 kfree(dmub_hpd_wrk->dmub_notify);
741 #define DMUB_TRACE_MAX_READ 64
743 * dm_dmub_outbox1_low_irq() - Handles Outbox interrupt
744 * @interrupt_params: used for determining the Outbox instance
746 * Handles the Outbox Interrupt
749 static void dm_dmub_outbox1_low_irq(void *interrupt_params)
751 struct dmub_notification notify;
752 struct common_irq_params *irq_params = interrupt_params;
753 struct amdgpu_device *adev = irq_params->adev;
754 struct amdgpu_display_manager *dm = &adev->dm;
755 struct dmcub_trace_buf_entry entry = { 0 };
757 struct dmub_hpd_work *dmub_hpd_wrk;
758 struct dc_link *plink = NULL;
760 if (dc_enable_dmub_notifications(adev->dm.dc) &&
761 irq_params->irq_src == DC_IRQ_SOURCE_DMCUB_OUTBOX) {
764 dc_stat_get_dmub_notification(adev->dm.dc, &notify);
765 if (notify.type >= ARRAY_SIZE(dm->dmub_thread_offload)) {
766 DRM_ERROR("DM: notify type %d invalid!", notify.type);
769 if (!dm->dmub_callback[notify.type]) {
770 DRM_DEBUG_DRIVER("DMUB notification skipped, no handler: type=%d\n", notify.type);
773 if (dm->dmub_thread_offload[notify.type] == true) {
774 dmub_hpd_wrk = kzalloc(sizeof(*dmub_hpd_wrk), GFP_ATOMIC);
776 DRM_ERROR("Failed to allocate dmub_hpd_wrk");
779 dmub_hpd_wrk->dmub_notify = kzalloc(sizeof(struct dmub_notification), GFP_ATOMIC);
780 if (!dmub_hpd_wrk->dmub_notify) {
782 DRM_ERROR("Failed to allocate dmub_hpd_wrk->dmub_notify");
785 INIT_WORK(&dmub_hpd_wrk->handle_hpd_work, dm_handle_hpd_work);
786 if (dmub_hpd_wrk->dmub_notify)
787 memcpy(dmub_hpd_wrk->dmub_notify, &notify, sizeof(struct dmub_notification));
788 dmub_hpd_wrk->adev = adev;
789 if (notify.type == DMUB_NOTIFICATION_HPD) {
790 plink = adev->dm.dc->links[notify.link_index];
793 notify.hpd_status == DP_HPD_PLUG;
796 queue_work(adev->dm.delayed_hpd_wq, &dmub_hpd_wrk->handle_hpd_work);
798 dm->dmub_callback[notify.type](adev, &notify);
800 } while (notify.pending_notification);
805 if (dc_dmub_srv_get_dmub_outbox0_msg(dm->dc, &entry)) {
806 trace_amdgpu_dmub_trace_high_irq(entry.trace_code, entry.tick_count,
807 entry.param0, entry.param1);
809 DRM_DEBUG_DRIVER("trace_code:%u, tick_count:%u, param0:%u, param1:%u\n",
810 entry.trace_code, entry.tick_count, entry.param0, entry.param1);
816 } while (count <= DMUB_TRACE_MAX_READ);
818 if (count > DMUB_TRACE_MAX_READ)
819 DRM_DEBUG_DRIVER("Warning: count > DMUB_TRACE_MAX_READ");
821 #endif /* CONFIG_DRM_AMD_DC_DCN */
823 static int dm_set_clockgating_state(void *handle,
824 enum amd_clockgating_state state)
829 static int dm_set_powergating_state(void *handle,
830 enum amd_powergating_state state)
835 /* Prototypes of private functions */
836 static int dm_early_init(void *handle);
838 /* Allocate memory for FBC compressed data */
839 static void amdgpu_dm_fbc_init(struct drm_connector *connector)
841 struct drm_device *dev = connector->dev;
842 struct amdgpu_device *adev = drm_to_adev(dev);
843 struct dm_compressor_info *compressor = &adev->dm.compressor;
844 struct amdgpu_dm_connector *aconn = to_amdgpu_dm_connector(connector);
845 struct drm_display_mode *mode;
846 unsigned long max_size = 0;
848 if (adev->dm.dc->fbc_compressor == NULL)
851 if (aconn->dc_link->connector_signal != SIGNAL_TYPE_EDP)
854 if (compressor->bo_ptr)
858 list_for_each_entry(mode, &connector->modes, head) {
859 if (max_size < mode->htotal * mode->vtotal)
860 max_size = mode->htotal * mode->vtotal;
864 int r = amdgpu_bo_create_kernel(adev, max_size * 4, PAGE_SIZE,
865 AMDGPU_GEM_DOMAIN_GTT, &compressor->bo_ptr,
866 &compressor->gpu_addr, &compressor->cpu_addr);
869 DRM_ERROR("DM: Failed to initialize FBC\n");
871 adev->dm.dc->ctx->fbc_gpu_addr = compressor->gpu_addr;
872 DRM_INFO("DM: FBC alloc %lu\n", max_size * 4);
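/*
 * Sizing example (informative, assuming standard CEA 1920x1080 timings with
 * htotal = 2200 and vtotal = 1125): max_size = 2200 * 1125 = 2475000, so the
 * GTT buffer allocated above is max_size * 4 bytes, roughly 9.9 MB of
 * compressed-frame storage.
 */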
879 static int amdgpu_dm_audio_component_get_eld(struct device *kdev, int port,
880 int pipe, bool *enabled,
881 unsigned char *buf, int max_bytes)
883 struct drm_device *dev = dev_get_drvdata(kdev);
884 struct amdgpu_device *adev = drm_to_adev(dev);
885 struct drm_connector *connector;
886 struct drm_connector_list_iter conn_iter;
887 struct amdgpu_dm_connector *aconnector;
892 mutex_lock(&adev->dm.audio_lock);
894 drm_connector_list_iter_begin(dev, &conn_iter);
895 drm_for_each_connector_iter(connector, &conn_iter) {
896 aconnector = to_amdgpu_dm_connector(connector);
897 if (aconnector->audio_inst != port)
901 ret = drm_eld_size(connector->eld);
902 memcpy(buf, connector->eld, min(max_bytes, ret));
906 drm_connector_list_iter_end(&conn_iter);
908 mutex_unlock(&adev->dm.audio_lock);
910 DRM_DEBUG_KMS("Get ELD : idx=%d ret=%d en=%d\n", port, ret, *enabled);
915 static const struct drm_audio_component_ops amdgpu_dm_audio_component_ops = {
916 .get_eld = amdgpu_dm_audio_component_get_eld,
919 static int amdgpu_dm_audio_component_bind(struct device *kdev,
920 struct device *hda_kdev, void *data)
922 struct drm_device *dev = dev_get_drvdata(kdev);
923 struct amdgpu_device *adev = drm_to_adev(dev);
924 struct drm_audio_component *acomp = data;
926 acomp->ops = &amdgpu_dm_audio_component_ops;
928 adev->dm.audio_component = acomp;
933 static void amdgpu_dm_audio_component_unbind(struct device *kdev,
934 struct device *hda_kdev, void *data)
936 struct drm_device *dev = dev_get_drvdata(kdev);
937 struct amdgpu_device *adev = drm_to_adev(dev);
938 struct drm_audio_component *acomp = data;
942 adev->dm.audio_component = NULL;
945 static const struct component_ops amdgpu_dm_audio_component_bind_ops = {
946 .bind = amdgpu_dm_audio_component_bind,
947 .unbind = amdgpu_dm_audio_component_unbind,
950 static int amdgpu_dm_audio_init(struct amdgpu_device *adev)
957 adev->mode_info.audio.enabled = true;
959 adev->mode_info.audio.num_pins = adev->dm.dc->res_pool->audio_count;
961 for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
962 adev->mode_info.audio.pin[i].channels = -1;
963 adev->mode_info.audio.pin[i].rate = -1;
964 adev->mode_info.audio.pin[i].bits_per_sample = -1;
965 adev->mode_info.audio.pin[i].status_bits = 0;
966 adev->mode_info.audio.pin[i].category_code = 0;
967 adev->mode_info.audio.pin[i].connected = false;
968 adev->mode_info.audio.pin[i].id =
969 adev->dm.dc->res_pool->audios[i]->inst;
970 adev->mode_info.audio.pin[i].offset = 0;
973 ret = component_add(adev->dev, &amdgpu_dm_audio_component_bind_ops);
977 adev->dm.audio_registered = true;
982 static void amdgpu_dm_audio_fini(struct amdgpu_device *adev)
987 if (!adev->mode_info.audio.enabled)
990 if (adev->dm.audio_registered) {
991 component_del(adev->dev, &amdgpu_dm_audio_component_bind_ops);
992 adev->dm.audio_registered = false;
995 /* TODO: Disable audio? */
997 adev->mode_info.audio.enabled = false;
1000 static void amdgpu_dm_audio_eld_notify(struct amdgpu_device *adev, int pin)
1002 struct drm_audio_component *acomp = adev->dm.audio_component;
1004 if (acomp && acomp->audio_ops && acomp->audio_ops->pin_eld_notify) {
1005 DRM_DEBUG_KMS("Notify ELD: %d\n", pin);
1007 acomp->audio_ops->pin_eld_notify(acomp->audio_ops->audio_ptr,
1012 static int dm_dmub_hw_init(struct amdgpu_device *adev)
1014 const struct dmcub_firmware_header_v1_0 *hdr;
1015 struct dmub_srv *dmub_srv = adev->dm.dmub_srv;
1016 struct dmub_srv_fb_info *fb_info = adev->dm.dmub_fb_info;
1017 const struct firmware *dmub_fw = adev->dm.dmub_fw;
1018 struct dmcu *dmcu = adev->dm.dc->res_pool->dmcu;
1019 struct abm *abm = adev->dm.dc->res_pool->abm;
1020 struct dmub_srv_hw_params hw_params;
1021 enum dmub_status status;
1022 const unsigned char *fw_inst_const, *fw_bss_data;
1023 uint32_t i, fw_inst_const_size, fw_bss_data_size;
1024 bool has_hw_support;
1025 struct dc *dc = adev->dm.dc;
1028 /* DMUB isn't supported on the ASIC. */
1032 DRM_ERROR("No framebuffer info for DMUB service.\n");
1037 /* Firmware required for DMUB support. */
1038 DRM_ERROR("No firmware provided for DMUB.\n");
1042 status = dmub_srv_has_hw_support(dmub_srv, &has_hw_support);
1043 if (status != DMUB_STATUS_OK) {
1044 DRM_ERROR("Error checking HW support for DMUB: %d\n", status);
1048 if (!has_hw_support) {
1049 DRM_INFO("DMUB unsupported on ASIC\n");
1053 hdr = (const struct dmcub_firmware_header_v1_0 *)dmub_fw->data;
1055 fw_inst_const = dmub_fw->data +
1056 le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
1059 fw_bss_data = dmub_fw->data +
1060 le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
1061 le32_to_cpu(hdr->inst_const_bytes);
1063 /* Copy firmware and bios info into FB memory. */
1064 fw_inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
1065 PSP_HEADER_BYTES - PSP_FOOTER_BYTES;
1067 fw_bss_data_size = le32_to_cpu(hdr->bss_data_bytes);
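/*
 * Layout sketch (an assumption inferred from the PSP_HEADER_BYTES and
 * PSP_FOOTER_BYTES definitions near the top of this file): the signed
 * firmware image wraps the instruction constants as
 *
 *	[ PSP header: 0x100 ][ fw_inst_const payload ][ PSP footer: 0x100 ]
 *
 * which is why 0x200 bytes are subtracted from inst_const_bytes above.
 */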
1069 /* if adev->firmware.load_type == AMDGPU_FW_LOAD_PSP,
1070 * amdgpu_ucode_init_single_fw will load dmub firmware
1071 * fw_inst_const part to cw0; otherwise, the firmware back door load
1072 * will be done by dm_dmub_hw_init
1074 if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
1075 memcpy(fb_info->fb[DMUB_WINDOW_0_INST_CONST].cpu_addr, fw_inst_const,
1076 fw_inst_const_size);
1079 if (fw_bss_data_size)
1080 memcpy(fb_info->fb[DMUB_WINDOW_2_BSS_DATA].cpu_addr,
1081 fw_bss_data, fw_bss_data_size);
1083 /* Copy firmware bios info into FB memory. */
1084 memcpy(fb_info->fb[DMUB_WINDOW_3_VBIOS].cpu_addr, adev->bios,
1087 /* Reset regions that need to be reset. */
1088 memset(fb_info->fb[DMUB_WINDOW_4_MAILBOX].cpu_addr, 0,
1089 fb_info->fb[DMUB_WINDOW_4_MAILBOX].size);
1091 memset(fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].cpu_addr, 0,
1092 fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].size);
1094 memset(fb_info->fb[DMUB_WINDOW_6_FW_STATE].cpu_addr, 0,
1095 fb_info->fb[DMUB_WINDOW_6_FW_STATE].size);
1097 /* Initialize hardware. */
1098 memset(&hw_params, 0, sizeof(hw_params));
1099 hw_params.fb_base = adev->gmc.fb_start;
1100 hw_params.fb_offset = adev->gmc.aper_base;
1102 /* backdoor load firmware and trigger dmub running */
1103 if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
1104 hw_params.load_inst_const = true;
1107 hw_params.psp_version = dmcu->psp_version;
1109 for (i = 0; i < fb_info->num_fb; ++i)
1110 hw_params.fb[i] = &fb_info->fb[i];
1112 switch (adev->asic_type) {
1113 case CHIP_YELLOW_CARP:
1114 if (dc->ctx->asic_id.hw_internal_rev != YELLOW_CARP_A0) {
1115 hw_params.dpia_supported = true;
1116 #if defined(CONFIG_DRM_AMD_DC_DCN)
1117 hw_params.disable_dpia = dc->debug.dpia_debug.bits.disable_dpia;
1125 status = dmub_srv_hw_init(dmub_srv, &hw_params);
1126 if (status != DMUB_STATUS_OK) {
1127 DRM_ERROR("Error initializing DMUB HW: %d\n", status);
1131 /* Wait for firmware load to finish. */
1132 status = dmub_srv_wait_for_auto_load(dmub_srv, 100000);
1133 if (status != DMUB_STATUS_OK)
1134 DRM_WARN("Wait for DMUB auto-load failed: %d\n", status);
1136 /* Init DMCU and ABM if available. */
1138 dmcu->funcs->dmcu_init(dmcu);
1139 abm->dmcu_is_running = dmcu->funcs->is_dmcu_initialized(dmcu);
1142 if (!adev->dm.dc->ctx->dmub_srv)
1143 adev->dm.dc->ctx->dmub_srv = dc_dmub_srv_create(adev->dm.dc, dmub_srv);
1144 if (!adev->dm.dc->ctx->dmub_srv) {
1145 DRM_ERROR("Couldn't allocate DC DMUB server!\n");
1149 DRM_INFO("DMUB hardware initialized: version=0x%08X\n",
1150 adev->dm.dmcub_fw_version);
1155 #if defined(CONFIG_DRM_AMD_DC_DCN)
1156 static void mmhub_read_system_context(struct amdgpu_device *adev, struct dc_phy_addr_space_config *pa_config)
1159 uint32_t logical_addr_low;
1160 uint32_t logical_addr_high;
1161 uint32_t agp_base, agp_bot, agp_top;
1162 PHYSICAL_ADDRESS_LOC page_table_start, page_table_end, page_table_base;
1164 memset(pa_config, 0, sizeof(*pa_config));
1166 logical_addr_low = min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18;
1167 pt_base = amdgpu_gmc_pd_addr(adev->gart.bo);
1169 if (adev->apu_flags & AMD_APU_IS_RAVEN2)
1171 * Raven2 has a HW issue that prevents it from using the vram which
1172 * is out of MC_VM_SYSTEM_APERTURE_HIGH_ADDR. So here is the
1173 * workaround that increases the system aperture high address (add 1)
1174 * to get rid of the VM fault and hardware hang.
1176 logical_addr_high = max((adev->gmc.fb_end >> 18) + 0x1, adev->gmc.agp_end >> 18);
1178 logical_addr_high = max(adev->gmc.fb_end, adev->gmc.agp_end) >> 18;
1181 agp_bot = adev->gmc.agp_start >> 24;
1182 agp_top = adev->gmc.agp_end >> 24;
1185 page_table_start.high_part = (u32)(adev->gmc.gart_start >> 44) & 0xF;
1186 page_table_start.low_part = (u32)(adev->gmc.gart_start >> 12);
1187 page_table_end.high_part = (u32)(adev->gmc.gart_end >> 44) & 0xF;
1188 page_table_end.low_part = (u32)(adev->gmc.gart_end >> 12);
1189 page_table_base.high_part = upper_32_bits(pt_base) & 0xF;
1190 page_table_base.low_part = lower_32_bits(pt_base);
1192 pa_config->system_aperture.start_addr = (uint64_t)logical_addr_low << 18;
1193 pa_config->system_aperture.end_addr = (uint64_t)logical_addr_high << 18;
1195 pa_config->system_aperture.agp_base = (uint64_t)agp_base << 24;
1196 pa_config->system_aperture.agp_bot = (uint64_t)agp_bot << 24;
1197 pa_config->system_aperture.agp_top = (uint64_t)agp_top << 24;
1199 pa_config->system_aperture.fb_base = adev->gmc.fb_start;
1200 pa_config->system_aperture.fb_offset = adev->gmc.aper_base;
1201 pa_config->system_aperture.fb_top = adev->gmc.fb_end;
1203 pa_config->gart_config.page_table_start_addr = page_table_start.quad_part << 12;
1204 pa_config->gart_config.page_table_end_addr = page_table_end.quad_part << 12;
1205 pa_config->gart_config.page_table_base_addr = page_table_base.quad_part;
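/*
 * Sketch (informative): for page_table_start/end the high/low split above is
 * just the layout DC expects; shifting quad_part back left by 12 rebuilds the
 * original page-aligned GART byte address, e.g.
 *
 *	page_table_start_addr == ((u64)page_table_start.high_part << 44) |
 *				 ((u64)page_table_start.low_part << 12)
 */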
1207 pa_config->is_hvm_enabled = 0;
1211 #if defined(CONFIG_DRM_AMD_DC_DCN)
1212 static void vblank_control_worker(struct work_struct *work)
1214 struct vblank_control_work *vblank_work =
1215 container_of(work, struct vblank_control_work, work);
1216 struct amdgpu_display_manager *dm = vblank_work->dm;
1218 mutex_lock(&dm->dc_lock);
1220 if (vblank_work->enable)
1221 dm->active_vblank_irq_count++;
1222 else if (dm->active_vblank_irq_count)
1223 dm->active_vblank_irq_count--;
1225 dc_allow_idle_optimizations(dm->dc, dm->active_vblank_irq_count == 0);
1227 DRM_DEBUG_KMS("Allow idle optimizations (MALL): %d\n", dm->active_vblank_irq_count == 0);
1229 /* Control PSR based on vblank requirements from OS */
1230 if (vblank_work->stream && vblank_work->stream->link) {
1231 if (vblank_work->enable) {
1232 if (vblank_work->stream->link->psr_settings.psr_allow_active)
1233 amdgpu_dm_psr_disable(vblank_work->stream);
1234 } else if (vblank_work->stream->link->psr_settings.psr_feature_enabled &&
1235 !vblank_work->stream->link->psr_settings.psr_allow_active &&
1236 vblank_work->acrtc->dm_irq_params.allow_psr_entry) {
1237 amdgpu_dm_psr_enable(vblank_work->stream);
1241 mutex_unlock(&dm->dc_lock);
1243 dc_stream_release(vblank_work->stream);
1250 static void dm_handle_hpd_rx_offload_work(struct work_struct *work)
1252 struct hpd_rx_irq_offload_work *offload_work;
1253 struct amdgpu_dm_connector *aconnector;
1254 struct dc_link *dc_link;
1255 struct amdgpu_device *adev;
1256 enum dc_connection_type new_connection_type = dc_connection_none;
1257 unsigned long flags;
1259 offload_work = container_of(work, struct hpd_rx_irq_offload_work, work);
1260 aconnector = offload_work->offload_wq->aconnector;
1263 DRM_ERROR("Can't retrieve aconnector in hpd_rx_irq_offload_work");
1267 adev = drm_to_adev(aconnector->base.dev);
1268 dc_link = aconnector->dc_link;
1270 mutex_lock(&aconnector->hpd_lock);
1271 if (!dc_link_detect_sink(dc_link, &new_connection_type))
1272 DRM_ERROR("KMS: Failed to detect connector\n");
1273 mutex_unlock(&aconnector->hpd_lock);
1275 if (new_connection_type == dc_connection_none)
1278 if (amdgpu_in_reset(adev))
1281 mutex_lock(&adev->dm.dc_lock);
1282 if (offload_work->data.bytes.device_service_irq.bits.AUTOMATED_TEST)
1283 dc_link_dp_handle_automated_test(dc_link);
1284 else if ((dc_link->connector_signal != SIGNAL_TYPE_EDP) &&
1285 hpd_rx_irq_check_link_loss_status(dc_link, &offload_work->data) &&
1286 dc_link_dp_allow_hpd_rx_irq(dc_link)) {
1287 dc_link_dp_handle_link_loss(dc_link);
1288 spin_lock_irqsave(&offload_work->offload_wq->offload_lock, flags);
1289 offload_work->offload_wq->is_handling_link_loss = false;
1290 spin_unlock_irqrestore(&offload_work->offload_wq->offload_lock, flags);
1292 mutex_unlock(&adev->dm.dc_lock);
1295 kfree(offload_work);
1299 static struct hpd_rx_irq_offload_work_queue *hpd_rx_irq_create_workqueue(struct dc *dc)
1301 int max_caps = dc->caps.max_links;
1303 struct hpd_rx_irq_offload_work_queue *hpd_rx_offload_wq = NULL;
1305 hpd_rx_offload_wq = kcalloc(max_caps, sizeof(*hpd_rx_offload_wq), GFP_KERNEL);
1307 if (!hpd_rx_offload_wq)
1311 for (i = 0; i < max_caps; i++) {
1312 hpd_rx_offload_wq[i].wq =
1313 create_singlethread_workqueue("amdgpu_dm_hpd_rx_offload_wq");
1315 if (hpd_rx_offload_wq[i].wq == NULL) {
1316 DRM_ERROR("failed to create amdgpu_dm_hpd_rx_offload_wq!");
1320 spin_lock_init(&hpd_rx_offload_wq[i].offload_lock);
1323 return hpd_rx_offload_wq;
1326 struct amdgpu_stutter_quirk {
1334 static const struct amdgpu_stutter_quirk amdgpu_stutter_quirk_list[] = {
1335 /* https://bugzilla.kernel.org/show_bug.cgi?id=214417 */
1336 { 0x1002, 0x15dd, 0x1002, 0x15dd, 0xc8 },
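/*
 * Adding a board to this quirk list is a one-line change (values below are
 * purely illustrative, not a real entry):
 *
 *	{ 0x1002, 0x15dd, 0x103c, 0x8615, 0xc6 },
 *
 * where the fields are PCI vendor/device, subsystem vendor/device and
 * revision, matched in dm_should_disable_stutter() below.
 */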
1340 static bool dm_should_disable_stutter(struct pci_dev *pdev)
1342 const struct amdgpu_stutter_quirk *p = amdgpu_stutter_quirk_list;
1344 while (p && p->chip_device != 0) {
1345 if (pdev->vendor == p->chip_vendor &&
1346 pdev->device == p->chip_device &&
1347 pdev->subsystem_vendor == p->subsys_vendor &&
1348 pdev->subsystem_device == p->subsys_device &&
1349 pdev->revision == p->revision) {
1357 static int amdgpu_dm_init(struct amdgpu_device *adev)
1359 struct dc_init_data init_data;
1360 #ifdef CONFIG_DRM_AMD_DC_HDCP
1361 struct dc_callback_init init_params;
1365 adev->dm.ddev = adev_to_drm(adev);
1366 adev->dm.adev = adev;
1368 /* Zero all the fields */
1369 memset(&init_data, 0, sizeof(init_data));
1370 #ifdef CONFIG_DRM_AMD_DC_HDCP
1371 memset(&init_params, 0, sizeof(init_params));
1374 mutex_init(&adev->dm.dc_lock);
1375 mutex_init(&adev->dm.audio_lock);
1376 #if defined(CONFIG_DRM_AMD_DC_DCN)
1377 spin_lock_init(&adev->dm.vblank_lock);
1380 if (amdgpu_dm_irq_init(adev)) {
1381 DRM_ERROR("amdgpu: failed to initialize DM IRQ support.\n");
1385 init_data.asic_id.chip_family = adev->family;
1387 init_data.asic_id.pci_revision_id = adev->pdev->revision;
1388 init_data.asic_id.hw_internal_rev = adev->external_rev_id;
1389 init_data.asic_id.chip_id = adev->pdev->device;
1391 init_data.asic_id.vram_width = adev->gmc.vram_width;
1392 /* TODO: initialize init_data.asic_id.vram_type here!!!! */
1393 init_data.asic_id.atombios_base_address =
1394 adev->mode_info.atom_context->bios;
1396 init_data.driver = adev;
1398 adev->dm.cgs_device = amdgpu_cgs_create_device(adev);
1400 if (!adev->dm.cgs_device) {
1401 DRM_ERROR("amdgpu: failed to create cgs device.\n");
1405 init_data.cgs_device = adev->dm.cgs_device;
1407 init_data.dce_environment = DCE_ENV_PRODUCTION_DRV;
1409 switch (adev->asic_type) {
1412 init_data.flags.gpu_vm_support = true;
1415 switch (adev->ip_versions[DCE_HWIP][0]) {
1416 case IP_VERSION(2, 1, 0):
1417 init_data.flags.gpu_vm_support = true;
1418 switch (adev->dm.dmcub_fw_version) {
1419 case 0: /* development */
1420 case 0x1: /* linux-firmware.git hash 6d9f399 */
1421 case 0x01000000: /* linux-firmware.git hash 9a0b0f4 */
1422 init_data.flags.disable_dmcu = false;
1425 init_data.flags.disable_dmcu = true;
1428 case IP_VERSION(1, 0, 0):
1429 case IP_VERSION(1, 0, 1):
1430 case IP_VERSION(3, 0, 1):
1431 case IP_VERSION(3, 1, 2):
1432 case IP_VERSION(3, 1, 3):
1433 init_data.flags.gpu_vm_support = true;
1435 case IP_VERSION(2, 0, 3):
1436 init_data.flags.disable_dmcu = true;
1444 if (amdgpu_dc_feature_mask & DC_FBC_MASK)
1445 init_data.flags.fbc_support = true;
1447 if (amdgpu_dc_feature_mask & DC_MULTI_MON_PP_MCLK_SWITCH_MASK)
1448 init_data.flags.multi_mon_pp_mclk_switch = true;
1450 if (amdgpu_dc_feature_mask & DC_DISABLE_FRACTIONAL_PWM_MASK)
1451 init_data.flags.disable_fractional_pwm = true;
1453 if (amdgpu_dc_feature_mask & DC_EDP_NO_POWER_SEQUENCING)
1454 init_data.flags.edp_no_power_sequencing = true;
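/*
 * Note (bit values are an assumption from amd_shared.h): the feature bits
 * tested above come from the amdgpu.dcfeaturemask module parameter, so e.g.
 * booting with amdgpu.dcfeaturemask=0x1 would request DC_FBC_MASK and turn
 * on init_data.flags.fbc_support here.
 */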
1456 init_data.flags.power_down_display_on_boot = true;
1458 if (check_seamless_boot_capability(adev)) {
1459 init_data.flags.power_down_display_on_boot = false;
1460 init_data.flags.allow_seamless_boot_optimization = true;
1461 DRM_INFO("Seamless boot condition check passed\n");
1464 INIT_LIST_HEAD(&adev->dm.da_list);
1465 /* Display Core create. */
1466 adev->dm.dc = dc_create(&init_data);
1469 DRM_INFO("Display Core initialized with v%s!\n", DC_VER);
1471 DRM_INFO("Display Core failed to initialize with v%s!\n", DC_VER);
1475 if (amdgpu_dc_debug_mask & DC_DISABLE_PIPE_SPLIT) {
1476 adev->dm.dc->debug.force_single_disp_pipe_split = false;
1477 adev->dm.dc->debug.pipe_split_policy = MPC_SPLIT_AVOID;
1480 if (adev->asic_type != CHIP_CARRIZO && adev->asic_type != CHIP_STONEY)
1481 adev->dm.dc->debug.disable_stutter = amdgpu_pp_feature_mask & PP_STUTTER_MODE ? false : true;
1482 if (dm_should_disable_stutter(adev->pdev))
1483 adev->dm.dc->debug.disable_stutter = true;
1485 if (amdgpu_dc_debug_mask & DC_DISABLE_STUTTER)
1486 adev->dm.dc->debug.disable_stutter = true;
1488 if (amdgpu_dc_debug_mask & DC_DISABLE_DSC) {
1489 adev->dm.dc->debug.disable_dsc = true;
1490 adev->dm.dc->debug.disable_dsc_edp = true;
1493 if (amdgpu_dc_debug_mask & DC_DISABLE_CLOCK_GATING)
1494 adev->dm.dc->debug.disable_clock_gate = true;
1496 r = dm_dmub_hw_init(adev);
1498 DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
1502 dc_hardware_init(adev->dm.dc);
1504 adev->dm.hpd_rx_offload_wq = hpd_rx_irq_create_workqueue(adev->dm.dc);
1505 if (!adev->dm.hpd_rx_offload_wq) {
1506 DRM_ERROR("amdgpu: failed to create hpd rx offload workqueue.\n");
1510 #if defined(CONFIG_DRM_AMD_DC_DCN)
1511 if ((adev->flags & AMD_IS_APU) && (adev->asic_type >= CHIP_CARRIZO)) {
1512 struct dc_phy_addr_space_config pa_config;
1514 mmhub_read_system_context(adev, &pa_config);
1516 // Call the DC init_memory func
1517 dc_setup_system_context(adev->dm.dc, &pa_config);
1521 adev->dm.freesync_module = mod_freesync_create(adev->dm.dc);
1522 if (!adev->dm.freesync_module) {
1524 "amdgpu: failed to initialize freesync_module.\n");
1526 DRM_DEBUG_DRIVER("amdgpu: freesync_module init done %p.\n",
1527 adev->dm.freesync_module);
1529 amdgpu_dm_init_color_mod();
1531 #if defined(CONFIG_DRM_AMD_DC_DCN)
1532 if (adev->dm.dc->caps.max_links > 0) {
1533 adev->dm.vblank_control_workqueue =
1534 create_singlethread_workqueue("dm_vblank_control_workqueue");
1535 if (!adev->dm.vblank_control_workqueue)
1536 DRM_ERROR("amdgpu: failed to initialize vblank_workqueue.\n");
1540 #ifdef CONFIG_DRM_AMD_DC_HDCP
1541 if (adev->dm.dc->caps.max_links > 0 && adev->family >= AMDGPU_FAMILY_RV) {
1542 adev->dm.hdcp_workqueue = hdcp_create_workqueue(adev, &init_params.cp_psp, adev->dm.dc);
1544 if (!adev->dm.hdcp_workqueue)
1545 DRM_ERROR("amdgpu: failed to initialize hdcp_workqueue.\n");
1547 DRM_DEBUG_DRIVER("amdgpu: hdcp_workqueue init done %p.\n", adev->dm.hdcp_workqueue);
1549 dc_init_callbacks(adev->dm.dc, &init_params);
1552 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
1553 adev->dm.crc_rd_wrk = amdgpu_dm_crtc_secure_display_create_work();
1555 if (dc_enable_dmub_notifications(adev->dm.dc)) {
1556 init_completion(&adev->dm.dmub_aux_transfer_done);
1557 adev->dm.dmub_notify = kzalloc(sizeof(struct dmub_notification), GFP_KERNEL);
1558 if (!adev->dm.dmub_notify) {
1559 DRM_INFO("amdgpu: failed to allocate adev->dm.dmub_notify");
1563 adev->dm.delayed_hpd_wq = create_singlethread_workqueue("amdgpu_dm_hpd_wq");
1564 if (!adev->dm.delayed_hpd_wq) {
1565 DRM_ERROR("amdgpu: failed to create hpd offload workqueue.\n");
1569 amdgpu_dm_outbox_init(adev);
1570 #if defined(CONFIG_DRM_AMD_DC_DCN)
1571 if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_AUX_REPLY,
1572 dmub_aux_setconfig_callback, false)) {
1573 DRM_ERROR("amdgpu: failed to register dmub aux callback");
1576 if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_HPD, dmub_hpd_callback, true)) {
1577 DRM_ERROR("amdgpu: failed to register dmub hpd callback");
1580 if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_HPD_IRQ, dmub_hpd_callback, true)) {
1581 DRM_ERROR("amdgpu: failed to register dmub hpd irq callback");
1584 #endif /* CONFIG_DRM_AMD_DC_DCN */
1587 if (amdgpu_dm_initialize_drm_device(adev)) {
1589 "amdgpu: failed to initialize sw for display support.\n");
1593 /* create fake encoders for MST */
1594 dm_dp_create_fake_mst_encoders(adev);
1596 /* TODO: Add_display_info? */
1598 /* TODO use dynamic cursor width */
1599 adev_to_drm(adev)->mode_config.cursor_width = adev->dm.dc->caps.max_cursor_size;
1600 adev_to_drm(adev)->mode_config.cursor_height = adev->dm.dc->caps.max_cursor_size;
1602 if (drm_vblank_init(adev_to_drm(adev), adev->dm.display_indexes_num)) {
1604 "amdgpu: failed to initialize sw for display support.\n");
1609 DRM_DEBUG_DRIVER("KMS initialized.\n");
1613 amdgpu_dm_fini(adev);
1618 static int amdgpu_dm_early_fini(void *handle)
1620 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1622 amdgpu_dm_audio_fini(adev);
1627 static void amdgpu_dm_fini(struct amdgpu_device *adev)
1631 #if defined(CONFIG_DRM_AMD_DC_DCN)
1632 if (adev->dm.vblank_control_workqueue) {
1633 destroy_workqueue(adev->dm.vblank_control_workqueue);
1634 adev->dm.vblank_control_workqueue = NULL;
1638 for (i = 0; i < adev->dm.display_indexes_num; i++) {
1639 drm_encoder_cleanup(&adev->dm.mst_encoders[i].base);
1642 amdgpu_dm_destroy_drm_device(&adev->dm);
1644 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
1645 if (adev->dm.crc_rd_wrk) {
1646 flush_work(&adev->dm.crc_rd_wrk->notify_ta_work);
1647 kfree(adev->dm.crc_rd_wrk);
1648 adev->dm.crc_rd_wrk = NULL;
1651 #ifdef CONFIG_DRM_AMD_DC_HDCP
1652 if (adev->dm.hdcp_workqueue) {
1653 hdcp_destroy(&adev->dev->kobj, adev->dm.hdcp_workqueue);
1654 adev->dm.hdcp_workqueue = NULL;
1658 dc_deinit_callbacks(adev->dm.dc);
1661 dc_dmub_srv_destroy(&adev->dm.dc->ctx->dmub_srv);
1663 if (dc_enable_dmub_notifications(adev->dm.dc)) {
1664 kfree(adev->dm.dmub_notify);
1665 adev->dm.dmub_notify = NULL;
1666 destroy_workqueue(adev->dm.delayed_hpd_wq);
1667 adev->dm.delayed_hpd_wq = NULL;
1670 if (adev->dm.dmub_bo)
1671 amdgpu_bo_free_kernel(&adev->dm.dmub_bo,
1672 &adev->dm.dmub_bo_gpu_addr,
1673 &adev->dm.dmub_bo_cpu_addr);
1675 if (adev->dm.hpd_rx_offload_wq) {
1676 for (i = 0; i < adev->dm.dc->caps.max_links; i++) {
1677 if (adev->dm.hpd_rx_offload_wq[i].wq) {
1678 destroy_workqueue(adev->dm.hpd_rx_offload_wq[i].wq);
1679 adev->dm.hpd_rx_offload_wq[i].wq = NULL;
1683 kfree(adev->dm.hpd_rx_offload_wq);
1684 adev->dm.hpd_rx_offload_wq = NULL;
1687 /* DC Destroy TODO: Replace destroy DAL */
1689 dc_destroy(&adev->dm.dc);
1691 * TODO: pageflip, vblank interrupt
1693 * amdgpu_dm_irq_fini(adev);
1696 if (adev->dm.cgs_device) {
1697 amdgpu_cgs_destroy_device(adev->dm.cgs_device);
1698 adev->dm.cgs_device = NULL;
1700 if (adev->dm.freesync_module) {
1701 mod_freesync_destroy(adev->dm.freesync_module);
1702 adev->dm.freesync_module = NULL;
1705 mutex_destroy(&adev->dm.audio_lock);
1706 mutex_destroy(&adev->dm.dc_lock);
1711 static int load_dmcu_fw(struct amdgpu_device *adev)
1713 const char *fw_name_dmcu = NULL;
1715 const struct dmcu_firmware_header_v1_0 *hdr;
1717 switch (adev->asic_type) {
1718 #if defined(CONFIG_DRM_AMD_DC_SI)
1733 case CHIP_POLARIS11:
1734 case CHIP_POLARIS10:
1735 case CHIP_POLARIS12:
1742 fw_name_dmcu = FIRMWARE_NAVI12_DMCU;
1745 if (ASICREV_IS_PICASSO(adev->external_rev_id))
1746 fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
1747 else if (ASICREV_IS_RAVEN2(adev->external_rev_id))
1748 fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
1753 switch (adev->ip_versions[DCE_HWIP][0]) {
1754 case IP_VERSION(2, 0, 2):
1755 case IP_VERSION(2, 0, 3):
1756 case IP_VERSION(2, 0, 0):
1757 case IP_VERSION(2, 1, 0):
1758 case IP_VERSION(3, 0, 0):
1759 case IP_VERSION(3, 0, 2):
1760 case IP_VERSION(3, 0, 3):
1761 case IP_VERSION(3, 0, 1):
1762 case IP_VERSION(3, 1, 2):
1763 case IP_VERSION(3, 1, 3):
1768 DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
1772 if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
1773 DRM_DEBUG_KMS("dm: DMCU firmware not supported on direct or SMU loading\n");
1777 r = request_firmware_direct(&adev->dm.fw_dmcu, fw_name_dmcu, adev->dev);
1779 /* DMCU firmware is not necessary, so don't raise a fuss if it's missing */
1780 DRM_DEBUG_KMS("dm: DMCU firmware not found\n");
1781 adev->dm.fw_dmcu = NULL;
1785 dev_err(adev->dev, "amdgpu_dm: Can't load firmware \"%s\"\n",
1790 r = amdgpu_ucode_validate(adev->dm.fw_dmcu);
1792 dev_err(adev->dev, "amdgpu_dm: Can't validate firmware \"%s\"\n",
1794 release_firmware(adev->dm.fw_dmcu);
1795 adev->dm.fw_dmcu = NULL;
1799 hdr = (const struct dmcu_firmware_header_v1_0 *)adev->dm.fw_dmcu->data;
1800 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].ucode_id = AMDGPU_UCODE_ID_DMCU_ERAM;
1801 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].fw = adev->dm.fw_dmcu;
1802 adev->firmware.fw_size +=
1803 ALIGN(le32_to_cpu(hdr->header.ucode_size_bytes) - le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);
1805 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].ucode_id = AMDGPU_UCODE_ID_DMCU_INTV;
1806 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].fw = adev->dm.fw_dmcu;
1807 adev->firmware.fw_size +=
1808 ALIGN(le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);
1810 adev->dm.dmcu_fw_version = le32_to_cpu(hdr->header.ucode_version);
1812 DRM_DEBUG_KMS("PSP loading DMCU firmware\n");
1817 static uint32_t amdgpu_dm_dmub_reg_read(void *ctx, uint32_t address)
1819 struct amdgpu_device *adev = ctx;
1821 return dm_read_reg(adev->dm.dc->ctx, address);
1824 static void amdgpu_dm_dmub_reg_write(void *ctx, uint32_t address,
1827 struct amdgpu_device *adev = ctx;
1829 return dm_write_reg(adev->dm.dc->ctx, address, value);
1832 static int dm_dmub_sw_init(struct amdgpu_device *adev)
1834 struct dmub_srv_create_params create_params;
1835 struct dmub_srv_region_params region_params;
1836 struct dmub_srv_region_info region_info;
1837 struct dmub_srv_fb_params fb_params;
1838 struct dmub_srv_fb_info *fb_info;
1839 struct dmub_srv *dmub_srv;
1840 const struct dmcub_firmware_header_v1_0 *hdr;
1841 const char *fw_name_dmub;
1842 enum dmub_asic dmub_asic;
1843 enum dmub_status status;
1846 switch (adev->ip_versions[DCE_HWIP][0]) {
1847 case IP_VERSION(2, 1, 0):
1848 dmub_asic = DMUB_ASIC_DCN21;
1849 fw_name_dmub = FIRMWARE_RENOIR_DMUB;
1850 if (ASICREV_IS_GREEN_SARDINE(adev->external_rev_id))
1851 fw_name_dmub = FIRMWARE_GREEN_SARDINE_DMUB;
1853 case IP_VERSION(3, 0, 0):
1854 if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(10, 3, 0)) {
1855 dmub_asic = DMUB_ASIC_DCN30;
1856 fw_name_dmub = FIRMWARE_SIENNA_CICHLID_DMUB;
1858 dmub_asic = DMUB_ASIC_DCN30;
1859 fw_name_dmub = FIRMWARE_NAVY_FLOUNDER_DMUB;
1862 case IP_VERSION(3, 0, 1):
1863 dmub_asic = DMUB_ASIC_DCN301;
1864 fw_name_dmub = FIRMWARE_VANGOGH_DMUB;
1866 case IP_VERSION(3, 0, 2):
1867 dmub_asic = DMUB_ASIC_DCN302;
1868 fw_name_dmub = FIRMWARE_DIMGREY_CAVEFISH_DMUB;
1870 case IP_VERSION(3, 0, 3):
1871 dmub_asic = DMUB_ASIC_DCN303;
1872 fw_name_dmub = FIRMWARE_BEIGE_GOBY_DMUB;
1874 case IP_VERSION(3, 1, 2):
1875 case IP_VERSION(3, 1, 3):
1876 dmub_asic = (adev->external_rev_id == YELLOW_CARP_B0) ? DMUB_ASIC_DCN31B : DMUB_ASIC_DCN31;
1877 fw_name_dmub = FIRMWARE_YELLOW_CARP_DMUB;
1881 /* ASIC doesn't support DMUB. */
1885 r = request_firmware_direct(&adev->dm.dmub_fw, fw_name_dmub, adev->dev);
1887 DRM_ERROR("DMUB firmware loading failed: %d\n", r);
1891 r = amdgpu_ucode_validate(adev->dm.dmub_fw);
1893 DRM_ERROR("Couldn't validate DMUB firmware: %d\n", r);
1897 hdr = (const struct dmcub_firmware_header_v1_0 *)adev->dm.dmub_fw->data;
1898 adev->dm.dmcub_fw_version = le32_to_cpu(hdr->header.ucode_version);
1900 if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
1901 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].ucode_id =
1902 AMDGPU_UCODE_ID_DMCUB;
1903 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].fw =
1905 adev->firmware.fw_size +=
1906 ALIGN(le32_to_cpu(hdr->inst_const_bytes), PAGE_SIZE);
1908 DRM_INFO("Loading DMUB firmware via PSP: version=0x%08X\n",
1909 adev->dm.dmcub_fw_version);
1913 adev->dm.dmub_srv = kzalloc(sizeof(*adev->dm.dmub_srv), GFP_KERNEL);
1914 dmub_srv = adev->dm.dmub_srv;
1917 DRM_ERROR("Failed to allocate DMUB service!\n");
1921 memset(&create_params, 0, sizeof(create_params));
1922 create_params.user_ctx = adev;
1923 create_params.funcs.reg_read = amdgpu_dm_dmub_reg_read;
1924 create_params.funcs.reg_write = amdgpu_dm_dmub_reg_write;
1925 create_params.asic = dmub_asic;
1927 /* Create the DMUB service. */
1928 status = dmub_srv_create(dmub_srv, &create_params);
1929 if (status != DMUB_STATUS_OK) {
1930 DRM_ERROR("Error creating DMUB service: %d\n", status);
1934 /* Calculate the size of all the regions for the DMUB service. */
1935 memset(&region_params, 0, sizeof(region_params));
1937 region_params.inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
1938 PSP_HEADER_BYTES - PSP_FOOTER_BYTES;
1939 region_params.bss_data_size = le32_to_cpu(hdr->bss_data_bytes);
1940 region_params.vbios_size = adev->bios_size;
1941 region_params.fw_bss_data = region_params.bss_data_size ?
1942 adev->dm.dmub_fw->data +
1943 le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
1944 le32_to_cpu(hdr->inst_const_bytes) : NULL;
1945 region_params.fw_inst_const =
1946 adev->dm.dmub_fw->data +
1947 le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
1950 status = dmub_srv_calc_region_info(dmub_srv, &region_params,
1953 if (status != DMUB_STATUS_OK) {
1954 DRM_ERROR("Error calculating DMUB region info: %d\n", status);
1959 * Allocate a framebuffer based on the total size of all the regions.
1960 * TODO: Move this into GART.
1962 r = amdgpu_bo_create_kernel(adev, region_info.fb_size, PAGE_SIZE,
1963 AMDGPU_GEM_DOMAIN_VRAM, &adev->dm.dmub_bo,
1964 &adev->dm.dmub_bo_gpu_addr,
1965 &adev->dm.dmub_bo_cpu_addr);
1969 /* Rebase the regions on the framebuffer address. */
1970 memset(&fb_params, 0, sizeof(fb_params));
1971 fb_params.cpu_addr = adev->dm.dmub_bo_cpu_addr;
1972 fb_params.gpu_addr = adev->dm.dmub_bo_gpu_addr;
1973 fb_params.region_info = &region_info;
1975 adev->dm.dmub_fb_info =
1976 kzalloc(sizeof(*adev->dm.dmub_fb_info), GFP_KERNEL);
1977 fb_info = adev->dm.dmub_fb_info;
1981 "Failed to allocate framebuffer info for DMUB service!\n");
1985 status = dmub_srv_calc_fb_info(dmub_srv, &fb_params, fb_info);
1986 if (status != DMUB_STATUS_OK) {
1987 DRM_ERROR("Error calculating DMUB FB info: %d\n", status);
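/*
 * Bring-up order sketch (summarizing this file's flow):
 *
 *	dm_sw_init()
 *	  -> dm_dmub_sw_init()        // create dmub_srv, reserve the VRAM regions above
 *	amdgpu_dm_init()
 *	  -> dm_dmub_hw_init()        // copy fw/bios sections into those regions
 *	       -> dmub_srv_hw_init()  // start the DMUB
 */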
1994 static int dm_sw_init(void *handle)
1996 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1999 r = dm_dmub_sw_init(adev);
2003 return load_dmcu_fw(adev);
2006 static int dm_sw_fini(void *handle)
2008 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2010 kfree(adev->dm.dmub_fb_info);
2011 adev->dm.dmub_fb_info = NULL;
2013 if (adev->dm.dmub_srv) {
2014 dmub_srv_destroy(adev->dm.dmub_srv);
2015 adev->dm.dmub_srv = NULL;
2018 release_firmware(adev->dm.dmub_fw);
2019 adev->dm.dmub_fw = NULL;
2021 release_firmware(adev->dm.fw_dmcu);
2022 adev->dm.fw_dmcu = NULL;
2027 static int detect_mst_link_for_all_connectors(struct drm_device *dev)
2029 struct amdgpu_dm_connector *aconnector;
2030 struct drm_connector *connector;
2031 struct drm_connector_list_iter iter;
2034 drm_connector_list_iter_begin(dev, &iter);
2035 drm_for_each_connector_iter(connector, &iter) {
2036 aconnector = to_amdgpu_dm_connector(connector);
2037 if (aconnector->dc_link->type == dc_connection_mst_branch &&
2038 aconnector->mst_mgr.aux) {
2039 DRM_DEBUG_DRIVER("DM_MST: starting TM on aconnector: %p [id: %d]\n",
2041 aconnector->base.base.id);
2043 ret = drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, true);
2045 DRM_ERROR("DM_MST: Failed to start MST\n");
2046 aconnector->dc_link->type =
2047 dc_connection_single;
2052 drm_connector_list_iter_end(&iter);
2057 static int dm_late_init(void *handle)
2059 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2061 struct dmcu_iram_parameters params;
2062 unsigned int linear_lut[16];
2064 struct dmcu *dmcu = NULL;
2066 dmcu = adev->dm.dc->res_pool->dmcu;
2068 for (i = 0; i < 16; i++)
2069 linear_lut[i] = 0xFFFF * i / 15;
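	/*
	 * The ramp built above is linear across the full 16-bit range:
	 * i = 0 -> 0x0000, i = 8 -> 0xFFFF * 8 / 15 = 0x8888, i = 15 -> 0xFFFF.
	 */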
2072 params.backlight_ramping_override = false;
2073 params.backlight_ramping_start = 0xCCCC;
2074 params.backlight_ramping_reduction = 0xCCCCCCCC;
2075 params.backlight_lut_array_size = 16;
2076 params.backlight_lut_array = linear_lut;
2078 	/* Min backlight level after ABM reduction; don't allow below 1%:
2079 * 0xFFFF x 0.01 = 0x28F
2081 params.min_abm_backlight = 0x28F;
2082 	/* In the case where ABM is implemented on dmcub,
2083 	 * the dmcu object will be NULL.
2084 	 * ABM 2.4 and up are implemented on dmcub.
2087 if (!dmcu_load_iram(dmcu, params))
2089 } else if (adev->dm.dc->ctx->dmub_srv) {
2090 struct dc_link *edp_links[MAX_NUM_EDP];
2093 get_edp_links(adev->dm.dc, edp_links, &edp_num);
2094 for (i = 0; i < edp_num; i++) {
2095 if (!dmub_init_abm_config(adev->dm.dc->res_pool, params, i))
2100 return detect_mst_link_for_all_connectors(adev_to_drm(adev));
2103 static void s3_handle_mst(struct drm_device *dev, bool suspend)
2105 struct amdgpu_dm_connector *aconnector;
2106 struct drm_connector *connector;
2107 struct drm_connector_list_iter iter;
2108 struct drm_dp_mst_topology_mgr *mgr;
2110 bool need_hotplug = false;
2112 drm_connector_list_iter_begin(dev, &iter);
2113 drm_for_each_connector_iter(connector, &iter) {
2114 aconnector = to_amdgpu_dm_connector(connector);
2115 if (aconnector->dc_link->type != dc_connection_mst_branch ||
2116 aconnector->mst_port)
2119 mgr = &aconnector->mst_mgr;
2122 drm_dp_mst_topology_mgr_suspend(mgr);
2124 ret = drm_dp_mst_topology_mgr_resume(mgr, true);
2126 drm_dp_mst_topology_mgr_set_mst(mgr, false);
2127 need_hotplug = true;
2131 drm_connector_list_iter_end(&iter);
2134 drm_kms_helper_hotplug_event(dev);
2137 static int amdgpu_dm_smu_write_watermarks_table(struct amdgpu_device *adev)
2139 struct smu_context *smu = &adev->smu;
2142 if (!is_support_sw_smu(adev))
2145 	/* This interface is for dGPU Navi1x. The Linux dc-pplib interface depends
2146 	 * on the Windows driver dc implementation.
2147 	 * For Navi1x, clock settings of dcn watermarks are fixed. The settings
2148 	 * should be passed to smu during boot up and resume from s3.
2149 * boot up: dc calculate dcn watermark clock settings within dc_create,
2150 * dcn20_resource_construct
2151 * then call pplib functions below to pass the settings to smu:
2152 * smu_set_watermarks_for_clock_ranges
2153 * smu_set_watermarks_table
2154 * navi10_set_watermarks_table
2155 * smu_write_watermarks_table
2157 	 * For Renoir, clock settings of dcn watermarks are also fixed values.
2158 	 * dc has implemented a different flow for the Windows driver:
2159 * dc_hardware_init / dc_set_power_state
2164 * smu_set_watermarks_for_clock_ranges
2165 * renoir_set_watermarks_table
2166 * smu_write_watermarks_table
2169 * dc_hardware_init -> amdgpu_dm_init
2170 * dc_set_power_state --> dm_resume
2172 	 * therefore, this function applies to navi10/12/14 but not Renoir
2175 switch (adev->ip_versions[DCE_HWIP][0]) {
2176 case IP_VERSION(2, 0, 2):
2177 case IP_VERSION(2, 0, 0):
2183 ret = smu_write_watermarks_table(smu);
2185 DRM_ERROR("Failed to update WMTABLE!\n");
2193 * dm_hw_init() - Initialize DC device
2194 * @handle: The base driver device containing the amdgpu_dm device.
2196 * Initialize the &struct amdgpu_display_manager device. This involves calling
2197 * the initializers of each DM component, then populating the struct with them.
2199 * Although the function implies hardware initialization, both hardware and
2200 * software are initialized here. Splitting them out to their relevant init
2201 * hooks is a future TODO item.
2203 * Some notable things that are initialized here:
2205 * - Display Core, both software and hardware
2206 * - DC modules that we need (freesync and color management)
2207 * - DRM software states
2208 * - Interrupt sources and handlers
2210 * - Debug FS entries, if enabled
2212 static int dm_hw_init(void *handle)
2214 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2215 /* Create DAL display manager */
2216 amdgpu_dm_init(adev);
2217 amdgpu_dm_hpd_init(adev);
2223 * dm_hw_fini() - Teardown DC device
2224 * @handle: The base driver device containing the amdgpu_dm device.
2226 * Teardown components within &struct amdgpu_display_manager that require
2227 * cleanup. This involves cleaning up the DRM device, DC, and any modules that
2228 * were loaded. Also flush IRQ workqueues and disable them.
2230 static int dm_hw_fini(void *handle)
2232 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2234 amdgpu_dm_hpd_fini(adev);
2236 amdgpu_dm_irq_fini(adev);
2237 amdgpu_dm_fini(adev);
2242 static int dm_enable_vblank(struct drm_crtc *crtc);
2243 static void dm_disable_vblank(struct drm_crtc *crtc);
2245 static void dm_gpureset_toggle_interrupts(struct amdgpu_device *adev,
2246 struct dc_state *state, bool enable)
2248 enum dc_irq_source irq_source;
2249 struct amdgpu_crtc *acrtc;
2253 for (i = 0; i < state->stream_count; i++) {
2254 acrtc = get_crtc_by_otg_inst(
2255 adev, state->stream_status[i].primary_otg_inst);
2257 if (acrtc && state->stream_status[i].plane_count != 0) {
2258 irq_source = IRQ_TYPE_PFLIP + acrtc->otg_inst;
2259 rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
2260 			DRM_DEBUG_VBL("crtc %d - pflip irq %sabling: r=%d\n",
2261 acrtc->crtc_id, enable ? "en" : "dis", rc);
2263 DRM_WARN("Failed to %s pflip interrupts\n",
2264 enable ? "enable" : "disable");
2267 rc = dm_enable_vblank(&acrtc->base);
2269 DRM_WARN("Failed to enable vblank interrupts\n");
2271 dm_disable_vblank(&acrtc->base);
2279 static enum dc_status amdgpu_dm_commit_zero_streams(struct dc *dc)
2281 struct dc_state *context = NULL;
2282 enum dc_status res = DC_ERROR_UNEXPECTED;
2284 struct dc_stream_state *del_streams[MAX_PIPES];
2285 int del_streams_count = 0;
2287 memset(del_streams, 0, sizeof(del_streams));
2289 context = dc_create_state(dc);
2290 if (context == NULL)
2291 goto context_alloc_fail;
2293 dc_resource_state_copy_construct_current(dc, context);
2295 /* First remove from context all streams */
2296 for (i = 0; i < context->stream_count; i++) {
2297 struct dc_stream_state *stream = context->streams[i];
2299 del_streams[del_streams_count++] = stream;
2302 /* Remove all planes for removed streams and then remove the streams */
2303 for (i = 0; i < del_streams_count; i++) {
2304 if (!dc_rem_all_planes_for_stream(dc, del_streams[i], context)) {
2305 res = DC_FAIL_DETACH_SURFACES;
2309 res = dc_remove_stream_from_ctx(dc, context, del_streams[i]);
2314 res = dc_commit_state(dc, context);
2317 dc_release_state(context);
2323 static void hpd_rx_irq_work_suspend(struct amdgpu_display_manager *dm)
2327 if (dm->hpd_rx_offload_wq) {
2328 for (i = 0; i < dm->dc->caps.max_links; i++)
2329 flush_workqueue(dm->hpd_rx_offload_wq[i].wq);
2333 static int dm_suspend(void *handle)
2335 struct amdgpu_device *adev = handle;
2336 struct amdgpu_display_manager *dm = &adev->dm;
2339 if (amdgpu_in_reset(adev)) {
2340 mutex_lock(&dm->dc_lock);
2342 #if defined(CONFIG_DRM_AMD_DC_DCN)
2343 dc_allow_idle_optimizations(adev->dm.dc, false);
2346 dm->cached_dc_state = dc_copy_state(dm->dc->current_state);
2348 dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, false);
2350 amdgpu_dm_commit_zero_streams(dm->dc);
2352 amdgpu_dm_irq_suspend(adev);
2354 hpd_rx_irq_work_suspend(dm);
2359 WARN_ON(adev->dm.cached_state);
2360 adev->dm.cached_state = drm_atomic_helper_suspend(adev_to_drm(adev));
2362 s3_handle_mst(adev_to_drm(adev), true);
2364 amdgpu_dm_irq_suspend(adev);
2366 hpd_rx_irq_work_suspend(dm);
2368 dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D3);
2373 static struct amdgpu_dm_connector *
2374 amdgpu_dm_find_first_crtc_matching_connector(struct drm_atomic_state *state,
2375 struct drm_crtc *crtc)
2378 struct drm_connector_state *new_con_state;
2379 struct drm_connector *connector;
2380 struct drm_crtc *crtc_from_state;
2382 for_each_new_connector_in_state(state, connector, new_con_state, i) {
2383 crtc_from_state = new_con_state->crtc;
2385 if (crtc_from_state == crtc)
2386 return to_amdgpu_dm_connector(connector);
2392 static void emulated_link_detect(struct dc_link *link)
2394 struct dc_sink_init_data sink_init_data = { 0 };
2395 struct display_sink_capability sink_caps = { 0 };
2396 enum dc_edid_status edid_status;
2397 struct dc_context *dc_ctx = link->ctx;
2398 struct dc_sink *sink = NULL;
2399 struct dc_sink *prev_sink = NULL;
2401 link->type = dc_connection_none;
2402 prev_sink = link->local_sink;
2405 dc_sink_release(prev_sink);
2407 switch (link->connector_signal) {
2408 case SIGNAL_TYPE_HDMI_TYPE_A: {
2409 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2410 sink_caps.signal = SIGNAL_TYPE_HDMI_TYPE_A;
2414 case SIGNAL_TYPE_DVI_SINGLE_LINK: {
2415 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2416 sink_caps.signal = SIGNAL_TYPE_DVI_SINGLE_LINK;
2420 case SIGNAL_TYPE_DVI_DUAL_LINK: {
2421 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2422 sink_caps.signal = SIGNAL_TYPE_DVI_DUAL_LINK;
2426 case SIGNAL_TYPE_LVDS: {
2427 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2428 sink_caps.signal = SIGNAL_TYPE_LVDS;
2432 case SIGNAL_TYPE_EDP: {
2433 sink_caps.transaction_type =
2434 DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
2435 sink_caps.signal = SIGNAL_TYPE_EDP;
2439 case SIGNAL_TYPE_DISPLAY_PORT: {
2440 sink_caps.transaction_type =
2441 DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
2442 sink_caps.signal = SIGNAL_TYPE_VIRTUAL;
2447 DC_ERROR("Invalid connector type! signal:%d\n",
2448 link->connector_signal);
2452 sink_init_data.link = link;
2453 sink_init_data.sink_signal = sink_caps.signal;
2455 sink = dc_sink_create(&sink_init_data);
2457 DC_ERROR("Failed to create sink!\n");
2461 /* dc_sink_create returns a new reference */
2462 link->local_sink = sink;
2464 edid_status = dm_helpers_read_local_edid(
2469 if (edid_status != EDID_OK)
2470 DC_ERROR("Failed to read EDID");
2474 static void dm_gpureset_commit_state(struct dc_state *dc_state,
2475 struct amdgpu_display_manager *dm)
2478 struct dc_surface_update surface_updates[MAX_SURFACES];
2479 struct dc_plane_info plane_infos[MAX_SURFACES];
2480 struct dc_scaling_info scaling_infos[MAX_SURFACES];
2481 struct dc_flip_addrs flip_addrs[MAX_SURFACES];
2482 struct dc_stream_update stream_update;
2486 bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
2489 dm_error("Failed to allocate update bundle\n");
2493 for (k = 0; k < dc_state->stream_count; k++) {
2494 bundle->stream_update.stream = dc_state->streams[k];
2496 for (m = 0; m < dc_state->stream_status->plane_count; m++) {
2497 bundle->surface_updates[m].surface =
2498 dc_state->stream_status->plane_states[m];
2499 bundle->surface_updates[m].surface->force_full_update =
2502 dc_commit_updates_for_stream(
2503 dm->dc, bundle->surface_updates,
2504 dc_state->stream_status->plane_count,
2505 dc_state->streams[k], &bundle->stream_update, dc_state);
2514 static void dm_set_dpms_off(struct dc_link *link, struct dm_crtc_state *acrtc_state)
2516 struct dc_stream_state *stream_state;
2517 struct amdgpu_dm_connector *aconnector = link->priv;
2518 struct amdgpu_device *adev = drm_to_adev(aconnector->base.dev);
2519 struct dc_stream_update stream_update;
2520 bool dpms_off = true;
2522 memset(&stream_update, 0, sizeof(stream_update));
2523 stream_update.dpms_off = &dpms_off;
2525 mutex_lock(&adev->dm.dc_lock);
2526 stream_state = dc_stream_find_from_link(link);
2528 if (stream_state == NULL) {
2529 DRM_DEBUG_DRIVER("Error finding stream state associated with link!\n");
2530 mutex_unlock(&adev->dm.dc_lock);
2534 stream_update.stream = stream_state;
2535 acrtc_state->force_dpms_off = true;
2536 dc_commit_updates_for_stream(stream_state->ctx->dc, NULL, 0,
2537 stream_state, &stream_update,
2538 stream_state->ctx->dc->current_state);
2539 mutex_unlock(&adev->dm.dc_lock);
2542 static int dm_resume(void *handle)
2544 struct amdgpu_device *adev = handle;
2545 struct drm_device *ddev = adev_to_drm(adev);
2546 struct amdgpu_display_manager *dm = &adev->dm;
2547 struct amdgpu_dm_connector *aconnector;
2548 struct drm_connector *connector;
2549 struct drm_connector_list_iter iter;
2550 struct drm_crtc *crtc;
2551 struct drm_crtc_state *new_crtc_state;
2552 struct dm_crtc_state *dm_new_crtc_state;
2553 struct drm_plane *plane;
2554 struct drm_plane_state *new_plane_state;
2555 struct dm_plane_state *dm_new_plane_state;
2556 struct dm_atomic_state *dm_state = to_dm_atomic_state(dm->atomic_obj.state);
2557 enum dc_connection_type new_connection_type = dc_connection_none;
2558 struct dc_state *dc_state;
2561 if (amdgpu_in_reset(adev)) {
2562 dc_state = dm->cached_dc_state;
2565 * The dc->current_state is backed up into dm->cached_dc_state
2566 * before we commit 0 streams.
2568 * DC will clear link encoder assignments on the real state
2569 * but the changes won't propagate over to the copy we made
2570 * before the 0 streams commit.
2572 * DC expects that link encoder assignments are *not* valid
2573 * when committing a state, so as a workaround it needs to be
2576 link_enc_cfg_init(dm->dc, dc_state);
2578 if (dc_enable_dmub_notifications(adev->dm.dc))
2579 amdgpu_dm_outbox_init(adev);
2581 r = dm_dmub_hw_init(adev);
2583 DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
2585 dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
2588 amdgpu_dm_irq_resume_early(adev);
2590 for (i = 0; i < dc_state->stream_count; i++) {
2591 dc_state->streams[i]->mode_changed = true;
2592 for (j = 0; j < dc_state->stream_status[i].plane_count; j++) {
2593 dc_state->stream_status[i].plane_states[j]->update_flags.raw
2598 WARN_ON(!dc_commit_state(dm->dc, dc_state));
2600 dm_gpureset_commit_state(dm->cached_dc_state, dm);
2602 dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, true);
2604 dc_release_state(dm->cached_dc_state);
2605 dm->cached_dc_state = NULL;
2607 amdgpu_dm_irq_resume_late(adev);
2609 mutex_unlock(&dm->dc_lock);
2613 /* Recreate dc_state - DC invalidates it when setting power state to S3. */
2614 dc_release_state(dm_state->context);
2615 dm_state->context = dc_create_state(dm->dc);
2616 /* TODO: Remove dc_state->dccg, use dc->dccg directly. */
2617 dc_resource_state_construct(dm->dc, dm_state->context);
2619 /* Re-enable outbox interrupts for DPIA. */
2620 if (dc_enable_dmub_notifications(adev->dm.dc))
2621 amdgpu_dm_outbox_init(adev);
2623 /* Before powering on DC we need to re-initialize DMUB. */
2624 r = dm_dmub_hw_init(adev);
2626 DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
2628 /* power on hardware */
2629 dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
2631 /* program HPD filter */
2635 	 * Early enable HPD Rx IRQ; this should be done before set mode, as short
2636 	 * pulse interrupts are used for MST
2638 amdgpu_dm_irq_resume_early(adev);
2640 	/* On resume we need to rewrite the MSTM control bits to enable MST */
2641 s3_handle_mst(ddev, false);
2644 drm_connector_list_iter_begin(ddev, &iter);
2645 drm_for_each_connector_iter(connector, &iter) {
2646 aconnector = to_amdgpu_dm_connector(connector);
2649 * this is the case when traversing through already created
2650 * MST connectors, should be skipped
2652 if (aconnector->mst_port)
2655 mutex_lock(&aconnector->hpd_lock);
2656 if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
2657 DRM_ERROR("KMS: Failed to detect connector\n");
2659 if (aconnector->base.force && new_connection_type == dc_connection_none)
2660 emulated_link_detect(aconnector->dc_link);
2662 dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD);
2664 if (aconnector->fake_enable && aconnector->dc_link->local_sink)
2665 aconnector->fake_enable = false;
2667 if (aconnector->dc_sink)
2668 dc_sink_release(aconnector->dc_sink);
2669 aconnector->dc_sink = NULL;
2670 amdgpu_dm_update_connector_after_detect(aconnector);
2671 mutex_unlock(&aconnector->hpd_lock);
2673 drm_connector_list_iter_end(&iter);
2675 /* Force mode set in atomic commit */
2676 for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i)
2677 new_crtc_state->active_changed = true;
2680 * atomic_check is expected to create the dc states. We need to release
2681 * them here, since they were duplicated as part of the suspend
2684 for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i) {
2685 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
2686 if (dm_new_crtc_state->stream) {
2687 WARN_ON(kref_read(&dm_new_crtc_state->stream->refcount) > 1);
2688 dc_stream_release(dm_new_crtc_state->stream);
2689 dm_new_crtc_state->stream = NULL;
2693 for_each_new_plane_in_state(dm->cached_state, plane, new_plane_state, i) {
2694 dm_new_plane_state = to_dm_plane_state(new_plane_state);
2695 if (dm_new_plane_state->dc_state) {
2696 WARN_ON(kref_read(&dm_new_plane_state->dc_state->refcount) > 1);
2697 dc_plane_state_release(dm_new_plane_state->dc_state);
2698 dm_new_plane_state->dc_state = NULL;
2702 drm_atomic_helper_resume(ddev, dm->cached_state);
2704 dm->cached_state = NULL;
2706 amdgpu_dm_irq_resume_late(adev);
2708 amdgpu_dm_smu_write_watermarks_table(adev);
2716 * DM (and consequently DC) is registered in the amdgpu base driver as a IP
2717 * block. When CONFIG_DRM_AMD_DC is enabled, the DM device IP block is added to
2718 * the base driver's device list to be initialized and torn down accordingly.
2720 * The functions to do so are provided as hooks in &struct amd_ip_funcs.
2723 static const struct amd_ip_funcs amdgpu_dm_funcs = {
2725 .early_init = dm_early_init,
2726 .late_init = dm_late_init,
2727 .sw_init = dm_sw_init,
2728 .sw_fini = dm_sw_fini,
2729 .early_fini = amdgpu_dm_early_fini,
2730 .hw_init = dm_hw_init,
2731 .hw_fini = dm_hw_fini,
2732 .suspend = dm_suspend,
2733 .resume = dm_resume,
2734 .is_idle = dm_is_idle,
2735 .wait_for_idle = dm_wait_for_idle,
2736 .check_soft_reset = dm_check_soft_reset,
2737 .soft_reset = dm_soft_reset,
2738 .set_clockgating_state = dm_set_clockgating_state,
2739 .set_powergating_state = dm_set_powergating_state,
2742 const struct amdgpu_ip_block_version dm_ip_block =
2744 .type = AMD_IP_BLOCK_TYPE_DCE,
2748 .funcs = &amdgpu_dm_funcs,
2758 static const struct drm_mode_config_funcs amdgpu_dm_mode_funcs = {
2759 .fb_create = amdgpu_display_user_framebuffer_create,
2760 .get_format_info = amd_get_format_info,
2761 .output_poll_changed = drm_fb_helper_output_poll_changed,
2762 .atomic_check = amdgpu_dm_atomic_check,
2763 .atomic_commit = drm_atomic_helper_commit,
2766 static struct drm_mode_config_helper_funcs amdgpu_dm_mode_config_helperfuncs = {
2767 .atomic_commit_tail = amdgpu_dm_atomic_commit_tail
2770 static void update_connector_ext_caps(struct amdgpu_dm_connector *aconnector)
2772 u32 max_cll, min_cll, max, min, q, r;
2773 struct amdgpu_dm_backlight_caps *caps;
2774 struct amdgpu_display_manager *dm;
2775 struct drm_connector *conn_base;
2776 struct amdgpu_device *adev;
2777 struct dc_link *link = NULL;
2778 static const u8 pre_computed_values[] = {
2779 50, 51, 52, 53, 55, 56, 57, 58, 59, 61, 62, 63, 65, 66, 68, 69,
2780 71, 72, 74, 75, 77, 79, 81, 82, 84, 86, 88, 90, 92, 94, 96, 98};
2783 if (!aconnector || !aconnector->dc_link)
2786 link = aconnector->dc_link;
2787 if (link->connector_signal != SIGNAL_TYPE_EDP)
2790 conn_base = &aconnector->base;
2791 adev = drm_to_adev(conn_base->dev);
2793 for (i = 0; i < dm->num_of_edps; i++) {
2794 if (link == dm->backlight_link[i])
2797 if (i >= dm->num_of_edps)
2799 caps = &dm->backlight_caps[i];
2800 caps->ext_caps = &aconnector->dc_link->dpcd_sink_ext_caps;
2801 caps->aux_support = false;
2802 max_cll = conn_base->hdr_sink_metadata.hdmi_type1.max_cll;
2803 min_cll = conn_base->hdr_sink_metadata.hdmi_type1.min_cll;
2805 if (caps->ext_caps->bits.oled == 1 /*||
2806 caps->ext_caps->bits.sdr_aux_backlight_control == 1 ||
2807 caps->ext_caps->bits.hdr_aux_backlight_control == 1*/)
2808 caps->aux_support = true;
2810 if (amdgpu_backlight == 0)
2811 caps->aux_support = false;
2812 else if (amdgpu_backlight == 1)
2813 caps->aux_support = true;
2815 /* From the specification (CTA-861-G), for calculating the maximum
2816 * luminance we need to use:
2817 * Luminance = 50*2**(CV/32)
2818 * Where CV is a one-byte value.
2819 	 * Calculating this expression would require floating point precision;
2820 	 * to avoid that complexity, we take advantage of the fact that CV is
2821 	 * divided by a constant. From Euclid's division algorithm, we know that
2822 	 * CV can be written as: CV = 32*q + r. Next, we replace CV in the
2823 	 * Luminance expression and get 50*(2**q)*(2**(r/32)), hence we just
2824 	 * need to pre-compute the values of 50*(2**(r/32)). For pre-computing
2825 	 * those values we just used the following Ruby line:
2826 * (0...32).each {|cv| puts (50*2**(cv/32.0)).round}
2827 * The results of the above expressions can be verified at
2828 * pre_computed_values.
2832 max = (1 << q) * pre_computed_values[r];
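	/*
	 * Worked example (using the CV = 32*q + r split above, i.e.
	 * q = max_cll / 32 and r = max_cll % 32): max_cll = 33 gives q = 1,
	 * r = 1, so max = 2 * pre_computed_values[1] = 2 * 51 = 102 nits,
	 * close to the exact 50 * 2^(33/32) ~= 102.2 from the formula above.
	 */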
2834 // min luminance: maxLum * (CV/255)^2 / 100
2835 q = DIV_ROUND_CLOSEST(min_cll, 255);
2836 min = max * DIV_ROUND_CLOSEST((q * q), 100);
2838 caps->aux_max_input_signal = max;
2839 caps->aux_min_input_signal = min;
2842 void amdgpu_dm_update_connector_after_detect(
2843 struct amdgpu_dm_connector *aconnector)
2845 struct drm_connector *connector = &aconnector->base;
2846 struct drm_device *dev = connector->dev;
2847 struct dc_sink *sink;
2849 /* MST handled by drm_mst framework */
2850 if (aconnector->mst_mgr.mst_state == true)
2853 sink = aconnector->dc_link->local_sink;
2855 dc_sink_retain(sink);
2858 * Edid mgmt connector gets first update only in mode_valid hook and then
2859 	 * the connector sink is set to either a fake or a physical sink depending on link status.
2860 * Skip if already done during boot.
2862 if (aconnector->base.force != DRM_FORCE_UNSPECIFIED
2863 && aconnector->dc_em_sink) {
2866 * For S3 resume with headless use eml_sink to fake stream
2867 * because on resume connector->sink is set to NULL
2869 mutex_lock(&dev->mode_config.mutex);
2872 if (aconnector->dc_sink) {
2873 amdgpu_dm_update_freesync_caps(connector, NULL);
2875 * retain and release below are used to
2876 * bump up refcount for sink because the link doesn't point
2877 * to it anymore after disconnect, so on next crtc to connector
2878 * reshuffle by UMD we will get into unwanted dc_sink release
2880 dc_sink_release(aconnector->dc_sink);
2882 aconnector->dc_sink = sink;
2883 dc_sink_retain(aconnector->dc_sink);
2884 amdgpu_dm_update_freesync_caps(connector,
2887 amdgpu_dm_update_freesync_caps(connector, NULL);
2888 if (!aconnector->dc_sink) {
2889 aconnector->dc_sink = aconnector->dc_em_sink;
2890 dc_sink_retain(aconnector->dc_sink);
2894 mutex_unlock(&dev->mode_config.mutex);
2897 dc_sink_release(sink);
2902 * TODO: temporary guard to look for proper fix
2903 * if this sink is MST sink, we should not do anything
2905 if (sink && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
2906 dc_sink_release(sink);
2910 if (aconnector->dc_sink == sink) {
2912 * We got a DP short pulse (Link Loss, DP CTS, etc...).
2915 DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: dc_sink didn't change.\n",
2916 aconnector->connector_id);
2918 dc_sink_release(sink);
2922 DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: Old sink=%p New sink=%p\n",
2923 aconnector->connector_id, aconnector->dc_sink, sink);
2925 mutex_lock(&dev->mode_config.mutex);
2928 * 1. Update status of the drm connector
2929 * 2. Send an event and let userspace tell us what to do
2933 * TODO: check if we still need the S3 mode update workaround.
2934 * If yes, put it here.
2936 if (aconnector->dc_sink) {
2937 amdgpu_dm_update_freesync_caps(connector, NULL);
2938 dc_sink_release(aconnector->dc_sink);
2941 aconnector->dc_sink = sink;
2942 dc_sink_retain(aconnector->dc_sink);
2943 if (sink->dc_edid.length == 0) {
2944 aconnector->edid = NULL;
2945 if (aconnector->dc_link->aux_mode) {
2946 drm_dp_cec_unset_edid(
2947 &aconnector->dm_dp_aux.aux);
2951 (struct edid *)sink->dc_edid.raw_edid;
2953 drm_connector_update_edid_property(connector,
2955 if (aconnector->dc_link->aux_mode)
2956 drm_dp_cec_set_edid(&aconnector->dm_dp_aux.aux,
2960 amdgpu_dm_update_freesync_caps(connector, aconnector->edid);
2961 update_connector_ext_caps(aconnector);
2963 drm_dp_cec_unset_edid(&aconnector->dm_dp_aux.aux);
2964 amdgpu_dm_update_freesync_caps(connector, NULL);
2965 drm_connector_update_edid_property(connector, NULL);
2966 aconnector->num_modes = 0;
2967 dc_sink_release(aconnector->dc_sink);
2968 aconnector->dc_sink = NULL;
2969 aconnector->edid = NULL;
2970 #ifdef CONFIG_DRM_AMD_DC_HDCP
2971 /* Set CP to DESIRED if it was ENABLED, so we can re-enable it again on hotplug */
2972 if (connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
2973 connector->state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
2977 mutex_unlock(&dev->mode_config.mutex);
2979 update_subconnector_property(aconnector);
2982 dc_sink_release(sink);
2985 static void handle_hpd_irq_helper(struct amdgpu_dm_connector *aconnector)
2987 struct drm_connector *connector = &aconnector->base;
2988 struct drm_device *dev = connector->dev;
2989 enum dc_connection_type new_connection_type = dc_connection_none;
2990 struct amdgpu_device *adev = drm_to_adev(dev);
2991 struct dm_connector_state *dm_con_state = to_dm_connector_state(connector->state);
2992 struct dm_crtc_state *dm_crtc_state = NULL;
2994 if (adev->dm.disable_hpd_irq)
2997 if (dm_con_state->base.state && dm_con_state->base.crtc)
2998 dm_crtc_state = to_dm_crtc_state(drm_atomic_get_crtc_state(
2999 dm_con_state->base.state,
3000 dm_con_state->base.crtc));
3002 	 * In case of failure or MST, there is no need to update the connector status or notify the OS,
3003 	 * since (for the MST case) MST handles this in its own context.
3005 mutex_lock(&aconnector->hpd_lock);
3007 #ifdef CONFIG_DRM_AMD_DC_HDCP
3008 if (adev->dm.hdcp_workqueue) {
3009 hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
3010 dm_con_state->update_hdcp = true;
3013 if (aconnector->fake_enable)
3014 aconnector->fake_enable = false;
3016 if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
3017 DRM_ERROR("KMS: Failed to detect connector\n");
3019 if (aconnector->base.force && new_connection_type == dc_connection_none) {
3020 emulated_link_detect(aconnector->dc_link);
3022 drm_modeset_lock_all(dev);
3023 dm_restore_drm_connector_state(dev, connector);
3024 drm_modeset_unlock_all(dev);
3026 if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
3027 drm_kms_helper_connector_hotplug_event(connector);
3029 } else if (dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD)) {
3030 if (new_connection_type == dc_connection_none &&
3031 aconnector->dc_link->type == dc_connection_none &&
3033 dm_set_dpms_off(aconnector->dc_link, dm_crtc_state);
3035 amdgpu_dm_update_connector_after_detect(aconnector);
3037 drm_modeset_lock_all(dev);
3038 dm_restore_drm_connector_state(dev, connector);
3039 drm_modeset_unlock_all(dev);
3041 if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
3042 drm_kms_helper_connector_hotplug_event(connector);
3044 mutex_unlock(&aconnector->hpd_lock);
3048 static void handle_hpd_irq(void *param)
3050 struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
3052 handle_hpd_irq_helper(aconnector);
3056 static void dm_handle_mst_sideband_msg(struct amdgpu_dm_connector *aconnector)
3058 uint8_t esi[DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI] = { 0 };
3060 bool new_irq_handled = false;
3062 int dpcd_bytes_to_read;
3064 const int max_process_count = 30;
3065 int process_count = 0;
3067 const struct dc_link_status *link_status = dc_link_get_status(aconnector->dc_link);
3069 if (link_status->dpcd_caps->dpcd_rev.raw < 0x12) {
3070 dpcd_bytes_to_read = DP_LANE0_1_STATUS - DP_SINK_COUNT;
3071 /* DPCD 0x200 - 0x201 for downstream IRQ */
3072 dpcd_addr = DP_SINK_COUNT;
3074 dpcd_bytes_to_read = DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI;
3075 /* DPCD 0x2002 - 0x2005 for downstream IRQ */
3076 dpcd_addr = DP_SINK_COUNT_ESI;
3079 dret = drm_dp_dpcd_read(
3080 &aconnector->dm_dp_aux.aux,
3083 dpcd_bytes_to_read);
3085 while (dret == dpcd_bytes_to_read &&
3086 process_count < max_process_count) {
3092 DRM_DEBUG_DRIVER("ESI %02x %02x %02x\n", esi[0], esi[1], esi[2]);
3093 /* handle HPD short pulse irq */
3094 if (aconnector->mst_mgr.mst_state)
3096 &aconnector->mst_mgr,
3100 if (new_irq_handled) {
3101 			/* ACK at DPCD to notify downstream */
3102 const int ack_dpcd_bytes_to_write =
3103 dpcd_bytes_to_read - 1;
3105 for (retry = 0; retry < 3; retry++) {
3108 wret = drm_dp_dpcd_write(
3109 &aconnector->dm_dp_aux.aux,
3112 ack_dpcd_bytes_to_write);
3113 if (wret == ack_dpcd_bytes_to_write)
3117 /* check if there is new irq to be handled */
3118 dret = drm_dp_dpcd_read(
3119 &aconnector->dm_dp_aux.aux,
3122 dpcd_bytes_to_read);
3124 new_irq_handled = false;
3130 if (process_count == max_process_count)
3131 DRM_DEBUG_DRIVER("Loop exceeded max iterations\n");
3134 static void schedule_hpd_rx_offload_work(struct hpd_rx_irq_offload_work_queue *offload_wq,
3135 union hpd_irq_data hpd_irq_data)
3137 struct hpd_rx_irq_offload_work *offload_work =
3138 kzalloc(sizeof(*offload_work), GFP_KERNEL);
3140 if (!offload_work) {
3141 DRM_ERROR("Failed to allocate hpd_rx_irq_offload_work.\n");
3145 INIT_WORK(&offload_work->work, dm_handle_hpd_rx_offload_work);
3146 offload_work->data = hpd_irq_data;
3147 offload_work->offload_wq = offload_wq;
3149 queue_work(offload_wq->wq, &offload_work->work);
3150 DRM_DEBUG_KMS("queue work to handle hpd_rx offload work");
3153 static void handle_hpd_rx_irq(void *param)
3155 struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
3156 struct drm_connector *connector = &aconnector->base;
3157 struct drm_device *dev = connector->dev;
3158 struct dc_link *dc_link = aconnector->dc_link;
3159 bool is_mst_root_connector = aconnector->mst_mgr.mst_state;
3160 bool result = false;
3161 enum dc_connection_type new_connection_type = dc_connection_none;
3162 struct amdgpu_device *adev = drm_to_adev(dev);
3163 union hpd_irq_data hpd_irq_data;
3164 bool link_loss = false;
3165 bool has_left_work = false;
3166 int idx = aconnector->base.index;
3167 struct hpd_rx_irq_offload_work_queue *offload_wq = &adev->dm.hpd_rx_offload_wq[idx];
3169 memset(&hpd_irq_data, 0, sizeof(hpd_irq_data));
3171 if (adev->dm.disable_hpd_irq)
3175 	 * TODO: Temporarily add a mutex so the hpd interrupt does not have a gpio
3176 	 * conflict; after the i2c helper is implemented, this mutex should be
3179 mutex_lock(&aconnector->hpd_lock);
3181 result = dc_link_handle_hpd_rx_irq(dc_link, &hpd_irq_data,
3182 &link_loss, true, &has_left_work);
3187 if (hpd_irq_data.bytes.device_service_irq.bits.AUTOMATED_TEST) {
3188 schedule_hpd_rx_offload_work(offload_wq, hpd_irq_data);
3192 if (dc_link_dp_allow_hpd_rx_irq(dc_link)) {
3193 if (hpd_irq_data.bytes.device_service_irq.bits.UP_REQ_MSG_RDY ||
3194 hpd_irq_data.bytes.device_service_irq.bits.DOWN_REP_MSG_RDY) {
3195 dm_handle_mst_sideband_msg(aconnector);
3202 spin_lock(&offload_wq->offload_lock);
3203 skip = offload_wq->is_handling_link_loss;
3206 offload_wq->is_handling_link_loss = true;
3208 spin_unlock(&offload_wq->offload_lock);
3211 schedule_hpd_rx_offload_work(offload_wq, hpd_irq_data);
3218 if (result && !is_mst_root_connector) {
3219 /* Downstream Port status changed. */
3220 if (!dc_link_detect_sink(dc_link, &new_connection_type))
3221 DRM_ERROR("KMS: Failed to detect connector\n");
3223 if (aconnector->base.force && new_connection_type == dc_connection_none) {
3224 emulated_link_detect(dc_link);
3226 if (aconnector->fake_enable)
3227 aconnector->fake_enable = false;
3229 amdgpu_dm_update_connector_after_detect(aconnector);
3232 drm_modeset_lock_all(dev);
3233 dm_restore_drm_connector_state(dev, connector);
3234 drm_modeset_unlock_all(dev);
3236 drm_kms_helper_connector_hotplug_event(connector);
3237 } else if (dc_link_detect(dc_link, DETECT_REASON_HPDRX)) {
3239 if (aconnector->fake_enable)
3240 aconnector->fake_enable = false;
3242 amdgpu_dm_update_connector_after_detect(aconnector);
3245 drm_modeset_lock_all(dev);
3246 dm_restore_drm_connector_state(dev, connector);
3247 drm_modeset_unlock_all(dev);
3249 drm_kms_helper_connector_hotplug_event(connector);
3252 #ifdef CONFIG_DRM_AMD_DC_HDCP
3253 if (hpd_irq_data.bytes.device_service_irq.bits.CP_IRQ) {
3254 if (adev->dm.hdcp_workqueue)
3255 hdcp_handle_cpirq(adev->dm.hdcp_workqueue, aconnector->base.index);
3259 if (dc_link->type != dc_connection_mst_branch)
3260 drm_dp_cec_irq(&aconnector->dm_dp_aux.aux);
3262 mutex_unlock(&aconnector->hpd_lock);
3265 static void register_hpd_handlers(struct amdgpu_device *adev)
3267 struct drm_device *dev = adev_to_drm(adev);
3268 struct drm_connector *connector;
3269 struct amdgpu_dm_connector *aconnector;
3270 const struct dc_link *dc_link;
3271 struct dc_interrupt_params int_params = {0};
3273 int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3274 int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3276 list_for_each_entry(connector,
3277 &dev->mode_config.connector_list, head) {
3279 aconnector = to_amdgpu_dm_connector(connector);
3280 dc_link = aconnector->dc_link;
3282 if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd) {
3283 int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
3284 int_params.irq_source = dc_link->irq_source_hpd;
3286 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3288 (void *) aconnector);
3291 if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd_rx) {
3293 /* Also register for DP short pulse (hpd_rx). */
3294 int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
3295 int_params.irq_source = dc_link->irq_source_hpd_rx;
3297 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3299 (void *) aconnector);
3301 if (adev->dm.hpd_rx_offload_wq)
3302 adev->dm.hpd_rx_offload_wq[connector->index].aconnector =
3308 #if defined(CONFIG_DRM_AMD_DC_SI)
3309 /* Register IRQ sources and initialize IRQ callbacks */
3310 static int dce60_register_irq_handlers(struct amdgpu_device *adev)
3312 struct dc *dc = adev->dm.dc;
3313 struct common_irq_params *c_irq_params;
3314 struct dc_interrupt_params int_params = {0};
3317 unsigned client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
3319 int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3320 int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3323 * Actions of amdgpu_irq_add_id():
3324 * 1. Register a set() function with base driver.
3325 * Base driver will call set() function to enable/disable an
3326 * interrupt in DC hardware.
3327 * 2. Register amdgpu_dm_irq_handler().
3328 * Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
3329 * coming from DC hardware.
3330 * amdgpu_dm_irq_handler() will re-direct the interrupt to DC
3331 * for acknowledging and handling. */
3333 /* Use VBLANK interrupt */
3334 for (i = 0; i < adev->mode_info.num_crtc; i++) {
3335 		r = amdgpu_irq_add_id(adev, client_id, i + 1, &adev->crtc_irq);
3337 DRM_ERROR("Failed to add crtc irq id!\n");
3341 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3342 int_params.irq_source =
3343 			dc_interrupt_to_irq_source(dc, i + 1, 0);
3345 c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
3347 c_irq_params->adev = adev;
3348 c_irq_params->irq_src = int_params.irq_source;
3350 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3351 dm_crtc_high_irq, c_irq_params);
3354 /* Use GRPH_PFLIP interrupt */
3355 for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
3356 i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
3357 r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
3359 DRM_ERROR("Failed to add page flip irq id!\n");
3363 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3364 int_params.irq_source =
3365 dc_interrupt_to_irq_source(dc, i, 0);
3367 c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
3369 c_irq_params->adev = adev;
3370 c_irq_params->irq_src = int_params.irq_source;
3372 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3373 dm_pflip_high_irq, c_irq_params);
3378 r = amdgpu_irq_add_id(adev, client_id,
3379 VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
3381 DRM_ERROR("Failed to add hpd irq id!\n");
3385 register_hpd_handlers(adev);
3391 /* Register IRQ sources and initialize IRQ callbacks */
3392 static int dce110_register_irq_handlers(struct amdgpu_device *adev)
3394 struct dc *dc = adev->dm.dc;
3395 struct common_irq_params *c_irq_params;
3396 struct dc_interrupt_params int_params = {0};
3399 unsigned client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
3401 if (adev->family >= AMDGPU_FAMILY_AI)
3402 client_id = SOC15_IH_CLIENTID_DCE;
3404 int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3405 int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3408 * Actions of amdgpu_irq_add_id():
3409 * 1. Register a set() function with base driver.
3410 * Base driver will call set() function to enable/disable an
3411 * interrupt in DC hardware.
3412 * 2. Register amdgpu_dm_irq_handler().
3413 * Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
3414 * coming from DC hardware.
3415 * amdgpu_dm_irq_handler() will re-direct the interrupt to DC
3416 * for acknowledging and handling. */
3418 /* Use VBLANK interrupt */
3419 for (i = VISLANDS30_IV_SRCID_D1_VERTICAL_INTERRUPT0; i <= VISLANDS30_IV_SRCID_D6_VERTICAL_INTERRUPT0; i++) {
3420 r = amdgpu_irq_add_id(adev, client_id, i, &adev->crtc_irq);
3422 DRM_ERROR("Failed to add crtc irq id!\n");
3426 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3427 int_params.irq_source =
3428 dc_interrupt_to_irq_source(dc, i, 0);
3430 c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
3432 c_irq_params->adev = adev;
3433 c_irq_params->irq_src = int_params.irq_source;
3435 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3436 dm_crtc_high_irq, c_irq_params);
3439 /* Use VUPDATE interrupt */
3440 for (i = VISLANDS30_IV_SRCID_D1_V_UPDATE_INT; i <= VISLANDS30_IV_SRCID_D6_V_UPDATE_INT; i += 2) {
3441 r = amdgpu_irq_add_id(adev, client_id, i, &adev->vupdate_irq);
3443 DRM_ERROR("Failed to add vupdate irq id!\n");
3447 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3448 int_params.irq_source =
3449 dc_interrupt_to_irq_source(dc, i, 0);
3451 c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
3453 c_irq_params->adev = adev;
3454 c_irq_params->irq_src = int_params.irq_source;
3456 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3457 dm_vupdate_high_irq, c_irq_params);
3460 /* Use GRPH_PFLIP interrupt */
3461 for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
3462 i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
3463 r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
3465 DRM_ERROR("Failed to add page flip irq id!\n");
3469 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3470 int_params.irq_source =
3471 dc_interrupt_to_irq_source(dc, i, 0);
3473 c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
3475 c_irq_params->adev = adev;
3476 c_irq_params->irq_src = int_params.irq_source;
3478 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3479 dm_pflip_high_irq, c_irq_params);
3484 r = amdgpu_irq_add_id(adev, client_id,
3485 VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
3487 DRM_ERROR("Failed to add hpd irq id!\n");
3491 register_hpd_handlers(adev);
3496 #if defined(CONFIG_DRM_AMD_DC_DCN)
3497 /* Register IRQ sources and initialize IRQ callbacks */
3498 static int dcn10_register_irq_handlers(struct amdgpu_device *adev)
3500 struct dc *dc = adev->dm.dc;
3501 struct common_irq_params *c_irq_params;
3502 struct dc_interrupt_params int_params = {0};
3505 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
3506 static const unsigned int vrtl_int_srcid[] = {
3507 DCN_1_0__SRCID__OTG1_VERTICAL_INTERRUPT0_CONTROL,
3508 DCN_1_0__SRCID__OTG2_VERTICAL_INTERRUPT0_CONTROL,
3509 DCN_1_0__SRCID__OTG3_VERTICAL_INTERRUPT0_CONTROL,
3510 DCN_1_0__SRCID__OTG4_VERTICAL_INTERRUPT0_CONTROL,
3511 DCN_1_0__SRCID__OTG5_VERTICAL_INTERRUPT0_CONTROL,
3512 DCN_1_0__SRCID__OTG6_VERTICAL_INTERRUPT0_CONTROL
3516 int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3517 int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3520 * Actions of amdgpu_irq_add_id():
3521 * 1. Register a set() function with base driver.
3522 * Base driver will call set() function to enable/disable an
3523 * interrupt in DC hardware.
3524 * 2. Register amdgpu_dm_irq_handler().
3525 * Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
3526 * coming from DC hardware.
3527 * amdgpu_dm_irq_handler() will re-direct the interrupt to DC
3528 * for acknowledging and handling.
3531 /* Use VSTARTUP interrupt */
3532 for (i = DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP;
3533 i <= DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP + adev->mode_info.num_crtc - 1;
3535 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->crtc_irq);
3538 DRM_ERROR("Failed to add crtc irq id!\n");
3542 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3543 int_params.irq_source =
3544 dc_interrupt_to_irq_source(dc, i, 0);
3546 c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
3548 c_irq_params->adev = adev;
3549 c_irq_params->irq_src = int_params.irq_source;
3551 amdgpu_dm_irq_register_interrupt(
3552 adev, &int_params, dm_crtc_high_irq, c_irq_params);
3555 /* Use otg vertical line interrupt */
3556 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
3557 for (i = 0; i <= adev->mode_info.num_crtc - 1; i++) {
3558 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE,
3559 vrtl_int_srcid[i], &adev->vline0_irq);
3562 DRM_ERROR("Failed to add vline0 irq id!\n");
3566 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3567 int_params.irq_source =
3568 dc_interrupt_to_irq_source(dc, vrtl_int_srcid[i], 0);
3570 if (int_params.irq_source == DC_IRQ_SOURCE_INVALID) {
3571 DRM_ERROR("Failed to register vline0 irq %d!\n", vrtl_int_srcid[i]);
3575 c_irq_params = &adev->dm.vline0_params[int_params.irq_source
3576 - DC_IRQ_SOURCE_DC1_VLINE0];
3578 c_irq_params->adev = adev;
3579 c_irq_params->irq_src = int_params.irq_source;
3581 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3582 dm_dcn_vertical_interrupt0_high_irq, c_irq_params);
3586 /* Use VUPDATE_NO_LOCK interrupt on DCN, which seems to correspond to
3587 * the regular VUPDATE interrupt on DCE. We want DC_IRQ_SOURCE_VUPDATEx
3588 * to trigger at end of each vblank, regardless of state of the lock,
3589 * matching DCE behaviour.
3591 for (i = DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT;
3592 i <= DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT + adev->mode_info.num_crtc - 1;
3594 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->vupdate_irq);
3597 DRM_ERROR("Failed to add vupdate irq id!\n");
3601 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3602 int_params.irq_source =
3603 dc_interrupt_to_irq_source(dc, i, 0);
3605 c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
3607 c_irq_params->adev = adev;
3608 c_irq_params->irq_src = int_params.irq_source;
3610 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3611 dm_vupdate_high_irq, c_irq_params);
3614 /* Use GRPH_PFLIP interrupt */
3615 for (i = DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT;
3616 i <= DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT + adev->mode_info.num_crtc - 1;
3618 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->pageflip_irq);
3620 DRM_ERROR("Failed to add page flip irq id!\n");
3624 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3625 int_params.irq_source =
3626 dc_interrupt_to_irq_source(dc, i, 0);
3628 c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
3630 c_irq_params->adev = adev;
3631 c_irq_params->irq_src = int_params.irq_source;
3633 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3634 dm_pflip_high_irq, c_irq_params);
3639 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DC_HPD1_INT,
3642 DRM_ERROR("Failed to add hpd irq id!\n");
3646 register_hpd_handlers(adev);
3650 /* Register Outbox IRQ sources and initialize IRQ callbacks */
3651 static int register_outbox_irq_handlers(struct amdgpu_device *adev)
3653 struct dc *dc = adev->dm.dc;
3654 struct common_irq_params *c_irq_params;
3655 struct dc_interrupt_params int_params = {0};
3658 int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3659 int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3661 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DMCUB_OUTBOX_LOW_PRIORITY_READY_INT,
3662 &adev->dmub_outbox_irq);
3664 DRM_ERROR("Failed to add outbox irq id!\n");
3668 if (dc->ctx->dmub_srv) {
3669 i = DCN_1_0__SRCID__DMCUB_OUTBOX_LOW_PRIORITY_READY_INT;
3670 int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
3671 int_params.irq_source =
3672 dc_interrupt_to_irq_source(dc, i, 0);
3674 c_irq_params = &adev->dm.dmub_outbox_params[0];
3676 c_irq_params->adev = adev;
3677 c_irq_params->irq_src = int_params.irq_source;
3679 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3680 dm_dmub_outbox1_low_irq, c_irq_params);
3688 * Acquires the lock for the atomic state object and returns
3689 * the new atomic state.
3691 * This should only be called during atomic check.
3693 static int dm_atomic_get_state(struct drm_atomic_state *state,
3694 struct dm_atomic_state **dm_state)
3696 struct drm_device *dev = state->dev;
3697 struct amdgpu_device *adev = drm_to_adev(dev);
3698 struct amdgpu_display_manager *dm = &adev->dm;
3699 struct drm_private_state *priv_state;
3704 priv_state = drm_atomic_get_private_obj_state(state, &dm->atomic_obj);
3705 if (IS_ERR(priv_state))
3706 return PTR_ERR(priv_state);
3708 *dm_state = to_dm_atomic_state(priv_state);
3713 static struct dm_atomic_state *
3714 dm_atomic_get_new_state(struct drm_atomic_state *state)
3716 struct drm_device *dev = state->dev;
3717 struct amdgpu_device *adev = drm_to_adev(dev);
3718 struct amdgpu_display_manager *dm = &adev->dm;
3719 struct drm_private_obj *obj;
3720 struct drm_private_state *new_obj_state;
3723 for_each_new_private_obj_in_state(state, obj, new_obj_state, i) {
3724 if (obj->funcs == dm->atomic_obj.funcs)
3725 return to_dm_atomic_state(new_obj_state);
3731 static struct drm_private_state *
3732 dm_atomic_duplicate_state(struct drm_private_obj *obj)
3734 struct dm_atomic_state *old_state, *new_state;
3736 new_state = kzalloc(sizeof(*new_state), GFP_KERNEL);
3740 __drm_atomic_helper_private_obj_duplicate_state(obj, &new_state->base);
3742 old_state = to_dm_atomic_state(obj->state);
3744 if (old_state && old_state->context)
3745 new_state->context = dc_copy_state(old_state->context);
3747 if (!new_state->context) {
3752 return &new_state->base;
3755 static void dm_atomic_destroy_state(struct drm_private_obj *obj,
3756 struct drm_private_state *state)
3758 struct dm_atomic_state *dm_state = to_dm_atomic_state(state);
3760 if (dm_state && dm_state->context)
3761 dc_release_state(dm_state->context);
3766 static struct drm_private_state_funcs dm_atomic_state_funcs = {
3767 .atomic_duplicate_state = dm_atomic_duplicate_state,
3768 .atomic_destroy_state = dm_atomic_destroy_state,
3771 static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev)
3773 struct dm_atomic_state *state;
3776 adev->mode_info.mode_config_initialized = true;
3778 adev_to_drm(adev)->mode_config.funcs = (void *)&amdgpu_dm_mode_funcs;
3779 adev_to_drm(adev)->mode_config.helper_private = &amdgpu_dm_mode_config_helperfuncs;
3781 adev_to_drm(adev)->mode_config.max_width = 16384;
3782 adev_to_drm(adev)->mode_config.max_height = 16384;
3784 adev_to_drm(adev)->mode_config.preferred_depth = 24;
3785 adev_to_drm(adev)->mode_config.prefer_shadow = 1;
3786 /* indicates support for immediate flip */
3787 adev_to_drm(adev)->mode_config.async_page_flip = true;
3789 adev_to_drm(adev)->mode_config.fb_base = adev->gmc.aper_base;
3791 state = kzalloc(sizeof(*state), GFP_KERNEL);
3795 state->context = dc_create_state(adev->dm.dc);
3796 if (!state->context) {
3801 dc_resource_state_copy_construct_current(adev->dm.dc, state->context);
3803 drm_atomic_private_obj_init(adev_to_drm(adev),
3804 &adev->dm.atomic_obj,
3806 &dm_atomic_state_funcs);
3808 r = amdgpu_display_modeset_create_props(adev);
3810 dc_release_state(state->context);
3815 r = amdgpu_dm_audio_init(adev);
3817 dc_release_state(state->context);
3825 #define AMDGPU_DM_DEFAULT_MIN_BACKLIGHT 12
3826 #define AMDGPU_DM_DEFAULT_MAX_BACKLIGHT 255
3827 #define AUX_BL_DEFAULT_TRANSITION_TIME_MS 50
3829 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
3830 defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
3832 static void amdgpu_dm_update_backlight_caps(struct amdgpu_display_manager *dm,
3835 #if defined(CONFIG_ACPI)
3836 struct amdgpu_dm_backlight_caps caps;
3838 memset(&caps, 0, sizeof(caps));
3840 if (dm->backlight_caps[bl_idx].caps_valid)
3843 amdgpu_acpi_get_backlight_caps(&caps);
3844 if (caps.caps_valid) {
3845 dm->backlight_caps[bl_idx].caps_valid = true;
3846 if (caps.aux_support)
3848 dm->backlight_caps[bl_idx].min_input_signal = caps.min_input_signal;
3849 dm->backlight_caps[bl_idx].max_input_signal = caps.max_input_signal;
3851 dm->backlight_caps[bl_idx].min_input_signal =
3852 AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
3853 dm->backlight_caps[bl_idx].max_input_signal =
3854 AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
3857 if (dm->backlight_caps[bl_idx].aux_support)
3860 dm->backlight_caps[bl_idx].min_input_signal = AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
3861 dm->backlight_caps[bl_idx].max_input_signal = AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
3865 static int get_brightness_range(const struct amdgpu_dm_backlight_caps *caps,
3866 unsigned *min, unsigned *max)
3871 if (caps->aux_support) {
3872 // Firmware limits are in nits, DC API wants millinits.
3873 *max = 1000 * caps->aux_max_input_signal;
3874 *min = 1000 * caps->aux_min_input_signal;
3876 // Firmware limits are 8-bit, PWM control is 16-bit.
3877 *max = 0x101 * caps->max_input_signal;
3878 *min = 0x101 * caps->min_input_signal;
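		/*
		 * Multiplying an 8-bit limit by 0x101 replicates it into both
		 * bytes of the 16-bit value (0xAB -> 0xABAB), so the default
		 * firmware limits of 12..255 map to a PWM range of
		 * 0x0C0C..0xFFFF.
		 */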
3883 static u32 convert_brightness_from_user(const struct amdgpu_dm_backlight_caps *caps,
3884 uint32_t brightness)
3888 if (!get_brightness_range(caps, &min, &max))
3891 // Rescale 0..255 to min..max
3892 return min + DIV_ROUND_CLOSEST((max - min) * brightness,
3893 AMDGPU_MAX_BL_LEVEL);
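	/*
	 * Worked example, assuming AMDGPU_MAX_BL_LEVEL is 255: with the default
	 * PWM range above (min = 3084, max = 65535), a user brightness of 128
	 * maps to 3084 + DIV_ROUND_CLOSEST(62451 * 128, 255) = 3084 + 31348
	 * = 34432.
	 */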
3896 static u32 convert_brightness_to_user(const struct amdgpu_dm_backlight_caps *caps,
3897 uint32_t brightness)
3901 if (!get_brightness_range(caps, &min, &max))
3904 if (brightness < min)
3906 // Rescale min..max to 0..255
3907 return DIV_ROUND_CLOSEST(AMDGPU_MAX_BL_LEVEL * (brightness - min),
3911 static int amdgpu_dm_backlight_set_level(struct amdgpu_display_manager *dm,
3913 u32 user_brightness)
3915 struct amdgpu_dm_backlight_caps caps;
3916 struct dc_link *link;
3920 amdgpu_dm_update_backlight_caps(dm, bl_idx);
3921 caps = dm->backlight_caps[bl_idx];
3923 dm->brightness[bl_idx] = user_brightness;
3924 /* update scratch register */
3926 amdgpu_atombios_scratch_regs_set_backlight_level(dm->adev, dm->brightness[bl_idx]);
3927 brightness = convert_brightness_from_user(&caps, dm->brightness[bl_idx]);
3928 link = (struct dc_link *)dm->backlight_link[bl_idx];
3930 /* Change brightness based on AUX property */
3931 if (caps.aux_support) {
3932 rc = dc_link_set_backlight_level_nits(link, true, brightness,
3933 AUX_BL_DEFAULT_TRANSITION_TIME_MS);
3935 DRM_DEBUG("DM: Failed to update backlight via AUX on eDP[%d]\n", bl_idx);
3937 rc = dc_link_set_backlight_level(link, brightness, 0);
3939 DRM_DEBUG("DM: Failed to update backlight on eDP[%d]\n", bl_idx);
3945 static int amdgpu_dm_backlight_update_status(struct backlight_device *bd)
3947 struct amdgpu_display_manager *dm = bl_get_data(bd);
3950 for (i = 0; i < dm->num_of_edps; i++) {
3951 if (bd == dm->backlight_dev[i])
3954 if (i >= AMDGPU_DM_MAX_NUM_EDP)
3956 amdgpu_dm_backlight_set_level(dm, i, bd->props.brightness);
3961 static u32 amdgpu_dm_backlight_get_level(struct amdgpu_display_manager *dm,
3964 struct amdgpu_dm_backlight_caps caps;
3965 struct dc_link *link = (struct dc_link *)dm->backlight_link[bl_idx];
3967 amdgpu_dm_update_backlight_caps(dm, bl_idx);
3968 caps = dm->backlight_caps[bl_idx];
3970 if (caps.aux_support) {
3974 rc = dc_link_get_backlight_level_nits(link, &avg, &peak);
3976 return dm->brightness[bl_idx];
3977 return convert_brightness_to_user(&caps, avg);
3979 int ret = dc_link_get_backlight_level(link);
3981 if (ret == DC_ERROR_UNEXPECTED)
3982 return dm->brightness[bl_idx];
3983 return convert_brightness_to_user(&caps, ret);
3987 static int amdgpu_dm_backlight_get_brightness(struct backlight_device *bd)
3989 struct amdgpu_display_manager *dm = bl_get_data(bd);
3992 for (i = 0; i < dm->num_of_edps; i++) {
3993 if (bd == dm->backlight_dev[i])
3996 if (i >= AMDGPU_DM_MAX_NUM_EDP)
3998 return amdgpu_dm_backlight_get_level(dm, i);
4001 static const struct backlight_ops amdgpu_dm_backlight_ops = {
4002 .options = BL_CORE_SUSPENDRESUME,
4003 .get_brightness = amdgpu_dm_backlight_get_brightness,
4004 .update_status = amdgpu_dm_backlight_update_status,
4008 amdgpu_dm_register_backlight_device(struct amdgpu_display_manager *dm)
4011 struct backlight_properties props = { 0 };
4013 amdgpu_dm_update_backlight_caps(dm, dm->num_of_edps);
4014 dm->brightness[dm->num_of_edps] = AMDGPU_MAX_BL_LEVEL;
4016 props.max_brightness = AMDGPU_MAX_BL_LEVEL;
4017 props.brightness = AMDGPU_MAX_BL_LEVEL;
4018 props.type = BACKLIGHT_RAW;
4020 snprintf(bl_name, sizeof(bl_name), "amdgpu_bl%d",
4021 adev_to_drm(dm->adev)->primary->index + dm->num_of_edps);
4023 dm->backlight_dev[dm->num_of_edps] = backlight_device_register(bl_name,
4024 adev_to_drm(dm->adev)->dev,
4026 &amdgpu_dm_backlight_ops,
4029 if (IS_ERR(dm->backlight_dev[dm->num_of_edps]))
4030 DRM_ERROR("DM: Backlight registration failed!\n");
4032 DRM_DEBUG_DRIVER("DM: Registered Backlight device: %s\n", bl_name);
4036 static int initialize_plane(struct amdgpu_display_manager *dm,
4037 struct amdgpu_mode_info *mode_info, int plane_id,
4038 enum drm_plane_type plane_type,
4039 const struct dc_plane_cap *plane_cap)
4041 struct drm_plane *plane;
4042 unsigned long possible_crtcs;
4045 plane = kzalloc(sizeof(struct drm_plane), GFP_KERNEL);
4047 DRM_ERROR("KMS: Failed to allocate plane\n");
4050 plane->type = plane_type;
4053 * HACK: IGT tests expect that the primary plane for a CRTC
4054 * can only have one possible CRTC. Only expose support for
4055 * any CRTC if they're not going to be used as a primary plane
4056 * for a CRTC - like overlay or underlay planes.
4058 possible_crtcs = 1 << plane_id;
4059 if (plane_id >= dm->dc->caps.max_streams)
4060 possible_crtcs = 0xff;
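	/*
	 * For example, with max_streams = 4: plane_id 2 (a primary plane) gets
	 * possible_crtcs = 1 << 2 = 0x4 and is tied to CRTC 2, while plane_id 5
	 * (an overlay plane) gets 0xff and may be used with any CRTC.
	 */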
4062 ret = amdgpu_dm_plane_init(dm, plane, possible_crtcs, plane_cap);
4065 DRM_ERROR("KMS: Failed to initialize plane\n");
4071 mode_info->planes[plane_id] = plane;
4077 static void register_backlight_device(struct amdgpu_display_manager *dm,
4078 struct dc_link *link)
4080 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
4081 defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
4083 if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
4084 link->type != dc_connection_none) {
4086 		 * Even if registration fails, we should continue with
4087 		 * DM initialization because not having a backlight control
4088 		 * is better than a black screen.
4090 if (!dm->backlight_dev[dm->num_of_edps])
4091 amdgpu_dm_register_backlight_device(dm);
4093 if (dm->backlight_dev[dm->num_of_edps]) {
4094 dm->backlight_link[dm->num_of_edps] = link;
4103 * In this architecture, the association
4104 * connector -> encoder -> crtc
4105 * is not really required. The crtc and connector will hold the
4106 * display_index as an abstraction to use with the DAL component
4108 * Returns 0 on success
4110 static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
4112 struct amdgpu_display_manager *dm = &adev->dm;
4114 struct amdgpu_dm_connector *aconnector = NULL;
4115 struct amdgpu_encoder *aencoder = NULL;
4116 struct amdgpu_mode_info *mode_info = &adev->mode_info;
4118 int32_t primary_planes;
4119 enum dc_connection_type new_connection_type = dc_connection_none;
4120 const struct dc_plane_cap *plane;
4121 bool psr_feature_enabled = false;
4123 dm->display_indexes_num = dm->dc->caps.max_streams;
4124 /* Update the actual number of CRTCs in use */
4125 adev->mode_info.num_crtc = adev->dm.display_indexes_num;
4127 link_cnt = dm->dc->caps.max_links;
4128 if (amdgpu_dm_mode_config_init(dm->adev)) {
4129 DRM_ERROR("DM: Failed to initialize mode config\n");
4133 /* There is one primary plane per CRTC */
4134 primary_planes = dm->dc->caps.max_streams;
4135 ASSERT(primary_planes <= AMDGPU_MAX_PLANES);
4138 * Initialize primary planes, implicit planes for legacy IOCTLS.
4139 * Order is reversed to match iteration order in atomic check.
4141 for (i = (primary_planes - 1); i >= 0; i--) {
4142 plane = &dm->dc->caps.planes[i];
4144 if (initialize_plane(dm, mode_info, i,
4145 DRM_PLANE_TYPE_PRIMARY, plane)) {
4146 DRM_ERROR("KMS: Failed to initialize primary plane\n");
4152 * Initialize overlay planes, index starting after primary planes.
4153 * These planes have a higher DRM index than the primary planes since
4154 * they should be considered as having a higher z-order.
4155 * Order is reversed to match iteration order in atomic check.
4157 * Only support DCN for now, and only expose one so we don't encourage
4158 * userspace to use up all the pipes.
4160 for (i = 0; i < dm->dc->caps.max_planes; ++i) {
4161 struct dc_plane_cap *plane = &dm->dc->caps.planes[i];
4163 if (plane->type != DC_PLANE_TYPE_DCN_UNIVERSAL)
4166 if (!plane->blends_with_above || !plane->blends_with_below)
4169 if (!plane->pixel_format_support.argb8888)
4172 if (initialize_plane(dm, NULL, primary_planes + i,
4173 DRM_PLANE_TYPE_OVERLAY, plane)) {
4174 DRM_ERROR("KMS: Failed to initialize overlay plane\n");
4178 /* Only create one overlay plane. */
4182 for (i = 0; i < dm->dc->caps.max_streams; i++)
4183 if (amdgpu_dm_crtc_init(dm, mode_info->planes[i], i)) {
4184 DRM_ERROR("KMS: Failed to initialize crtc\n");
4188 #if defined(CONFIG_DRM_AMD_DC_DCN)
4189 /* Use Outbox interrupt */
4190 switch (adev->ip_versions[DCE_HWIP][0]) {
4191 case IP_VERSION(3, 0, 0):
4192 case IP_VERSION(3, 1, 2):
4193 case IP_VERSION(3, 1, 3):
4194 case IP_VERSION(2, 1, 0):
4195 if (register_outbox_irq_handlers(dm->adev)) {
4196 DRM_ERROR("DM: Failed to initialize IRQ\n");
4201 DRM_DEBUG_KMS("Unsupported DCN IP version for outbox: 0x%X\n",
4202 adev->ip_versions[DCE_HWIP][0]);
4205 /* Determine whether to enable PSR support by default. */
4206 if (!(amdgpu_dc_debug_mask & DC_DISABLE_PSR)) {
4207 switch (adev->ip_versions[DCE_HWIP][0]) {
4208 case IP_VERSION(3, 1, 2):
4209 case IP_VERSION(3, 1, 3):
4210 psr_feature_enabled = true;
4213 psr_feature_enabled = amdgpu_dc_feature_mask & DC_PSR_MASK;
4219 /* loops over all connectors on the board */
4220 for (i = 0; i < link_cnt; i++) {
4221 struct dc_link *link = NULL;
4223 if (i > AMDGPU_DM_MAX_DISPLAY_INDEX) {
4225 "KMS: Cannot support more than %d display indexes\n",
4226 AMDGPU_DM_MAX_DISPLAY_INDEX);
4230 aconnector = kzalloc(sizeof(*aconnector), GFP_KERNEL);
4234 aencoder = kzalloc(sizeof(*aencoder), GFP_KERNEL);
4238 if (amdgpu_dm_encoder_init(dm->ddev, aencoder, i)) {
4239 DRM_ERROR("KMS: Failed to initialize encoder\n");
4243 if (amdgpu_dm_connector_init(dm, aconnector, i, aencoder)) {
4244 DRM_ERROR("KMS: Failed to initialize connector\n");
4248 link = dc_get_link_at_index(dm->dc, i);
4250 if (!dc_link_detect_sink(link, &new_connection_type))
4251 DRM_ERROR("KMS: Failed to detect connector\n");
4253 if (aconnector->base.force && new_connection_type == dc_connection_none) {
4254 emulated_link_detect(link);
4255 amdgpu_dm_update_connector_after_detect(aconnector);
4257 } else if (dc_link_detect(link, DETECT_REASON_BOOT)) {
4258 amdgpu_dm_update_connector_after_detect(aconnector);
4259 register_backlight_device(dm, link);
4260 if (dm->num_of_edps)
4261 update_connector_ext_caps(aconnector);
4262 if (psr_feature_enabled)
4263 amdgpu_dm_set_psr_caps(link);
4270 * Disable vblank IRQs aggressively for power-saving.
4272 * TODO: Fix vblank control helpers to delay PSR entry to allow this when PSR
4273 * is also supported.
4275 adev_to_drm(adev)->vblank_disable_immediate = !psr_feature_enabled;
4277 /* Software is initialized. Now we can register interrupt handlers. */
4278 switch (adev->asic_type) {
4279 #if defined(CONFIG_DRM_AMD_DC_SI)
4284 if (dce60_register_irq_handlers(dm->adev)) {
4285 DRM_ERROR("DM: Failed to initialize IRQ\n");
4299 case CHIP_POLARIS11:
4300 case CHIP_POLARIS10:
4301 case CHIP_POLARIS12:
4306 if (dce110_register_irq_handlers(dm->adev)) {
4307 DRM_ERROR("DM: Failed to initialize IRQ\n");
4312 #if defined(CONFIG_DRM_AMD_DC_DCN)
4313 switch (adev->ip_versions[DCE_HWIP][0]) {
4314 case IP_VERSION(1, 0, 0):
4315 case IP_VERSION(1, 0, 1):
4316 case IP_VERSION(2, 0, 2):
4317 case IP_VERSION(2, 0, 3):
4318 case IP_VERSION(2, 0, 0):
4319 case IP_VERSION(2, 1, 0):
4320 case IP_VERSION(3, 0, 0):
4321 case IP_VERSION(3, 0, 2):
4322 case IP_VERSION(3, 0, 3):
4323 case IP_VERSION(3, 0, 1):
4324 case IP_VERSION(3, 1, 2):
4325 case IP_VERSION(3, 1, 3):
4326 if (dcn10_register_irq_handlers(dm->adev)) {
4327 DRM_ERROR("DM: Failed to initialize IRQ\n");
4332 DRM_ERROR("Unsupported DCE IP versions: 0x%X\n",
4333 adev->ip_versions[DCE_HWIP][0]);
4348 static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm)
4350 drm_atomic_private_obj_fini(&dm->atomic_obj);
4354 /******************************************************************************
4355 * amdgpu_display_funcs functions
4356 *****************************************************************************/
4359 * dm_bandwidth_update - program display watermarks
4361 * @adev: amdgpu_device pointer
4363 * Calculate and program the display watermarks and line buffer allocation.
4365 static void dm_bandwidth_update(struct amdgpu_device *adev)
4367 /* TODO: implement later */
4370 static const struct amdgpu_display_funcs dm_display_funcs = {
4371 .bandwidth_update = dm_bandwidth_update, /* called unconditionally */
4372 .vblank_get_counter = dm_vblank_get_counter,/* called unconditionally */
4373 .backlight_set_level = NULL, /* never called for DC */
4374 .backlight_get_level = NULL, /* never called for DC */
4375 .hpd_sense = NULL,/* called unconditionally */
4376 .hpd_set_polarity = NULL, /* called unconditionally */
4377 .hpd_get_gpio_reg = NULL, /* VBIOS parsing. DAL does it. */
4378 .page_flip_get_scanoutpos =
4379 dm_crtc_get_scanoutpos,/* called unconditionally */
4380 .add_encoder = NULL, /* VBIOS parsing. DAL does it. */
4381 .add_connector = NULL, /* VBIOS parsing. DAL does it. */
4384 #if defined(CONFIG_DEBUG_KERNEL_DC)
4386 static ssize_t s3_debug_store(struct device *device,
4387 struct device_attribute *attr,
4393 struct drm_device *drm_dev = dev_get_drvdata(device);
4394 struct amdgpu_device *adev = drm_to_adev(drm_dev);
4396 ret = kstrtoint(buf, 0, &s3_state);
4401 drm_kms_helper_hotplug_event(adev_to_drm(adev));
4406 return ret == 0 ? count : 0;
4409 DEVICE_ATTR_WO(s3_debug);
4413 static int dm_early_init(void *handle)
4415 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
4417 switch (adev->asic_type) {
4418 #if defined(CONFIG_DRM_AMD_DC_SI)
4422 adev->mode_info.num_crtc = 6;
4423 adev->mode_info.num_hpd = 6;
4424 adev->mode_info.num_dig = 6;
4427 adev->mode_info.num_crtc = 2;
4428 adev->mode_info.num_hpd = 2;
4429 adev->mode_info.num_dig = 2;
4434 adev->mode_info.num_crtc = 6;
4435 adev->mode_info.num_hpd = 6;
4436 adev->mode_info.num_dig = 6;
4439 adev->mode_info.num_crtc = 4;
4440 adev->mode_info.num_hpd = 6;
4441 adev->mode_info.num_dig = 7;
4445 adev->mode_info.num_crtc = 2;
4446 adev->mode_info.num_hpd = 6;
4447 adev->mode_info.num_dig = 6;
4451 adev->mode_info.num_crtc = 6;
4452 adev->mode_info.num_hpd = 6;
4453 adev->mode_info.num_dig = 7;
4456 adev->mode_info.num_crtc = 3;
4457 adev->mode_info.num_hpd = 6;
4458 adev->mode_info.num_dig = 9;
4461 adev->mode_info.num_crtc = 2;
4462 adev->mode_info.num_hpd = 6;
4463 adev->mode_info.num_dig = 9;
4465 case CHIP_POLARIS11:
4466 case CHIP_POLARIS12:
4467 adev->mode_info.num_crtc = 5;
4468 adev->mode_info.num_hpd = 5;
4469 adev->mode_info.num_dig = 5;
4471 case CHIP_POLARIS10:
4473 adev->mode_info.num_crtc = 6;
4474 adev->mode_info.num_hpd = 6;
4475 adev->mode_info.num_dig = 6;
4480 adev->mode_info.num_crtc = 6;
4481 adev->mode_info.num_hpd = 6;
4482 adev->mode_info.num_dig = 6;
4485 #if defined(CONFIG_DRM_AMD_DC_DCN)
4486 switch (adev->ip_versions[DCE_HWIP][0]) {
4487 case IP_VERSION(2, 0, 2):
4488 case IP_VERSION(3, 0, 0):
4489 adev->mode_info.num_crtc = 6;
4490 adev->mode_info.num_hpd = 6;
4491 adev->mode_info.num_dig = 6;
4493 case IP_VERSION(2, 0, 0):
4494 case IP_VERSION(3, 0, 2):
4495 adev->mode_info.num_crtc = 5;
4496 adev->mode_info.num_hpd = 5;
4497 adev->mode_info.num_dig = 5;
4499 case IP_VERSION(2, 0, 3):
4500 case IP_VERSION(3, 0, 3):
4501 adev->mode_info.num_crtc = 2;
4502 adev->mode_info.num_hpd = 2;
4503 adev->mode_info.num_dig = 2;
4505 case IP_VERSION(1, 0, 0):
4506 case IP_VERSION(1, 0, 1):
4507 case IP_VERSION(3, 0, 1):
4508 case IP_VERSION(2, 1, 0):
4509 case IP_VERSION(3, 1, 2):
4510 case IP_VERSION(3, 1, 3):
4511 adev->mode_info.num_crtc = 4;
4512 adev->mode_info.num_hpd = 4;
4513 adev->mode_info.num_dig = 4;
4516 DRM_ERROR("Unsupported DCE IP versions: 0x%x\n",
4517 adev->ip_versions[DCE_HWIP][0]);
4524 amdgpu_dm_set_irq_funcs(adev);
4526 if (adev->mode_info.funcs == NULL)
4527 adev->mode_info.funcs = &dm_display_funcs;
4530 * Note: Do NOT change adev->audio_endpt_rreg and
4531 * adev->audio_endpt_wreg because they are initialised in
4532 * amdgpu_device_init()
4534 #if defined(CONFIG_DEBUG_KERNEL_DC)
4536 adev_to_drm(adev)->dev,
4537 &dev_attr_s3_debug);
4543 static bool modeset_required(struct drm_crtc_state *crtc_state,
4544 struct dc_stream_state *new_stream,
4545 struct dc_stream_state *old_stream)
4547 return crtc_state->active && drm_atomic_crtc_needs_modeset(crtc_state);
4550 static bool modereset_required(struct drm_crtc_state *crtc_state)
4552 return !crtc_state->active && drm_atomic_crtc_needs_modeset(crtc_state);
4555 static void amdgpu_dm_encoder_destroy(struct drm_encoder *encoder)
4557 drm_encoder_cleanup(encoder);
4561 static const struct drm_encoder_funcs amdgpu_dm_encoder_funcs = {
4562 .destroy = amdgpu_dm_encoder_destroy,
4566 static void get_min_max_dc_plane_scaling(struct drm_device *dev,
4567 struct drm_framebuffer *fb,
4568 int *min_downscale, int *max_upscale)
4570 struct amdgpu_device *adev = drm_to_adev(dev);
4571 struct dc *dc = adev->dm.dc;
4572 /* Caps for all supported planes are the same on DCE and DCN 1 - 3 */
4573 struct dc_plane_cap *plane_cap = &dc->caps.planes[0];
4575 switch (fb->format->format) {
4576 case DRM_FORMAT_P010:
4577 case DRM_FORMAT_NV12:
4578 case DRM_FORMAT_NV21:
4579 *max_upscale = plane_cap->max_upscale_factor.nv12;
4580 *min_downscale = plane_cap->max_downscale_factor.nv12;
4583 case DRM_FORMAT_XRGB16161616F:
4584 case DRM_FORMAT_ARGB16161616F:
4585 case DRM_FORMAT_XBGR16161616F:
4586 case DRM_FORMAT_ABGR16161616F:
4587 *max_upscale = plane_cap->max_upscale_factor.fp16;
4588 *min_downscale = plane_cap->max_downscale_factor.fp16;
4592 *max_upscale = plane_cap->max_upscale_factor.argb8888;
4593 *min_downscale = plane_cap->max_downscale_factor.argb8888;
4598 * A factor of 1 in the plane_cap means to not allow scaling, ie. use a
4599 * scaling factor of 1.0 == 1000 units.
4601 if (*max_upscale == 1)
4602 *max_upscale = 1000;
4604 if (*min_downscale == 1)
4605 *min_downscale = 1000;
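/*
 * Worked example (illustrative, not part of the driver): if the plane caps
 * report max_downscale_factor.argb8888 == 250 and
 * max_upscale_factor.argb8888 == 16000 (the same values fill_dc_scaling_info()
 * below falls back to), the allowed ratios run from 0.25x (a 4:1 downscale)
 * up to a 16x upscale, since 1000 units correspond to a 1.0 scaling factor.
 */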
4609 static int fill_dc_scaling_info(struct amdgpu_device *adev,
4610 const struct drm_plane_state *state,
4611 struct dc_scaling_info *scaling_info)
4613 int scale_w, scale_h, min_downscale, max_upscale;
4615 memset(scaling_info, 0, sizeof(*scaling_info));
4617 /* Source is fixed 16.16 but we ignore mantissa for now... */
4618 scaling_info->src_rect.x = state->src_x >> 16;
4619 scaling_info->src_rect.y = state->src_y >> 16;
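/*
 * Illustrative note (not in the original source): DRM stores the source
 * rectangle in 16.16 fixed point, so state->src_x == 0x1E0000 means 30.0
 * pixels and 0x1E8000 means 30.5 pixels; the >> 16 above keeps only the
 * integer part (30 in both cases).
 */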
4622 * For reasons we don't (yet) fully understand, a non-zero
4623 * src_y coordinate into an NV12 buffer can cause a
4624 * system hang on DCN1x.
4625 * To avoid hangs (and maybe be overly cautious)
4626 * let's reject both non-zero src_x and src_y.
4628 * We currently know of only one use-case to reproduce a
4629 * scenario with non-zero src_x and src_y for NV12, which
4630 * is to gesture the YouTube Android app into full screen
4633 if (((adev->ip_versions[DCE_HWIP][0] == IP_VERSION(1, 0, 0)) ||
4634 (adev->ip_versions[DCE_HWIP][0] == IP_VERSION(1, 0, 1))) &&
4635 (state->fb && state->fb->format->format == DRM_FORMAT_NV12 &&
4636 (scaling_info->src_rect.x != 0 || scaling_info->src_rect.y != 0)))
4639 scaling_info->src_rect.width = state->src_w >> 16;
4640 if (scaling_info->src_rect.width == 0)
4643 scaling_info->src_rect.height = state->src_h >> 16;
4644 if (scaling_info->src_rect.height == 0)
4647 scaling_info->dst_rect.x = state->crtc_x;
4648 scaling_info->dst_rect.y = state->crtc_y;
4650 if (state->crtc_w == 0)
4653 scaling_info->dst_rect.width = state->crtc_w;
4655 if (state->crtc_h == 0)
4658 scaling_info->dst_rect.height = state->crtc_h;
4660 /* DRM doesn't specify clipping on destination output. */
4661 scaling_info->clip_rect = scaling_info->dst_rect;
4663 /* Validate scaling per-format with DC plane caps */
4664 if (state->plane && state->plane->dev && state->fb) {
4665 get_min_max_dc_plane_scaling(state->plane->dev, state->fb,
4666 &min_downscale, &max_upscale);
4668 min_downscale = 250;
4669 max_upscale = 16000;
4672 scale_w = scaling_info->dst_rect.width * 1000 /
4673 scaling_info->src_rect.width;
4675 if (scale_w < min_downscale || scale_w > max_upscale)
4678 scale_h = scaling_info->dst_rect.height * 1000 /
4679 scaling_info->src_rect.height;
4681 if (scale_h < min_downscale || scale_h > max_upscale)
4685 * The "scaling_quality" can be ignored for now, quality = 0 has DC
4686 * assume reasonable defaults based on the format.
4693 fill_gfx8_tiling_info_from_flags(union dc_tiling_info *tiling_info,
4694 uint64_t tiling_flags)
4696 /* Fill GFX8 params */
4697 if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == DC_ARRAY_2D_TILED_THIN1) {
4698 unsigned int bankw, bankh, mtaspect, tile_split, num_banks;
4700 bankw = AMDGPU_TILING_GET(tiling_flags, BANK_WIDTH);
4701 bankh = AMDGPU_TILING_GET(tiling_flags, BANK_HEIGHT);
4702 mtaspect = AMDGPU_TILING_GET(tiling_flags, MACRO_TILE_ASPECT);
4703 tile_split = AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT);
4704 num_banks = AMDGPU_TILING_GET(tiling_flags, NUM_BANKS);
4706 /* XXX fix me for VI */
4707 tiling_info->gfx8.num_banks = num_banks;
4708 tiling_info->gfx8.array_mode =
4709 DC_ARRAY_2D_TILED_THIN1;
4710 tiling_info->gfx8.tile_split = tile_split;
4711 tiling_info->gfx8.bank_width = bankw;
4712 tiling_info->gfx8.bank_height = bankh;
4713 tiling_info->gfx8.tile_aspect = mtaspect;
4714 tiling_info->gfx8.tile_mode =
4715 DC_ADDR_SURF_MICRO_TILING_DISPLAY;
4716 } else if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE)
4717 == DC_ARRAY_1D_TILED_THIN1) {
4718 tiling_info->gfx8.array_mode = DC_ARRAY_1D_TILED_THIN1;
4721 tiling_info->gfx8.pipe_config =
4722 AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG);
4726 fill_gfx9_tiling_info_from_device(const struct amdgpu_device *adev,
4727 union dc_tiling_info *tiling_info)
4729 tiling_info->gfx9.num_pipes =
4730 adev->gfx.config.gb_addr_config_fields.num_pipes;
4731 tiling_info->gfx9.num_banks =
4732 adev->gfx.config.gb_addr_config_fields.num_banks;
4733 tiling_info->gfx9.pipe_interleave =
4734 adev->gfx.config.gb_addr_config_fields.pipe_interleave_size;
4735 tiling_info->gfx9.num_shader_engines =
4736 adev->gfx.config.gb_addr_config_fields.num_se;
4737 tiling_info->gfx9.max_compressed_frags =
4738 adev->gfx.config.gb_addr_config_fields.max_compress_frags;
4739 tiling_info->gfx9.num_rb_per_se =
4740 adev->gfx.config.gb_addr_config_fields.num_rb_per_se;
4741 tiling_info->gfx9.shaderEnable = 1;
4742 if (adev->ip_versions[GC_HWIP][0] >= IP_VERSION(10, 3, 0))
4743 tiling_info->gfx9.num_pkrs = adev->gfx.config.gb_addr_config_fields.num_pkrs;
4747 validate_dcc(struct amdgpu_device *adev,
4748 const enum surface_pixel_format format,
4749 const enum dc_rotation_angle rotation,
4750 const union dc_tiling_info *tiling_info,
4751 const struct dc_plane_dcc_param *dcc,
4752 const struct dc_plane_address *address,
4753 const struct plane_size *plane_size)
4755 struct dc *dc = adev->dm.dc;
4756 struct dc_dcc_surface_param input;
4757 struct dc_surface_dcc_cap output;
4759 memset(&input, 0, sizeof(input));
4760 memset(&output, 0, sizeof(output));
4765 if (format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN ||
4766 !dc->cap_funcs.get_dcc_compression_cap)
4769 input.format = format;
4770 input.surface_size.width = plane_size->surface_size.width;
4771 input.surface_size.height = plane_size->surface_size.height;
4772 input.swizzle_mode = tiling_info->gfx9.swizzle;
4774 if (rotation == ROTATION_ANGLE_0 || rotation == ROTATION_ANGLE_180)
4775 input.scan = SCAN_DIRECTION_HORIZONTAL;
4776 else if (rotation == ROTATION_ANGLE_90 || rotation == ROTATION_ANGLE_270)
4777 input.scan = SCAN_DIRECTION_VERTICAL;
4779 if (!dc->cap_funcs.get_dcc_compression_cap(dc, &input, &output))
4782 if (!output.capable)
4785 if (dcc->independent_64b_blks == 0 &&
4786 output.grph.rgb.independent_64b_blks != 0)
4793 modifier_has_dcc(uint64_t modifier)
4795 return IS_AMD_FMT_MOD(modifier) && AMD_FMT_MOD_GET(DCC, modifier);
4799 modifier_gfx9_swizzle_mode(uint64_t modifier)
4801 if (modifier == DRM_FORMAT_MOD_LINEAR)
4804 return AMD_FMT_MOD_GET(TILE, modifier);
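/*
 * Illustrative sketch (not in the original source): AMD format modifiers
 * pack several bitfields into a single u64, and the AMD_FMT_MOD_SET()/
 * AMD_FMT_MOD_GET() helpers from drm_fourcc.h shift and mask the individual
 * fields. For example, a modifier built as
 *
 *	u64 mod = AMD_FMT_MOD |
 *		  AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
 *		  AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9);
 *
 * gives AMD_FMT_MOD_GET(TILE, mod) == AMD_FMT_MOD_TILE_GFX9_64K_S_X, which
 * is what modifier_gfx9_swizzle_mode() returns for a tiled buffer.
 */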
4807 static const struct drm_format_info *
4808 amd_get_format_info(const struct drm_mode_fb_cmd2 *cmd)
4810 return amdgpu_lookup_format_info(cmd->pixel_format, cmd->modifier[0]);
4814 fill_gfx9_tiling_info_from_modifier(const struct amdgpu_device *adev,
4815 union dc_tiling_info *tiling_info,
4818 unsigned int mod_bank_xor_bits = AMD_FMT_MOD_GET(BANK_XOR_BITS, modifier);
4819 unsigned int mod_pipe_xor_bits = AMD_FMT_MOD_GET(PIPE_XOR_BITS, modifier);
4820 unsigned int pkrs_log2 = AMD_FMT_MOD_GET(PACKERS, modifier);
4821 unsigned int pipes_log2 = min(4u, mod_pipe_xor_bits);
4823 fill_gfx9_tiling_info_from_device(adev, tiling_info);
4825 if (!IS_AMD_FMT_MOD(modifier))
4828 tiling_info->gfx9.num_pipes = 1u << pipes_log2;
4829 tiling_info->gfx9.num_shader_engines = 1u << (mod_pipe_xor_bits - pipes_log2);
4831 if (adev->family >= AMDGPU_FAMILY_NV) {
4832 tiling_info->gfx9.num_pkrs = 1u << pkrs_log2;
4834 tiling_info->gfx9.num_banks = 1u << mod_bank_xor_bits;
4836 /* for DCC we know it isn't rb aligned, so rb_per_se doesn't matter. */
4840 enum dm_micro_swizzle {
4841 MICRO_SWIZZLE_Z = 0,
4842 MICRO_SWIZZLE_S = 1,
4843 MICRO_SWIZZLE_D = 2,
4847 static bool dm_plane_format_mod_supported(struct drm_plane *plane,
4851 struct amdgpu_device *adev = drm_to_adev(plane->dev);
4852 const struct drm_format_info *info = drm_format_info(format);
4855 enum dm_micro_swizzle microtile = modifier_gfx9_swizzle_mode(modifier) & 3;
4861 * We always have to allow these modifiers:
4862 * 1. Core DRM checks for LINEAR support if userspace does not provide modifiers.
4863 * 2. Not passing any modifiers is the same as explicitly passing INVALID.
4865 if (modifier == DRM_FORMAT_MOD_LINEAR ||
4866 modifier == DRM_FORMAT_MOD_INVALID) {
4870 /* Check that the modifier is on the list of the plane's supported modifiers. */
4871 for (i = 0; i < plane->modifier_count; i++) {
4872 if (modifier == plane->modifiers[i])
4875 if (i == plane->modifier_count)
4879 * For D swizzle the canonical modifier depends on the bpp, so check
4882 if (AMD_FMT_MOD_GET(TILE_VERSION, modifier) == AMD_FMT_MOD_TILE_VER_GFX9 &&
4883 adev->family >= AMDGPU_FAMILY_NV) {
4884 if (microtile == MICRO_SWIZZLE_D && info->cpp[0] == 4)
4888 if (adev->family >= AMDGPU_FAMILY_RV && microtile == MICRO_SWIZZLE_D &&
4892 if (modifier_has_dcc(modifier)) {
4893 /* Per radeonsi comments 16/64 bpp are more complicated. */
4894 if (info->cpp[0] != 4)
4896 /* We support multi-planar formats, but not when combined with
4897 * additional DCC metadata planes. */
4898 if (info->num_planes > 1)
4906 add_modifier(uint64_t **mods, uint64_t *size, uint64_t *cap, uint64_t mod)
4911 if (*cap - *size < 1) {
4912 uint64_t new_cap = *cap * 2;
4913 uint64_t *new_mods = kmalloc(new_cap * sizeof(uint64_t), GFP_KERNEL);
4921 memcpy(new_mods, *mods, sizeof(uint64_t) * *size);
4927 (*mods)[*size] = mod;
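/*
 * Illustrative note (not part of the driver): the modifier list starts at a
 * capacity of 128 entries in get_plane_modifiers() below and doubles on
 * demand, so a hypothetical 129th add_modifier() call would reallocate to
 * 256 entries and copy the existing 128 before appending.
 */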
4932 add_gfx9_modifiers(const struct amdgpu_device *adev,
4933 uint64_t **mods, uint64_t *size, uint64_t *capacity)
4935 int pipes = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
4936 int pipe_xor_bits = min(8, pipes +
4937 ilog2(adev->gfx.config.gb_addr_config_fields.num_se));
4938 int bank_xor_bits = min(8 - pipe_xor_bits,
4939 ilog2(adev->gfx.config.gb_addr_config_fields.num_banks));
4940 int rb = ilog2(adev->gfx.config.gb_addr_config_fields.num_se) +
4941 ilog2(adev->gfx.config.gb_addr_config_fields.num_rb_per_se);
4944 if (adev->family == AMDGPU_FAMILY_RV) {
4945 /* Raven2 and later */
4946 bool has_constant_encode = adev->asic_type > CHIP_RAVEN || adev->external_rev_id >= 0x81;
4949 * No _D DCC swizzles yet because we only allow 32bpp, which
4950 * doesn't support _D on DCN
4953 if (has_constant_encode) {
4954 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4955 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4956 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4957 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4958 AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
4959 AMD_FMT_MOD_SET(DCC, 1) |
4960 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4961 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
4962 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1));
4965 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4966 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4967 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4968 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4969 AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
4970 AMD_FMT_MOD_SET(DCC, 1) |
4971 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4972 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
4973 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 0));
4975 if (has_constant_encode) {
4976 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4977 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4978 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4979 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4980 AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
4981 AMD_FMT_MOD_SET(DCC, 1) |
4982 AMD_FMT_MOD_SET(DCC_RETILE, 1) |
4983 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4984 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
4986 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
4987 AMD_FMT_MOD_SET(RB, rb) |
4988 AMD_FMT_MOD_SET(PIPE, pipes));
4991 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4992 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4993 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4994 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4995 AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
4996 AMD_FMT_MOD_SET(DCC, 1) |
4997 AMD_FMT_MOD_SET(DCC_RETILE, 1) |
4998 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4999 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
5000 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 0) |
5001 AMD_FMT_MOD_SET(RB, rb) |
5002 AMD_FMT_MOD_SET(PIPE, pipes));
5006 * Only supported for 64bpp on Raven, will be filtered on format in
5007 * dm_plane_format_mod_supported.
5009 add_modifier(mods, size, capacity, AMD_FMT_MOD |
5010 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D_X) |
5011 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
5012 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5013 AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits));
5015 if (adev->family == AMDGPU_FAMILY_RV) {
5016 add_modifier(mods, size, capacity, AMD_FMT_MOD |
5017 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
5018 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
5019 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5020 AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits));
5024 * Only supported for 64bpp on Raven, will be filtered on format in
5025 * dm_plane_format_mod_supported.
5027 add_modifier(mods, size, capacity, AMD_FMT_MOD |
5028 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
5029 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
5031 if (adev->family == AMDGPU_FAMILY_RV) {
5032 add_modifier(mods, size, capacity, AMD_FMT_MOD |
5033 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
5034 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
5039 add_gfx10_1_modifiers(const struct amdgpu_device *adev,
5040 uint64_t **mods, uint64_t *size, uint64_t *capacity)
5042 int pipe_xor_bits = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
5044 add_modifier(mods, size, capacity, AMD_FMT_MOD |
5045 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5046 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
5047 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5048 AMD_FMT_MOD_SET(DCC, 1) |
5049 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
5050 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
5051 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
5053 add_modifier(mods, size, capacity, AMD_FMT_MOD |
5054 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5055 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
5056 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5057 AMD_FMT_MOD_SET(DCC, 1) |
5058 AMD_FMT_MOD_SET(DCC_RETILE, 1) |
5059 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
5060 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
5061 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
5063 add_modifier(mods, size, capacity, AMD_FMT_MOD |
5064 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5065 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
5066 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits));
5068 add_modifier(mods, size, capacity, AMD_FMT_MOD |
5069 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
5070 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
5071 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits));
5074 /* Only supported for 64bpp, will be filtered in dm_plane_format_mod_supported */
5075 add_modifier(mods, size, capacity, AMD_FMT_MOD |
5076 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
5077 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
5079 add_modifier(mods, size, capacity, AMD_FMT_MOD |
5080 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
5081 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
5085 add_gfx10_3_modifiers(const struct amdgpu_device *adev,
5086 uint64_t **mods, uint64_t *size, uint64_t *capacity)
5088 int pipe_xor_bits = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
5089 int pkrs = ilog2(adev->gfx.config.gb_addr_config_fields.num_pkrs);
5091 add_modifier(mods, size, capacity, AMD_FMT_MOD |
5092 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5093 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
5094 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5095 AMD_FMT_MOD_SET(PACKERS, pkrs) |
5096 AMD_FMT_MOD_SET(DCC, 1) |
5097 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
5098 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
5099 AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
5100 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
5102 add_modifier(mods, size, capacity, AMD_FMT_MOD |
5103 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5104 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
5105 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5106 AMD_FMT_MOD_SET(PACKERS, pkrs) |
5107 AMD_FMT_MOD_SET(DCC, 1) |
5108 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
5109 AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
5110 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_128B));
5112 add_modifier(mods, size, capacity, AMD_FMT_MOD |
5113 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5114 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
5115 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5116 AMD_FMT_MOD_SET(PACKERS, pkrs) |
5117 AMD_FMT_MOD_SET(DCC, 1) |
5118 AMD_FMT_MOD_SET(DCC_RETILE, 1) |
5119 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
5120 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
5121 AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
5122 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
5124 add_modifier(mods, size, capacity, AMD_FMT_MOD |
5125 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5126 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
5127 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5128 AMD_FMT_MOD_SET(PACKERS, pkrs) |
5129 AMD_FMT_MOD_SET(DCC, 1) |
5130 AMD_FMT_MOD_SET(DCC_RETILE, 1) |
5131 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
5132 AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
5133 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_128B));
5135 add_modifier(mods, size, capacity, AMD_FMT_MOD |
5136 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5137 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
5138 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5139 AMD_FMT_MOD_SET(PACKERS, pkrs));
5141 add_modifier(mods, size, capacity, AMD_FMT_MOD |
5142 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
5143 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
5144 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5145 AMD_FMT_MOD_SET(PACKERS, pkrs));
5147 /* Only supported for 64bpp, will be filtered in dm_plane_format_mod_supported */
5148 add_modifier(mods, size, capacity, AMD_FMT_MOD |
5149 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
5150 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
5152 add_modifier(mods, size, capacity, AMD_FMT_MOD |
5153 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
5154 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
5158 get_plane_modifiers(const struct amdgpu_device *adev, unsigned int plane_type, uint64_t **mods)
5160 uint64_t size = 0, capacity = 128;
5163 /* We have not hooked up any pre-GFX9 modifiers. */
5164 if (adev->family < AMDGPU_FAMILY_AI)
5167 *mods = kmalloc(capacity * sizeof(uint64_t), GFP_KERNEL);
5169 if (plane_type == DRM_PLANE_TYPE_CURSOR) {
5170 add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_LINEAR);
5171 add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_INVALID);
5172 return *mods ? 0 : -ENOMEM;
5175 switch (adev->family) {
5176 case AMDGPU_FAMILY_AI:
5177 case AMDGPU_FAMILY_RV:
5178 add_gfx9_modifiers(adev, mods, &size, &capacity);
5180 case AMDGPU_FAMILY_NV:
5181 case AMDGPU_FAMILY_VGH:
5182 case AMDGPU_FAMILY_YC:
5183 if (adev->ip_versions[GC_HWIP][0] >= IP_VERSION(10, 3, 0))
5184 add_gfx10_3_modifiers(adev, mods, &size, &capacity);
5186 add_gfx10_1_modifiers(adev, mods, &size, &capacity);
5190 add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_LINEAR);
5192 /* INVALID marks the end of the list. */
5193 add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_INVALID);
5202 fill_gfx9_plane_attributes_from_modifiers(struct amdgpu_device *adev,
5203 const struct amdgpu_framebuffer *afb,
5204 const enum surface_pixel_format format,
5205 const enum dc_rotation_angle rotation,
5206 const struct plane_size *plane_size,
5207 union dc_tiling_info *tiling_info,
5208 struct dc_plane_dcc_param *dcc,
5209 struct dc_plane_address *address,
5210 const bool force_disable_dcc)
5212 const uint64_t modifier = afb->base.modifier;
5215 fill_gfx9_tiling_info_from_modifier(adev, tiling_info, modifier);
5216 tiling_info->gfx9.swizzle = modifier_gfx9_swizzle_mode(modifier);
5218 if (modifier_has_dcc(modifier) && !force_disable_dcc) {
5219 uint64_t dcc_address = afb->address + afb->base.offsets[1];
5220 bool independent_64b_blks = AMD_FMT_MOD_GET(DCC_INDEPENDENT_64B, modifier);
5221 bool independent_128b_blks = AMD_FMT_MOD_GET(DCC_INDEPENDENT_128B, modifier);
5224 dcc->meta_pitch = afb->base.pitches[1];
5225 dcc->independent_64b_blks = independent_64b_blks;
5226 if (AMD_FMT_MOD_GET(TILE_VERSION, modifier) == AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) {
5227 if (independent_64b_blks && independent_128b_blks)
5228 dcc->dcc_ind_blk = hubp_ind_block_64b_no_128bcl;
5229 else if (independent_128b_blks)
5230 dcc->dcc_ind_blk = hubp_ind_block_128b;
5231 else if (independent_64b_blks && !independent_128b_blks)
5232 dcc->dcc_ind_blk = hubp_ind_block_64b;
5234 dcc->dcc_ind_blk = hubp_ind_block_unconstrained;
5236 if (independent_64b_blks)
5237 dcc->dcc_ind_blk = hubp_ind_block_64b;
5239 dcc->dcc_ind_blk = hubp_ind_block_unconstrained;
5242 address->grph.meta_addr.low_part = lower_32_bits(dcc_address);
5243 address->grph.meta_addr.high_part = upper_32_bits(dcc_address);
5246 ret = validate_dcc(adev, format, rotation, tiling_info, dcc, address, plane_size);
5248 drm_dbg_kms(adev_to_drm(adev), "validate_dcc: returned error: %d\n", ret);
5254 fill_plane_buffer_attributes(struct amdgpu_device *adev,
5255 const struct amdgpu_framebuffer *afb,
5256 const enum surface_pixel_format format,
5257 const enum dc_rotation_angle rotation,
5258 const uint64_t tiling_flags,
5259 union dc_tiling_info *tiling_info,
5260 struct plane_size *plane_size,
5261 struct dc_plane_dcc_param *dcc,
5262 struct dc_plane_address *address,
5264 bool force_disable_dcc)
5266 const struct drm_framebuffer *fb = &afb->base;
5269 memset(tiling_info, 0, sizeof(*tiling_info));
5270 memset(plane_size, 0, sizeof(*plane_size));
5271 memset(dcc, 0, sizeof(*dcc));
5272 memset(address, 0, sizeof(*address));
5274 address->tmz_surface = tmz_surface;
5276 if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) {
5277 uint64_t addr = afb->address + fb->offsets[0];
5279 plane_size->surface_size.x = 0;
5280 plane_size->surface_size.y = 0;
5281 plane_size->surface_size.width = fb->width;
5282 plane_size->surface_size.height = fb->height;
5283 plane_size->surface_pitch =
5284 fb->pitches[0] / fb->format->cpp[0];
5286 address->type = PLN_ADDR_TYPE_GRAPHICS;
5287 address->grph.addr.low_part = lower_32_bits(addr);
5288 address->grph.addr.high_part = upper_32_bits(addr);
5289 } else if (format < SURFACE_PIXEL_FORMAT_INVALID) {
5290 uint64_t luma_addr = afb->address + fb->offsets[0];
5291 uint64_t chroma_addr = afb->address + fb->offsets[1];
5293 plane_size->surface_size.x = 0;
5294 plane_size->surface_size.y = 0;
5295 plane_size->surface_size.width = fb->width;
5296 plane_size->surface_size.height = fb->height;
5297 plane_size->surface_pitch =
5298 fb->pitches[0] / fb->format->cpp[0];
5300 plane_size->chroma_size.x = 0;
5301 plane_size->chroma_size.y = 0;
5302 /* TODO: set these based on surface format */
5303 plane_size->chroma_size.width = fb->width / 2;
5304 plane_size->chroma_size.height = fb->height / 2;
5306 plane_size->chroma_pitch =
5307 fb->pitches[1] / fb->format->cpp[1];
5309 address->type = PLN_ADDR_TYPE_VIDEO_PROGRESSIVE;
5310 address->video_progressive.luma_addr.low_part =
5311 lower_32_bits(luma_addr);
5312 address->video_progressive.luma_addr.high_part =
5313 upper_32_bits(luma_addr);
5314 address->video_progressive.chroma_addr.low_part =
5315 lower_32_bits(chroma_addr);
5316 address->video_progressive.chroma_addr.high_part =
5317 upper_32_bits(chroma_addr);
5320 if (adev->family >= AMDGPU_FAMILY_AI) {
5321 ret = fill_gfx9_plane_attributes_from_modifiers(adev, afb, format,
5322 rotation, plane_size,
5329 fill_gfx8_tiling_info_from_flags(tiling_info, tiling_flags);
5336 fill_blending_from_plane_state(const struct drm_plane_state *plane_state,
5337 bool *per_pixel_alpha, bool *global_alpha,
5338 int *global_alpha_value)
5340 *per_pixel_alpha = false;
5341 *global_alpha = false;
5342 *global_alpha_value = 0xff;
5344 if (plane_state->plane->type != DRM_PLANE_TYPE_OVERLAY)
5347 if (plane_state->pixel_blend_mode == DRM_MODE_BLEND_PREMULTI) {
5348 static const uint32_t alpha_formats[] = {
5349 DRM_FORMAT_ARGB8888,
5350 DRM_FORMAT_RGBA8888,
5351 DRM_FORMAT_ABGR8888,
5353 uint32_t format = plane_state->fb->format->format;
5356 for (i = 0; i < ARRAY_SIZE(alpha_formats); ++i) {
5357 if (format == alpha_formats[i]) {
5358 *per_pixel_alpha = true;
5364 if (plane_state->alpha < 0xffff) {
5365 *global_alpha = true;
5366 *global_alpha_value = plane_state->alpha >> 8;
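/*
 * Worked example (illustrative, not in the original source): DRM plane
 * alpha is 16 bit (0x0000-0xffff) while DC takes an 8-bit global alpha, so
 * an alpha of 0x8080 becomes 0x80 (~50%) after the >> 8 above, while 0xffff
 * means fully opaque and leaves *global_alpha false.
 */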
5371 fill_plane_color_attributes(const struct drm_plane_state *plane_state,
5372 const enum surface_pixel_format format,
5373 enum dc_color_space *color_space)
5377 *color_space = COLOR_SPACE_SRGB;
5379 /* DRM color properties only affect non-RGB formats. */
5380 if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN)
5383 full_range = (plane_state->color_range == DRM_COLOR_YCBCR_FULL_RANGE);
5385 switch (plane_state->color_encoding) {
5386 case DRM_COLOR_YCBCR_BT601:
5388 *color_space = COLOR_SPACE_YCBCR601;
5390 *color_space = COLOR_SPACE_YCBCR601_LIMITED;
5393 case DRM_COLOR_YCBCR_BT709:
5395 *color_space = COLOR_SPACE_YCBCR709;
5397 *color_space = COLOR_SPACE_YCBCR709_LIMITED;
5400 case DRM_COLOR_YCBCR_BT2020:
5402 *color_space = COLOR_SPACE_2020_YCBCR;
5415 fill_dc_plane_info_and_addr(struct amdgpu_device *adev,
5416 const struct drm_plane_state *plane_state,
5417 const uint64_t tiling_flags,
5418 struct dc_plane_info *plane_info,
5419 struct dc_plane_address *address,
5421 bool force_disable_dcc)
5423 const struct drm_framebuffer *fb = plane_state->fb;
5424 const struct amdgpu_framebuffer *afb =
5425 to_amdgpu_framebuffer(plane_state->fb);
5428 memset(plane_info, 0, sizeof(*plane_info));
5430 switch (fb->format->format) {
5432 plane_info->format =
5433 SURFACE_PIXEL_FORMAT_GRPH_PALETA_256_COLORS;
5435 case DRM_FORMAT_RGB565:
5436 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_RGB565;
5438 case DRM_FORMAT_XRGB8888:
5439 case DRM_FORMAT_ARGB8888:
5440 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB8888;
5442 case DRM_FORMAT_XRGB2101010:
5443 case DRM_FORMAT_ARGB2101010:
5444 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010;
5446 case DRM_FORMAT_XBGR2101010:
5447 case DRM_FORMAT_ABGR2101010:
5448 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010;
5450 case DRM_FORMAT_XBGR8888:
5451 case DRM_FORMAT_ABGR8888:
5452 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR8888;
5454 case DRM_FORMAT_NV21:
5455 plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr;
5457 case DRM_FORMAT_NV12:
5458 plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb;
5460 case DRM_FORMAT_P010:
5461 plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb;
5463 case DRM_FORMAT_XRGB16161616F:
5464 case DRM_FORMAT_ARGB16161616F:
5465 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F;
5467 case DRM_FORMAT_XBGR16161616F:
5468 case DRM_FORMAT_ABGR16161616F:
5469 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F;
5471 case DRM_FORMAT_XRGB16161616:
5472 case DRM_FORMAT_ARGB16161616:
5473 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616;
5475 case DRM_FORMAT_XBGR16161616:
5476 case DRM_FORMAT_ABGR16161616:
5477 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616;
5481 "Unsupported screen format %p4cc\n",
5482 &fb->format->format);
5486 switch (plane_state->rotation & DRM_MODE_ROTATE_MASK) {
5487 case DRM_MODE_ROTATE_0:
5488 plane_info->rotation = ROTATION_ANGLE_0;
5490 case DRM_MODE_ROTATE_90:
5491 plane_info->rotation = ROTATION_ANGLE_90;
5493 case DRM_MODE_ROTATE_180:
5494 plane_info->rotation = ROTATION_ANGLE_180;
5496 case DRM_MODE_ROTATE_270:
5497 plane_info->rotation = ROTATION_ANGLE_270;
5500 plane_info->rotation = ROTATION_ANGLE_0;
5504 plane_info->visible = true;
5505 plane_info->stereo_format = PLANE_STEREO_FORMAT_NONE;
5507 plane_info->layer_index = 0;
5509 ret = fill_plane_color_attributes(plane_state, plane_info->format,
5510 &plane_info->color_space);
5514 ret = fill_plane_buffer_attributes(adev, afb, plane_info->format,
5515 plane_info->rotation, tiling_flags,
5516 &plane_info->tiling_info,
5517 &plane_info->plane_size,
5518 &plane_info->dcc, address, tmz_surface,
5523 fill_blending_from_plane_state(
5524 plane_state, &plane_info->per_pixel_alpha,
5525 &plane_info->global_alpha, &plane_info->global_alpha_value);
5530 static int fill_dc_plane_attributes(struct amdgpu_device *adev,
5531 struct dc_plane_state *dc_plane_state,
5532 struct drm_plane_state *plane_state,
5533 struct drm_crtc_state *crtc_state)
5535 struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
5536 struct amdgpu_framebuffer *afb = (struct amdgpu_framebuffer *)plane_state->fb;
5537 struct dc_scaling_info scaling_info;
5538 struct dc_plane_info plane_info;
5540 bool force_disable_dcc = false;
5542 ret = fill_dc_scaling_info(adev, plane_state, &scaling_info);
5546 dc_plane_state->src_rect = scaling_info.src_rect;
5547 dc_plane_state->dst_rect = scaling_info.dst_rect;
5548 dc_plane_state->clip_rect = scaling_info.clip_rect;
5549 dc_plane_state->scaling_quality = scaling_info.scaling_quality;
5551 force_disable_dcc = adev->asic_type == CHIP_RAVEN && adev->in_suspend;
5552 ret = fill_dc_plane_info_and_addr(adev, plane_state,
5555 &dc_plane_state->address,
5561 dc_plane_state->format = plane_info.format;
5562 dc_plane_state->color_space = plane_info.color_space;
5564 dc_plane_state->plane_size = plane_info.plane_size;
5565 dc_plane_state->rotation = plane_info.rotation;
5566 dc_plane_state->horizontal_mirror = plane_info.horizontal_mirror;
5567 dc_plane_state->stereo_format = plane_info.stereo_format;
5568 dc_plane_state->tiling_info = plane_info.tiling_info;
5569 dc_plane_state->visible = plane_info.visible;
5570 dc_plane_state->per_pixel_alpha = plane_info.per_pixel_alpha;
5571 dc_plane_state->global_alpha = plane_info.global_alpha;
5572 dc_plane_state->global_alpha_value = plane_info.global_alpha_value;
5573 dc_plane_state->dcc = plane_info.dcc;
5574 dc_plane_state->layer_index = plane_info.layer_index; /* always returns 0 */
5575 dc_plane_state->flip_int_enabled = true;
5578 * Always set input transfer function, since plane state is refreshed
5581 ret = amdgpu_dm_update_plane_color_mgmt(dm_crtc_state, dc_plane_state);
5588 static void update_stream_scaling_settings(const struct drm_display_mode *mode,
5589 const struct dm_connector_state *dm_state,
5590 struct dc_stream_state *stream)
5592 enum amdgpu_rmx_type rmx_type;
5594 struct rect src = { 0 }; /* viewport in composition space */
5595 struct rect dst = { 0 }; /* stream addressable area */
5597 /* no mode. nothing to be done */
5601 /* Full screen scaling by default */
5602 src.width = mode->hdisplay;
5603 src.height = mode->vdisplay;
5604 dst.width = stream->timing.h_addressable;
5605 dst.height = stream->timing.v_addressable;
5608 rmx_type = dm_state->scaling;
5609 if (rmx_type == RMX_ASPECT || rmx_type == RMX_OFF) {
5610 if (src.width * dst.height <
5611 src.height * dst.width) {
5612 /* height needs less upscaling/more downscaling */
5613 dst.width = src.width *
5614 dst.height / src.height;
5616 /* width needs less upscaling/more downscaling */
5617 dst.height = src.height *
5618 dst.width / src.width;
5620 } else if (rmx_type == RMX_CENTER) {
5624 dst.x = (stream->timing.h_addressable - dst.width) / 2;
5625 dst.y = (stream->timing.v_addressable - dst.height) / 2;
5627 if (dm_state->underscan_enable) {
5628 dst.x += dm_state->underscan_hborder / 2;
5629 dst.y += dm_state->underscan_vborder / 2;
5630 dst.width -= dm_state->underscan_hborder;
5631 dst.height -= dm_state->underscan_vborder;
5638 DRM_DEBUG_KMS("Destination Rectangle x:%d y:%d width:%d height:%d\n",
5639 dst.x, dst.y, dst.width, dst.height);
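/*
 * Worked example (illustrative, not in the original source): with
 * RMX_ASPECT, a 1920x1080 source on a 1680x1050 panel compares
 * 1920 * 1050 (2016000) against 1080 * 1680 (1814400); the first is larger,
 * so the width is kept and dst.height becomes 1080 * 1680 / 1920 = 945,
 * after which dst.y = (1050 - 945) / 2 centers the letterboxed image.
 */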
5643 static enum dc_color_depth
5644 convert_color_depth_from_display_info(const struct drm_connector *connector,
5645 bool is_y420, int requested_bpc)
5652 /* Cap display bpc based on HDMI 2.0 HF-VSDB */
5653 if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_48)
5655 else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_36)
5657 else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_30)
5660 bpc = (uint8_t)connector->display_info.bpc;
5661 /* Assume 8 bpc by default if no bpc is specified. */
5662 bpc = bpc ? bpc : 8;
5665 if (requested_bpc > 0) {
5667 * Cap display bpc based on the user requested value.
5669 * The value of state->max_bpc may not be correctly updated
5670 * depending on when the connector gets added to the state
5671 * or if this was called outside of atomic check, so it
5672 * can't be used directly.
5674 bpc = min_t(u8, bpc, requested_bpc);
5676 /* Round down to the nearest even number. */
5677 bpc = bpc - (bpc & 1);
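/*
 * Illustrative example (not in the original source): a connector reporting
 * 12 bpc with a requested max of 11 is first capped to 11 by the min_t()
 * above and then rounded down to 10 here, since only even bit depths map to
 * DC colour depths below.
 */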
5683 * Temporary workaround: DRM doesn't parse color depth for
5684 * EDID revisions before 1.4
5685 * TODO: Fix edid parsing
5687 return COLOR_DEPTH_888;
5689 return COLOR_DEPTH_666;
5691 return COLOR_DEPTH_888;
5693 return COLOR_DEPTH_101010;
5695 return COLOR_DEPTH_121212;
5697 return COLOR_DEPTH_141414;
5699 return COLOR_DEPTH_161616;
5701 return COLOR_DEPTH_UNDEFINED;
5705 static enum dc_aspect_ratio
5706 get_aspect_ratio(const struct drm_display_mode *mode_in)
5708 /* 1-1 mapping, since both enums follow the HDMI spec. */
5709 return (enum dc_aspect_ratio) mode_in->picture_aspect_ratio;
5712 static enum dc_color_space
5713 get_output_color_space(const struct dc_crtc_timing *dc_crtc_timing)
5715 enum dc_color_space color_space = COLOR_SPACE_SRGB;
5717 switch (dc_crtc_timing->pixel_encoding) {
5718 case PIXEL_ENCODING_YCBCR422:
5719 case PIXEL_ENCODING_YCBCR444:
5720 case PIXEL_ENCODING_YCBCR420:
5723 * 27030 kHz is the separation point between HDTV and SDTV
5724 * according to HDMI spec, we use YCbCr709 and YCbCr601
5727 if (dc_crtc_timing->pix_clk_100hz > 270300) {
5728 if (dc_crtc_timing->flags.Y_ONLY)
5730 COLOR_SPACE_YCBCR709_LIMITED;
5732 color_space = COLOR_SPACE_YCBCR709;
5734 if (dc_crtc_timing->flags.Y_ONLY)
5736 COLOR_SPACE_YCBCR601_LIMITED;
5738 color_space = COLOR_SPACE_YCBCR601;
5743 case PIXEL_ENCODING_RGB:
5744 color_space = COLOR_SPACE_SRGB;
5755 static bool adjust_colour_depth_from_display_info(
5756 struct dc_crtc_timing *timing_out,
5757 const struct drm_display_info *info)
5759 enum dc_color_depth depth = timing_out->display_color_depth;
5762 normalized_clk = timing_out->pix_clk_100hz / 10;
5763 /* YCbCr 4:2:0 requires additional adjustment of 1/2 */
5764 if (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420)
5765 normalized_clk /= 2;
5766 /* Adjust the pixel clock per the HDMI spec based on colour depth */
5768 case COLOR_DEPTH_888:
5770 case COLOR_DEPTH_101010:
5771 normalized_clk = (normalized_clk * 30) / 24;
5773 case COLOR_DEPTH_121212:
5774 normalized_clk = (normalized_clk * 36) / 24;
5776 case COLOR_DEPTH_161616:
5777 normalized_clk = (normalized_clk * 48) / 24;
5780 /* The above depths are the only ones valid for HDMI. */
5783 if (normalized_clk <= info->max_tmds_clock) {
5784 timing_out->display_color_depth = depth;
5787 } while (--depth > COLOR_DEPTH_666);
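/*
 * Worked example (illustrative, not in the original source): for a 594 MHz
 * pixel clock (pix_clk_100hz == 5940000, normalized_clk == 594000 kHz) at
 * COLOR_DEPTH_101010, the loop first tries 594000 * 30 / 24 == 742500 kHz;
 * if the sink's max_tmds_clock were only 600000 kHz, the depth would be
 * dropped to COLOR_DEPTH_888, which fits at 594000 kHz and is kept.
 */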
5791 static void fill_stream_properties_from_drm_display_mode(
5792 struct dc_stream_state *stream,
5793 const struct drm_display_mode *mode_in,
5794 const struct drm_connector *connector,
5795 const struct drm_connector_state *connector_state,
5796 const struct dc_stream_state *old_stream,
5799 struct dc_crtc_timing *timing_out = &stream->timing;
5800 const struct drm_display_info *info = &connector->display_info;
5801 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
5802 struct hdmi_vendor_infoframe hv_frame;
5803 struct hdmi_avi_infoframe avi_frame;
5805 memset(&hv_frame, 0, sizeof(hv_frame));
5806 memset(&avi_frame, 0, sizeof(avi_frame));
5808 timing_out->h_border_left = 0;
5809 timing_out->h_border_right = 0;
5810 timing_out->v_border_top = 0;
5811 timing_out->v_border_bottom = 0;
5812 /* TODO: un-hardcode */
5813 if (drm_mode_is_420_only(info, mode_in)
5814 && stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
5815 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
5816 else if (drm_mode_is_420_also(info, mode_in)
5817 && aconnector->force_yuv420_output)
5818 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
5819 else if ((connector->display_info.color_formats & DRM_COLOR_FORMAT_YCBCR444)
5820 && stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
5821 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR444;
5823 timing_out->pixel_encoding = PIXEL_ENCODING_RGB;
5825 timing_out->timing_3d_format = TIMING_3D_FORMAT_NONE;
5826 timing_out->display_color_depth = convert_color_depth_from_display_info(
5828 (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420),
5830 timing_out->scan_type = SCANNING_TYPE_NODATA;
5831 timing_out->hdmi_vic = 0;
5834 timing_out->vic = old_stream->timing.vic;
5835 timing_out->flags.HSYNC_POSITIVE_POLARITY = old_stream->timing.flags.HSYNC_POSITIVE_POLARITY;
5836 timing_out->flags.VSYNC_POSITIVE_POLARITY = old_stream->timing.flags.VSYNC_POSITIVE_POLARITY;
5838 timing_out->vic = drm_match_cea_mode(mode_in);
5839 if (mode_in->flags & DRM_MODE_FLAG_PHSYNC)
5840 timing_out->flags.HSYNC_POSITIVE_POLARITY = 1;
5841 if (mode_in->flags & DRM_MODE_FLAG_PVSYNC)
5842 timing_out->flags.VSYNC_POSITIVE_POLARITY = 1;
5845 if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
5846 drm_hdmi_avi_infoframe_from_display_mode(&avi_frame, (struct drm_connector *)connector, mode_in);
5847 timing_out->vic = avi_frame.video_code;
5848 drm_hdmi_vendor_infoframe_from_display_mode(&hv_frame, (struct drm_connector *)connector, mode_in);
5849 timing_out->hdmi_vic = hv_frame.vic;
5852 if (is_freesync_video_mode(mode_in, aconnector)) {
5853 timing_out->h_addressable = mode_in->hdisplay;
5854 timing_out->h_total = mode_in->htotal;
5855 timing_out->h_sync_width = mode_in->hsync_end - mode_in->hsync_start;
5856 timing_out->h_front_porch = mode_in->hsync_start - mode_in->hdisplay;
5857 timing_out->v_total = mode_in->vtotal;
5858 timing_out->v_addressable = mode_in->vdisplay;
5859 timing_out->v_front_porch = mode_in->vsync_start - mode_in->vdisplay;
5860 timing_out->v_sync_width = mode_in->vsync_end - mode_in->vsync_start;
5861 timing_out->pix_clk_100hz = mode_in->clock * 10;
5863 timing_out->h_addressable = mode_in->crtc_hdisplay;
5864 timing_out->h_total = mode_in->crtc_htotal;
5865 timing_out->h_sync_width = mode_in->crtc_hsync_end - mode_in->crtc_hsync_start;
5866 timing_out->h_front_porch = mode_in->crtc_hsync_start - mode_in->crtc_hdisplay;
5867 timing_out->v_total = mode_in->crtc_vtotal;
5868 timing_out->v_addressable = mode_in->crtc_vdisplay;
5869 timing_out->v_front_porch = mode_in->crtc_vsync_start - mode_in->crtc_vdisplay;
5870 timing_out->v_sync_width = mode_in->crtc_vsync_end - mode_in->crtc_vsync_start;
5871 timing_out->pix_clk_100hz = mode_in->crtc_clock * 10;
5874 timing_out->aspect_ratio = get_aspect_ratio(mode_in);
5876 stream->output_color_space = get_output_color_space(timing_out);
5878 stream->out_transfer_func->type = TF_TYPE_PREDEFINED;
5879 stream->out_transfer_func->tf = TRANSFER_FUNCTION_SRGB;
5880 if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
5881 if (!adjust_colour_depth_from_display_info(timing_out, info) &&
5882 drm_mode_is_420_also(info, mode_in) &&
5883 timing_out->pixel_encoding != PIXEL_ENCODING_YCBCR420) {
5884 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
5885 adjust_colour_depth_from_display_info(timing_out, info);
5890 static void fill_audio_info(struct audio_info *audio_info,
5891 const struct drm_connector *drm_connector,
5892 const struct dc_sink *dc_sink)
5895 int cea_revision = 0;
5896 const struct dc_edid_caps *edid_caps = &dc_sink->edid_caps;
5898 audio_info->manufacture_id = edid_caps->manufacturer_id;
5899 audio_info->product_id = edid_caps->product_id;
5901 cea_revision = drm_connector->display_info.cea_rev;
5903 strscpy(audio_info->display_name,
5904 edid_caps->display_name,
5905 AUDIO_INFO_DISPLAY_NAME_SIZE_IN_CHARS);
5907 if (cea_revision >= 3) {
5908 audio_info->mode_count = edid_caps->audio_mode_count;
5910 for (i = 0; i < audio_info->mode_count; ++i) {
5911 audio_info->modes[i].format_code =
5912 (enum audio_format_code)
5913 (edid_caps->audio_modes[i].format_code);
5914 audio_info->modes[i].channel_count =
5915 edid_caps->audio_modes[i].channel_count;
5916 audio_info->modes[i].sample_rates.all =
5917 edid_caps->audio_modes[i].sample_rate;
5918 audio_info->modes[i].sample_size =
5919 edid_caps->audio_modes[i].sample_size;
5923 audio_info->flags.all = edid_caps->speaker_flags;
5925 /* TODO: We only check for the progressive mode, check for interlace mode too */
5926 if (drm_connector->latency_present[0]) {
5927 audio_info->video_latency = drm_connector->video_latency[0];
5928 audio_info->audio_latency = drm_connector->audio_latency[0];
5931 /* TODO: For DP, video and audio latency should be calculated from DPCD caps */
5936 copy_crtc_timing_for_drm_display_mode(const struct drm_display_mode *src_mode,
5937 struct drm_display_mode *dst_mode)
5939 dst_mode->crtc_hdisplay = src_mode->crtc_hdisplay;
5940 dst_mode->crtc_vdisplay = src_mode->crtc_vdisplay;
5941 dst_mode->crtc_clock = src_mode->crtc_clock;
5942 dst_mode->crtc_hblank_start = src_mode->crtc_hblank_start;
5943 dst_mode->crtc_hblank_end = src_mode->crtc_hblank_end;
5944 dst_mode->crtc_hsync_start = src_mode->crtc_hsync_start;
5945 dst_mode->crtc_hsync_end = src_mode->crtc_hsync_end;
5946 dst_mode->crtc_htotal = src_mode->crtc_htotal;
5947 dst_mode->crtc_hskew = src_mode->crtc_hskew;
5948 dst_mode->crtc_vblank_start = src_mode->crtc_vblank_start;
5949 dst_mode->crtc_vblank_end = src_mode->crtc_vblank_end;
5950 dst_mode->crtc_vsync_start = src_mode->crtc_vsync_start;
5951 dst_mode->crtc_vsync_end = src_mode->crtc_vsync_end;
5952 dst_mode->crtc_vtotal = src_mode->crtc_vtotal;
5956 decide_crtc_timing_for_drm_display_mode(struct drm_display_mode *drm_mode,
5957 const struct drm_display_mode *native_mode,
5960 if (scale_enabled) {
5961 copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
5962 } else if (native_mode->clock == drm_mode->clock &&
5963 native_mode->htotal == drm_mode->htotal &&
5964 native_mode->vtotal == drm_mode->vtotal) {
5965 copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
5967 /* neither scaling nor an amdgpu-inserted mode, so nothing to patch */
5971 static struct dc_sink *
5972 create_fake_sink(struct amdgpu_dm_connector *aconnector)
5974 struct dc_sink_init_data sink_init_data = { 0 };
5975 struct dc_sink *sink = NULL;
5976 sink_init_data.link = aconnector->dc_link;
5977 sink_init_data.sink_signal = aconnector->dc_link->connector_signal;
5979 sink = dc_sink_create(&sink_init_data);
5981 DRM_ERROR("Failed to create sink!\n");
5984 sink->sink_signal = SIGNAL_TYPE_VIRTUAL;
5989 static void set_multisync_trigger_params(
5990 struct dc_stream_state *stream)
5992 struct dc_stream_state *master = NULL;
5994 if (stream->triggered_crtc_reset.enabled) {
5995 master = stream->triggered_crtc_reset.event_source;
5996 stream->triggered_crtc_reset.event =
5997 master->timing.flags.VSYNC_POSITIVE_POLARITY ?
5998 CRTC_EVENT_VSYNC_RISING : CRTC_EVENT_VSYNC_FALLING;
5999 stream->triggered_crtc_reset.delay = TRIGGER_DELAY_NEXT_PIXEL;
6003 static void set_master_stream(struct dc_stream_state *stream_set[],
6006 int j, highest_rfr = 0, master_stream = 0;
6008 for (j = 0; j < stream_count; j++) {
6009 if (stream_set[j] && stream_set[j]->triggered_crtc_reset.enabled) {
6010 int refresh_rate = 0;
6012 refresh_rate = (stream_set[j]->timing.pix_clk_100hz*100)/
6013 (stream_set[j]->timing.h_total*stream_set[j]->timing.v_total);
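/*
 * Arithmetic example (illustrative, not in the original source): a stream
 * with pix_clk_100hz == 1485000 (148.5 MHz) and a 2200 x 1125 total raster
 * gives 1485000 * 100 / (2200 * 1125) == 60, so a 60 Hz stream would win
 * the master election over slower ones.
 */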
6014 if (refresh_rate > highest_rfr) {
6015 highest_rfr = refresh_rate;
6020 for (j = 0; j < stream_count; j++) {
6022 stream_set[j]->triggered_crtc_reset.event_source = stream_set[master_stream];
6026 static void dm_enable_per_frame_crtc_master_sync(struct dc_state *context)
6029 struct dc_stream_state *stream;
6031 if (context->stream_count < 2)
6033 for (i = 0; i < context->stream_count ; i++) {
6034 if (!context->streams[i])
6037 * TODO: add a function to read AMD VSDB bits and set
6038 * crtc_sync_master.multi_sync_enabled flag
6039 * For now it's set to false
6043 set_master_stream(context->streams, context->stream_count);
6045 for (i = 0; i < context->stream_count ; i++) {
6046 stream = context->streams[i];
6051 set_multisync_trigger_params(stream);
6055 #if defined(CONFIG_DRM_AMD_DC_DCN)
6056 static void update_dsc_caps(struct amdgpu_dm_connector *aconnector,
6057 struct dc_sink *sink, struct dc_stream_state *stream,
6058 struct dsc_dec_dpcd_caps *dsc_caps)
6060 stream->timing.flags.DSC = 0;
6062 if (aconnector->dc_link && (sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT ||
6063 sink->sink_signal == SIGNAL_TYPE_EDP)) {
6064 dc_dsc_parse_dsc_dpcd(aconnector->dc_link->ctx->dc,
6065 aconnector->dc_link->dpcd_caps.dsc_caps.dsc_basic_caps.raw,
6066 aconnector->dc_link->dpcd_caps.dsc_caps.dsc_branch_decoder_caps.raw,
6071 static void apply_dsc_policy_for_edp(struct amdgpu_dm_connector *aconnector,
6072 struct dc_sink *sink, struct dc_stream_state *stream,
6073 struct dsc_dec_dpcd_caps *dsc_caps,
6074 uint32_t max_dsc_target_bpp_limit_override)
6076 const struct dc_link_settings *verified_link_cap = NULL;
6077 uint32_t link_bw_in_kbps;
6078 uint32_t edp_min_bpp_x16, edp_max_bpp_x16;
6079 struct dc *dc = sink->ctx->dc;
6080 struct dc_dsc_bw_range bw_range = {0};
6081 struct dc_dsc_config dsc_cfg = {0};
6083 verified_link_cap = dc_link_get_link_cap(stream->link);
6084 link_bw_in_kbps = dc_link_bandwidth_kbps(stream->link, verified_link_cap);
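/*
 * The DSC target rates below are expressed in 1/16 bit-per-pixel
 * units, so 8 * 16 represents a target of 8.0 bpp; the sink's
 * reported maximum may pull the upper bound down further.
 */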
6085 edp_min_bpp_x16 = 8 * 16;
6086 edp_max_bpp_x16 = 8 * 16;
6088 if (edp_max_bpp_x16 > dsc_caps->edp_max_bits_per_pixel)
6089 edp_max_bpp_x16 = dsc_caps->edp_max_bits_per_pixel;
6091 if (edp_max_bpp_x16 < edp_min_bpp_x16)
6092 edp_min_bpp_x16 = edp_max_bpp_x16;
6094 if (dc_dsc_compute_bandwidth_range(dc->res_pool->dscs[0],
6095 dc->debug.dsc_min_slice_height_override,
6096 edp_min_bpp_x16, edp_max_bpp_x16,
6101 if (bw_range.max_kbps < link_bw_in_kbps) {
6102 if (dc_dsc_compute_config(dc->res_pool->dscs[0],
6104 dc->debug.dsc_min_slice_height_override,
6105 max_dsc_target_bpp_limit_override,
6109 stream->timing.dsc_cfg = dsc_cfg;
6110 stream->timing.flags.DSC = 1;
6111 stream->timing.dsc_cfg.bits_per_pixel = edp_max_bpp_x16;
6117 if (dc_dsc_compute_config(dc->res_pool->dscs[0],
6119 dc->debug.dsc_min_slice_height_override,
6120 max_dsc_target_bpp_limit_override,
6124 stream->timing.dsc_cfg = dsc_cfg;
6125 stream->timing.flags.DSC = 1;
6129 static void apply_dsc_policy_for_stream(struct amdgpu_dm_connector *aconnector,
6130 struct dc_sink *sink, struct dc_stream_state *stream,
6131 struct dsc_dec_dpcd_caps *dsc_caps)
6133 struct drm_connector *drm_connector = &aconnector->base;
6134 uint32_t link_bandwidth_kbps;
6135 uint32_t max_dsc_target_bpp_limit_override = 0;
6136 struct dc *dc = sink->ctx->dc;
6138 link_bandwidth_kbps = dc_link_bandwidth_kbps(aconnector->dc_link,
6139 dc_link_get_link_cap(aconnector->dc_link));
6141 if (stream->link && stream->link->local_sink)
6142 max_dsc_target_bpp_limit_override =
6143 stream->link->local_sink->edid_caps.panel_patch.max_dsc_target_bpp_limit;
6145 /* Set DSC policy according to dsc_clock_en */
6146 dc_dsc_policy_set_enable_dsc_when_not_needed(
6147 aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE);
6149 if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_EDP && !dc->debug.disable_dsc_edp &&
6150 dc->caps.edp_dsc_support && aconnector->dsc_settings.dsc_force_enable != DSC_CLK_FORCE_DISABLE) {
6152 apply_dsc_policy_for_edp(aconnector, sink, stream, dsc_caps, max_dsc_target_bpp_limit_override);
6154 } else if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT) {
6156 if (dc_dsc_compute_config(aconnector->dc_link->ctx->dc->res_pool->dscs[0],
6158 aconnector->dc_link->ctx->dc->debug.dsc_min_slice_height_override,
6159 max_dsc_target_bpp_limit_override,
6160 link_bandwidth_kbps,
6162 &stream->timing.dsc_cfg)) {
6163 stream->timing.flags.DSC = 1;
6164 DRM_DEBUG_DRIVER("%s: [%s] DSC is selected from SST RX\n", __func__, drm_connector->name);
6168 /* Overwrite the stream flag if DSC is enabled through debugfs */
6169 if (aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE)
6170 stream->timing.flags.DSC = 1;
6172 if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_h)
6173 stream->timing.dsc_cfg.num_slices_h = aconnector->dsc_settings.dsc_num_slices_h;
6175 if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_v)
6176 stream->timing.dsc_cfg.num_slices_v = aconnector->dsc_settings.dsc_num_slices_v;
6178 if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_bits_per_pixel)
6179 stream->timing.dsc_cfg.bits_per_pixel = aconnector->dsc_settings.dsc_bits_per_pixel;
6181 #endif /* CONFIG_DRM_AMD_DC_DCN */
6184 * DOC: FreeSync Video
6186 * When a userspace application wants to play a video, the content follows a
6187 * standard format definition that usually specifies the FPS for that format.
6188 * The list below illustrates some video formats and their expected FPS,
6191 * - TV/NTSC (23.976 FPS)
6194 * - TV/NTSC (29.97 FPS)
6195 * - TV/NTSC (30 FPS)
6196 * - Cinema HFR (48 FPS)
6198 * - Commonly used (60 FPS)
6199 * - Multiples of 24 (48,72,96,120 FPS)
6201 * The list of standard video formats is not huge and can be added to the
6202 * connector's mode list beforehand. With that, userspace can leverage
6203 * FreeSync to extend the front porch in order to attain the target refresh
6204 * rate. Such a switch will happen seamlessly, without screen blanking or
6205 * reprogramming of the output in any other way. If the userspace requests a
6206 * modesetting change compatible with FreeSync modes that only differ in the
6207 * refresh rate, DC will skip the full update and avoid blink during the
6208 * transition. For example, the video player can change the modesetting from
6209 * 60Hz to 30Hz for playing TV/NTSC content when it goes full screen without
6210 * causing any display blink. This same concept can be applied to a mode setting change.
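 *
 * As a rough, hypothetical illustration: a 1920x1080 mode with a
 * 2200x1125 total at 148.5 MHz refreshes at 60 Hz; keeping the same
 * pixel clock and stretching the vertical front porch so vtotal grows
 * to 2250 lines yields 30 Hz. The FreeSync video modes added below are
 * derived from the base mode in this way.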
6213 static struct drm_display_mode *
6214 get_highest_refresh_rate_mode(struct amdgpu_dm_connector *aconnector,
6215 bool use_probed_modes)
6217 struct drm_display_mode *m, *m_pref = NULL;
6218 u16 current_refresh, highest_refresh;
6219 struct list_head *list_head = use_probed_modes ?
6220 &aconnector->base.probed_modes :
6221 &aconnector->base.modes;
6223 if (aconnector->freesync_vid_base.clock != 0)
6224 return &aconnector->freesync_vid_base;
6226 /* Find the preferred mode */
6227 list_for_each_entry (m, list_head, head) {
6228 if (m->type & DRM_MODE_TYPE_PREFERRED) {
6235 /* Probably an EDID with no preferred mode. Fall back to the first entry */
6236 m_pref = list_first_entry_or_null(
6237 &aconnector->base.modes, struct drm_display_mode, head);
6239 DRM_DEBUG_DRIVER("No preferred mode found in EDID\n");
6244 highest_refresh = drm_mode_vrefresh(m_pref);
6247 * Find the mode with the highest refresh rate at the same resolution.
6248 * For some monitors, the preferred mode is not the mode with the highest
6249 * supported refresh rate.
6251 list_for_each_entry (m, list_head, head) {
6252 current_refresh = drm_mode_vrefresh(m);
6254 if (m->hdisplay == m_pref->hdisplay &&
6255 m->vdisplay == m_pref->vdisplay &&
6256 highest_refresh < current_refresh) {
6257 highest_refresh = current_refresh;
6262 aconnector->freesync_vid_base = *m_pref;
6266 static bool is_freesync_video_mode(const struct drm_display_mode *mode,
6267 struct amdgpu_dm_connector *aconnector)
6269 struct drm_display_mode *high_mode;
6272 high_mode = get_highest_refresh_rate_mode(aconnector, false);
6273 if (!high_mode || !mode)
6276 timing_diff = high_mode->vtotal - mode->vtotal;
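/*
 * A FreeSync video mode differs from the base mode only in the length
 * of the vertical front porch, so the vertical sync timings below must
 * be shifted by exactly timing_diff while the horizontal timings,
 * pixel clock, hskew and vscan stay identical.
 */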
6278 if (high_mode->clock == 0 || high_mode->clock != mode->clock ||
6279 high_mode->hdisplay != mode->hdisplay ||
6280 high_mode->vdisplay != mode->vdisplay ||
6281 high_mode->hsync_start != mode->hsync_start ||
6282 high_mode->hsync_end != mode->hsync_end ||
6283 high_mode->htotal != mode->htotal ||
6284 high_mode->hskew != mode->hskew ||
6285 high_mode->vscan != mode->vscan ||
6286 high_mode->vsync_start - mode->vsync_start != timing_diff ||
6287 high_mode->vsync_end - mode->vsync_end != timing_diff)
6293 static struct dc_stream_state *
6294 create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
6295 const struct drm_display_mode *drm_mode,
6296 const struct dm_connector_state *dm_state,
6297 const struct dc_stream_state *old_stream,
6300 struct drm_display_mode *preferred_mode = NULL;
6301 struct drm_connector *drm_connector;
6302 const struct drm_connector_state *con_state =
6303 dm_state ? &dm_state->base : NULL;
6304 struct dc_stream_state *stream = NULL;
6305 struct drm_display_mode mode = *drm_mode;
6306 struct drm_display_mode saved_mode;
6307 struct drm_display_mode *freesync_mode = NULL;
6308 bool native_mode_found = false;
6309 bool recalculate_timing = false;
6310 bool scale = dm_state ? (dm_state->scaling != RMX_OFF) : false;
6312 int preferred_refresh = 0;
6313 #if defined(CONFIG_DRM_AMD_DC_DCN)
6314 struct dsc_dec_dpcd_caps dsc_caps;
6316 struct dc_sink *sink = NULL;
6318 memset(&saved_mode, 0, sizeof(saved_mode));
6320 if (aconnector == NULL) {
6321 DRM_ERROR("aconnector is NULL!\n");
6325 drm_connector = &aconnector->base;
6327 if (!aconnector->dc_sink) {
6328 sink = create_fake_sink(aconnector);
6332 sink = aconnector->dc_sink;
6333 dc_sink_retain(sink);
6336 stream = dc_create_stream_for_sink(sink);
6338 if (stream == NULL) {
6339 DRM_ERROR("Failed to create stream for sink!\n");
6343 stream->dm_stream_context = aconnector;
6345 stream->timing.flags.LTE_340MCSC_SCRAMBLE =
6346 drm_connector->display_info.hdmi.scdc.scrambling.low_rates;
6348 list_for_each_entry(preferred_mode, &aconnector->base.modes, head) {
6349 /* Search for preferred mode */
6350 if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED) {
6351 native_mode_found = true;
6355 if (!native_mode_found)
6356 preferred_mode = list_first_entry_or_null(
6357 &aconnector->base.modes,
6358 struct drm_display_mode,
6361 mode_refresh = drm_mode_vrefresh(&mode);
6363 if (preferred_mode == NULL) {
6365 * This may not be an error; the use case is when we have no
6366 * usermode calls to reset and set mode upon hotplug. In this
6367 * case, we call set mode ourselves to restore the previous mode
6368 * and the mode list may not be filled in in time.
6370 DRM_DEBUG_DRIVER("No preferred mode found\n");
6372 recalculate_timing = amdgpu_freesync_vid_mode &&
6373 is_freesync_video_mode(&mode, aconnector);
6374 if (recalculate_timing) {
6375 freesync_mode = get_highest_refresh_rate_mode(aconnector, false);
6377 mode = *freesync_mode;
6379 decide_crtc_timing_for_drm_display_mode(
6380 &mode, preferred_mode, scale);
6382 preferred_refresh = drm_mode_vrefresh(preferred_mode);
6386 if (recalculate_timing)
6387 drm_mode_set_crtcinfo(&saved_mode, 0);
6389 drm_mode_set_crtcinfo(&mode, 0);
6392 * If scaling is enabled and refresh rate didn't change
6393 * we copy the vic and polarities of the old timings
6395 if (!scale || mode_refresh != preferred_refresh)
6396 fill_stream_properties_from_drm_display_mode(
6397 stream, &mode, &aconnector->base, con_state, NULL,
6400 fill_stream_properties_from_drm_display_mode(
6401 stream, &mode, &aconnector->base, con_state, old_stream,
6404 #if defined(CONFIG_DRM_AMD_DC_DCN)
6405 /* SST DSC determination policy */
6406 update_dsc_caps(aconnector, sink, stream, &dsc_caps);
6407 if (aconnector->dsc_settings.dsc_force_enable != DSC_CLK_FORCE_DISABLE && dsc_caps.is_dsc_supported)
6408 apply_dsc_policy_for_stream(aconnector, sink, stream, &dsc_caps);
6411 update_stream_scaling_settings(&mode, dm_state, stream);
6414 &stream->audio_info,
6418 update_stream_signal(stream, sink);
6420 if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
6421 mod_build_hf_vsif_infopacket(stream, &stream->vsp_infopacket);
6423 if (stream->link->psr_settings.psr_feature_enabled) {
6425 // Decide whether the stream supports VSC SDP colorimetry
6426 // before building the VSC info packet
6428 stream->use_vsc_sdp_for_colorimetry = false;
6429 if (aconnector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
6430 stream->use_vsc_sdp_for_colorimetry =
6431 aconnector->dc_sink->is_vsc_sdp_colorimetry_supported;
6433 if (stream->link->dpcd_caps.dprx_feature.bits.VSC_SDP_COLORIMETRY_SUPPORTED)
6434 stream->use_vsc_sdp_for_colorimetry = true;
6436 mod_build_vsc_infopacket(stream, &stream->vsc_infopacket);
6437 aconnector->psr_skip_count = AMDGPU_DM_PSR_ENTRY_DELAY;
6441 dc_sink_release(sink);
6446 static void amdgpu_dm_crtc_destroy(struct drm_crtc *crtc)
6448 drm_crtc_cleanup(crtc);
6452 static void dm_crtc_destroy_state(struct drm_crtc *crtc,
6453 struct drm_crtc_state *state)
6455 struct dm_crtc_state *cur = to_dm_crtc_state(state);
6457 /* TODO: Destroy dc_stream objects once the stream object is flattened */
6459 dc_stream_release(cur->stream);
6462 __drm_atomic_helper_crtc_destroy_state(state);
6468 static void dm_crtc_reset_state(struct drm_crtc *crtc)
6470 struct dm_crtc_state *state;
6473 dm_crtc_destroy_state(crtc, crtc->state);
6475 state = kzalloc(sizeof(*state), GFP_KERNEL);
6476 if (WARN_ON(!state))
6479 __drm_atomic_helper_crtc_reset(crtc, &state->base);
6482 static struct drm_crtc_state *
6483 dm_crtc_duplicate_state(struct drm_crtc *crtc)
6485 struct dm_crtc_state *state, *cur;
6487 cur = to_dm_crtc_state(crtc->state);
6489 if (WARN_ON(!crtc->state))
6492 state = kzalloc(sizeof(*state), GFP_KERNEL);
6496 __drm_atomic_helper_crtc_duplicate_state(crtc, &state->base);
6499 state->stream = cur->stream;
6500 dc_stream_retain(state->stream);
6503 state->active_planes = cur->active_planes;
6504 state->vrr_infopacket = cur->vrr_infopacket;
6505 state->abm_level = cur->abm_level;
6506 state->vrr_supported = cur->vrr_supported;
6507 state->freesync_config = cur->freesync_config;
6508 state->cm_has_degamma = cur->cm_has_degamma;
6509 state->cm_is_degamma_srgb = cur->cm_is_degamma_srgb;
6510 state->force_dpms_off = cur->force_dpms_off;
6511 /* TODO: Duplicate dc_stream once the stream object is flattened */
6513 return &state->base;
6516 #ifdef CONFIG_DRM_AMD_SECURE_DISPLAY
6517 static int amdgpu_dm_crtc_late_register(struct drm_crtc *crtc)
6519 crtc_debugfs_init(crtc);
6525 static inline int dm_set_vupdate_irq(struct drm_crtc *crtc, bool enable)
6527 enum dc_irq_source irq_source;
6528 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
6529 struct amdgpu_device *adev = drm_to_adev(crtc->dev);
6532 irq_source = IRQ_TYPE_VUPDATE + acrtc->otg_inst;
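/*
 * irq_source now selects this CRTC's VUPDATE interrupt: the per-OTG
 * sources follow the base IRQ_TYPE_VUPDATE id, so the OTG instance
 * index is added as an offset.
 */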
6534 rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
6536 DRM_DEBUG_VBL("crtc %d - vupdate irq %sabling: r=%d\n",
6537 acrtc->crtc_id, enable ? "en" : "dis", rc);
6541 static inline int dm_set_vblank(struct drm_crtc *crtc, bool enable)
6543 enum dc_irq_source irq_source;
6544 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
6545 struct amdgpu_device *adev = drm_to_adev(crtc->dev);
6546 struct dm_crtc_state *acrtc_state = to_dm_crtc_state(crtc->state);
6547 #if defined(CONFIG_DRM_AMD_DC_DCN)
6548 struct amdgpu_display_manager *dm = &adev->dm;
6549 struct vblank_control_work *work;
6554 /* vblank irq on -> Only need vupdate irq in vrr mode */
6555 if (amdgpu_dm_vrr_active(acrtc_state))
6556 rc = dm_set_vupdate_irq(crtc, true);
6558 /* vblank irq off -> vupdate irq off */
6559 rc = dm_set_vupdate_irq(crtc, false);
6565 irq_source = IRQ_TYPE_VBLANK + acrtc->otg_inst;
6567 if (!dc_interrupt_set(adev->dm.dc, irq_source, enable))
6570 if (amdgpu_in_reset(adev))
6573 #if defined(CONFIG_DRM_AMD_DC_DCN)
6574 if (dm->vblank_control_workqueue) {
6575 work = kzalloc(sizeof(*work), GFP_ATOMIC);
6579 INIT_WORK(&work->work, vblank_control_worker);
6581 work->acrtc = acrtc;
6582 work->enable = enable;
6584 if (acrtc_state->stream) {
6585 dc_stream_retain(acrtc_state->stream);
6586 work->stream = acrtc_state->stream;
6589 queue_work(dm->vblank_control_workqueue, &work->work);
6596 static int dm_enable_vblank(struct drm_crtc *crtc)
6598 return dm_set_vblank(crtc, true);
6601 static void dm_disable_vblank(struct drm_crtc *crtc)
6603 dm_set_vblank(crtc, false);
6606 /* Only the options currently available for the driver are implemented */
6607 static const struct drm_crtc_funcs amdgpu_dm_crtc_funcs = {
6608 .reset = dm_crtc_reset_state,
6609 .destroy = amdgpu_dm_crtc_destroy,
6610 .set_config = drm_atomic_helper_set_config,
6611 .page_flip = drm_atomic_helper_page_flip,
6612 .atomic_duplicate_state = dm_crtc_duplicate_state,
6613 .atomic_destroy_state = dm_crtc_destroy_state,
6614 .set_crc_source = amdgpu_dm_crtc_set_crc_source,
6615 .verify_crc_source = amdgpu_dm_crtc_verify_crc_source,
6616 .get_crc_sources = amdgpu_dm_crtc_get_crc_sources,
6617 .get_vblank_counter = amdgpu_get_vblank_counter_kms,
6618 .enable_vblank = dm_enable_vblank,
6619 .disable_vblank = dm_disable_vblank,
6620 .get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
6621 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
6622 .late_register = amdgpu_dm_crtc_late_register,
6626 static enum drm_connector_status
6627 amdgpu_dm_connector_detect(struct drm_connector *connector, bool force)
6630 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
6634 * 1. This interface is NOT called in context of HPD irq.
6635 * 2. This interface *is called* in context of a user-mode ioctl, which
6636 * makes it a bad place for *any* MST-related activity.
6639 if (aconnector->base.force == DRM_FORCE_UNSPECIFIED &&
6640 !aconnector->fake_enable)
6641 connected = (aconnector->dc_sink != NULL);
6643 connected = (aconnector->base.force == DRM_FORCE_ON);
6645 update_subconnector_property(aconnector);
6647 return (connected ? connector_status_connected :
6648 connector_status_disconnected);
6651 int amdgpu_dm_connector_atomic_set_property(struct drm_connector *connector,
6652 struct drm_connector_state *connector_state,
6653 struct drm_property *property,
6656 struct drm_device *dev = connector->dev;
6657 struct amdgpu_device *adev = drm_to_adev(dev);
6658 struct dm_connector_state *dm_old_state =
6659 to_dm_connector_state(connector->state);
6660 struct dm_connector_state *dm_new_state =
6661 to_dm_connector_state(connector_state);
6665 if (property == dev->mode_config.scaling_mode_property) {
6666 enum amdgpu_rmx_type rmx_type;
6669 case DRM_MODE_SCALE_CENTER:
6670 rmx_type = RMX_CENTER;
6672 case DRM_MODE_SCALE_ASPECT:
6673 rmx_type = RMX_ASPECT;
6675 case DRM_MODE_SCALE_FULLSCREEN:
6676 rmx_type = RMX_FULL;
6678 case DRM_MODE_SCALE_NONE:
6684 if (dm_old_state->scaling == rmx_type)
6687 dm_new_state->scaling = rmx_type;
6689 } else if (property == adev->mode_info.underscan_hborder_property) {
6690 dm_new_state->underscan_hborder = val;
6692 } else if (property == adev->mode_info.underscan_vborder_property) {
6693 dm_new_state->underscan_vborder = val;
6695 } else if (property == adev->mode_info.underscan_property) {
6696 dm_new_state->underscan_enable = val;
6698 } else if (property == adev->mode_info.abm_level_property) {
6699 dm_new_state->abm_level = val;
6706 int amdgpu_dm_connector_atomic_get_property(struct drm_connector *connector,
6707 const struct drm_connector_state *state,
6708 struct drm_property *property,
6711 struct drm_device *dev = connector->dev;
6712 struct amdgpu_device *adev = drm_to_adev(dev);
6713 struct dm_connector_state *dm_state =
6714 to_dm_connector_state(state);
6717 if (property == dev->mode_config.scaling_mode_property) {
6718 switch (dm_state->scaling) {
6720 *val = DRM_MODE_SCALE_CENTER;
6723 *val = DRM_MODE_SCALE_ASPECT;
6726 *val = DRM_MODE_SCALE_FULLSCREEN;
6730 *val = DRM_MODE_SCALE_NONE;
6734 } else if (property == adev->mode_info.underscan_hborder_property) {
6735 *val = dm_state->underscan_hborder;
6737 } else if (property == adev->mode_info.underscan_vborder_property) {
6738 *val = dm_state->underscan_vborder;
6740 } else if (property == adev->mode_info.underscan_property) {
6741 *val = dm_state->underscan_enable;
6743 } else if (property == adev->mode_info.abm_level_property) {
6744 *val = dm_state->abm_level;
6751 static void amdgpu_dm_connector_unregister(struct drm_connector *connector)
6753 struct amdgpu_dm_connector *amdgpu_dm_connector = to_amdgpu_dm_connector(connector);
6755 drm_dp_aux_unregister(&amdgpu_dm_connector->dm_dp_aux.aux);
6758 static void amdgpu_dm_connector_destroy(struct drm_connector *connector)
6760 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
6761 const struct dc_link *link = aconnector->dc_link;
6762 struct amdgpu_device *adev = drm_to_adev(connector->dev);
6763 struct amdgpu_display_manager *dm = &adev->dm;
6767 * Call only if mst_mgr was initialized before since it's not done
6768 * for all connector types.
6770 if (aconnector->mst_mgr.dev)
6771 drm_dp_mst_topology_mgr_destroy(&aconnector->mst_mgr);
6773 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
6774 defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
6775 for (i = 0; i < dm->num_of_edps; i++) {
6776 if ((link == dm->backlight_link[i]) && dm->backlight_dev[i]) {
6777 backlight_device_unregister(dm->backlight_dev[i]);
6778 dm->backlight_dev[i] = NULL;
6783 if (aconnector->dc_em_sink)
6784 dc_sink_release(aconnector->dc_em_sink);
6785 aconnector->dc_em_sink = NULL;
6786 if (aconnector->dc_sink)
6787 dc_sink_release(aconnector->dc_sink);
6788 aconnector->dc_sink = NULL;
6790 drm_dp_cec_unregister_connector(&aconnector->dm_dp_aux.aux);
6791 drm_connector_unregister(connector);
6792 drm_connector_cleanup(connector);
6793 if (aconnector->i2c) {
6794 i2c_del_adapter(&aconnector->i2c->base);
6795 kfree(aconnector->i2c);
6797 kfree(aconnector->dm_dp_aux.aux.name);
6802 void amdgpu_dm_connector_funcs_reset(struct drm_connector *connector)
6804 struct dm_connector_state *state =
6805 to_dm_connector_state(connector->state);
6807 if (connector->state)
6808 __drm_atomic_helper_connector_destroy_state(connector->state);
6812 state = kzalloc(sizeof(*state), GFP_KERNEL);
6815 state->scaling = RMX_OFF;
6816 state->underscan_enable = false;
6817 state->underscan_hborder = 0;
6818 state->underscan_vborder = 0;
6819 state->base.max_requested_bpc = 8;
6820 state->vcpi_slots = 0;
6822 if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
6823 state->abm_level = amdgpu_dm_abm_level;
6825 __drm_atomic_helper_connector_reset(connector, &state->base);
6829 struct drm_connector_state *
6830 amdgpu_dm_connector_atomic_duplicate_state(struct drm_connector *connector)
6832 struct dm_connector_state *state =
6833 to_dm_connector_state(connector->state);
6835 struct dm_connector_state *new_state =
6836 kmemdup(state, sizeof(*state), GFP_KERNEL);
6841 __drm_atomic_helper_connector_duplicate_state(connector, &new_state->base);
6843 new_state->freesync_capable = state->freesync_capable;
6844 new_state->abm_level = state->abm_level;
6845 new_state->scaling = state->scaling;
6846 new_state->underscan_enable = state->underscan_enable;
6847 new_state->underscan_hborder = state->underscan_hborder;
6848 new_state->underscan_vborder = state->underscan_vborder;
6849 new_state->vcpi_slots = state->vcpi_slots;
6850 new_state->pbn = state->pbn;
6851 return &new_state->base;
6855 amdgpu_dm_connector_late_register(struct drm_connector *connector)
6857 struct amdgpu_dm_connector *amdgpu_dm_connector =
6858 to_amdgpu_dm_connector(connector);
6861 if ((connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort) ||
6862 (connector->connector_type == DRM_MODE_CONNECTOR_eDP)) {
6863 amdgpu_dm_connector->dm_dp_aux.aux.dev = connector->kdev;
6864 r = drm_dp_aux_register(&amdgpu_dm_connector->dm_dp_aux.aux);
6869 #if defined(CONFIG_DEBUG_FS)
6870 connector_debugfs_init(amdgpu_dm_connector);
6876 static const struct drm_connector_funcs amdgpu_dm_connector_funcs = {
6877 .reset = amdgpu_dm_connector_funcs_reset,
6878 .detect = amdgpu_dm_connector_detect,
6879 .fill_modes = drm_helper_probe_single_connector_modes,
6880 .destroy = amdgpu_dm_connector_destroy,
6881 .atomic_duplicate_state = amdgpu_dm_connector_atomic_duplicate_state,
6882 .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
6883 .atomic_set_property = amdgpu_dm_connector_atomic_set_property,
6884 .atomic_get_property = amdgpu_dm_connector_atomic_get_property,
6885 .late_register = amdgpu_dm_connector_late_register,
6886 .early_unregister = amdgpu_dm_connector_unregister
6889 static int get_modes(struct drm_connector *connector)
6891 return amdgpu_dm_connector_get_modes(connector);
6894 static void create_eml_sink(struct amdgpu_dm_connector *aconnector)
6896 struct dc_sink_init_data init_params = {
6897 .link = aconnector->dc_link,
6898 .sink_signal = SIGNAL_TYPE_VIRTUAL
6902 if (!aconnector->base.edid_blob_ptr) {
6903 DRM_ERROR("No EDID firmware found on connector: %s ,forcing to OFF!\n",
6904 aconnector->base.name);
6906 aconnector->base.force = DRM_FORCE_OFF;
6907 aconnector->base.override_edid = false;
6911 edid = (struct edid *) aconnector->base.edid_blob_ptr->data;
6913 aconnector->edid = edid;
6915 aconnector->dc_em_sink = dc_link_add_remote_sink(
6916 aconnector->dc_link,
6918 (edid->extensions + 1) * EDID_LENGTH,
6921 if (aconnector->base.force == DRM_FORCE_ON) {
6922 aconnector->dc_sink = aconnector->dc_link->local_sink ?
6923 aconnector->dc_link->local_sink :
6924 aconnector->dc_em_sink;
6925 dc_sink_retain(aconnector->dc_sink);
6929 static void handle_edid_mgmt(struct amdgpu_dm_connector *aconnector)
6931 struct dc_link *link = (struct dc_link *)aconnector->dc_link;
6934 * In case of headless boot with force on for a DP managed connector,
6935 * those settings have to be != 0 to get an initial modeset
6937 if (link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT) {
6938 link->verified_link_cap.lane_count = LANE_COUNT_FOUR;
6939 link->verified_link_cap.link_rate = LINK_RATE_HIGH2;
6943 aconnector->base.override_edid = true;
6944 create_eml_sink(aconnector);
6947 static struct dc_stream_state *
6948 create_validate_stream_for_sink(struct amdgpu_dm_connector *aconnector,
6949 const struct drm_display_mode *drm_mode,
6950 const struct dm_connector_state *dm_state,
6951 const struct dc_stream_state *old_stream)
6953 struct drm_connector *connector = &aconnector->base;
6954 struct amdgpu_device *adev = drm_to_adev(connector->dev);
6955 struct dc_stream_state *stream;
6956 const struct drm_connector_state *drm_state = dm_state ? &dm_state->base : NULL;
6957 int requested_bpc = drm_state ? drm_state->max_requested_bpc : 8;
6958 enum dc_status dc_result = DC_OK;
6961 stream = create_stream_for_sink(aconnector, drm_mode,
6962 dm_state, old_stream,
6964 if (stream == NULL) {
6965 DRM_ERROR("Failed to create stream for sink!\n");
6969 dc_result = dc_validate_stream(adev->dm.dc, stream);
6971 if (dc_result != DC_OK) {
6972 DRM_DEBUG_KMS("Mode %dx%d (clk %d) failed DC validation with error %d (%s)\n",
6977 dc_status_to_str(dc_result));
6979 dc_stream_release(stream);
6981 requested_bpc -= 2; /* lower bpc to retry validation */
6984 } while (stream == NULL && requested_bpc >= 6);
6986 if (dc_result == DC_FAIL_ENC_VALIDATE && !aconnector->force_yuv420_output) {
6987 DRM_DEBUG_KMS("Retry forcing YCbCr420 encoding\n");
6989 aconnector->force_yuv420_output = true;
6990 stream = create_validate_stream_for_sink(aconnector, drm_mode,
6991 dm_state, old_stream);
6992 aconnector->force_yuv420_output = false;
6998 enum drm_mode_status amdgpu_dm_connector_mode_valid(struct drm_connector *connector,
6999 struct drm_display_mode *mode)
7001 int result = MODE_ERROR;
7002 struct dc_sink *dc_sink;
7003 /* TODO: Unhardcode stream count */
7004 struct dc_stream_state *stream;
7005 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
7007 if ((mode->flags & DRM_MODE_FLAG_INTERLACE) ||
7008 (mode->flags & DRM_MODE_FLAG_DBLSCAN))
7012 * Only run this the first time mode_valid is called to initialize
7015 if (aconnector->base.force != DRM_FORCE_UNSPECIFIED &&
7016 !aconnector->dc_em_sink)
7017 handle_edid_mgmt(aconnector);
7019 dc_sink = to_amdgpu_dm_connector(connector)->dc_sink;
7021 if (dc_sink == NULL && aconnector->base.force != DRM_FORCE_ON_DIGITAL &&
7022 aconnector->base.force != DRM_FORCE_ON) {
7023 DRM_ERROR("dc_sink is NULL!\n");
7027 stream = create_validate_stream_for_sink(aconnector, mode, NULL, NULL);
7029 dc_stream_release(stream);
7034 /* TODO: error handling */
7038 static int fill_hdr_info_packet(const struct drm_connector_state *state,
7039 struct dc_info_packet *out)
7041 struct hdmi_drm_infoframe frame;
7042 unsigned char buf[30]; /* 26 + 4 */
7046 memset(out, 0, sizeof(*out));
7048 if (!state->hdr_output_metadata)
7051 ret = drm_hdmi_infoframe_set_hdr_metadata(&frame, state);
7055 len = hdmi_drm_infoframe_pack_only(&frame, buf, sizeof(buf));
7059 /* Static metadata is a fixed 26 bytes + 4 byte header. */
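/*
 * hdmi_drm_infoframe_pack_only() writes a 4 byte header (type, version,
 * length and a checksum byte) followed by the 26 byte Dynamic Range and
 * Mastering payload. The header is rebuilt per connector type below;
 * only the payload is copied from buf[4] onwards.
 */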
7063 /* Prepare the infopacket for DC. */
7064 switch (state->connector->connector_type) {
7065 case DRM_MODE_CONNECTOR_HDMIA:
7066 out->hb0 = 0x87; /* type */
7067 out->hb1 = 0x01; /* version */
7068 out->hb2 = 0x1A; /* length */
7069 out->sb[0] = buf[3]; /* checksum */
7073 case DRM_MODE_CONNECTOR_DisplayPort:
7074 case DRM_MODE_CONNECTOR_eDP:
7075 out->hb0 = 0x00; /* sdp id, zero */
7076 out->hb1 = 0x87; /* type */
7077 out->hb2 = 0x1D; /* payload len - 1 */
7078 out->hb3 = (0x13 << 2); /* sdp version */
7079 out->sb[0] = 0x01; /* version */
7080 out->sb[1] = 0x1A; /* length */
7088 memcpy(&out->sb[i], &buf[4], 26);
7091 print_hex_dump(KERN_DEBUG, "HDR SB:", DUMP_PREFIX_NONE, 16, 1, out->sb,
7092 sizeof(out->sb), false);
7098 amdgpu_dm_connector_atomic_check(struct drm_connector *conn,
7099 struct drm_atomic_state *state)
7101 struct drm_connector_state *new_con_state =
7102 drm_atomic_get_new_connector_state(state, conn);
7103 struct drm_connector_state *old_con_state =
7104 drm_atomic_get_old_connector_state(state, conn);
7105 struct drm_crtc *crtc = new_con_state->crtc;
7106 struct drm_crtc_state *new_crtc_state;
7109 trace_amdgpu_dm_connector_atomic_check(new_con_state);
7114 if (!drm_connector_atomic_hdr_metadata_equal(old_con_state, new_con_state)) {
7115 struct dc_info_packet hdr_infopacket;
7117 ret = fill_hdr_info_packet(new_con_state, &hdr_infopacket);
7121 new_crtc_state = drm_atomic_get_crtc_state(state, crtc);
7122 if (IS_ERR(new_crtc_state))
7123 return PTR_ERR(new_crtc_state);
7126 * DC considers the stream backends changed if the
7127 * static metadata changes. Forcing the modeset also
7128 * gives a simple way for userspace to switch from
7129 * 8bpc to 10bpc when setting the metadata to enter
7132 * Changing the static metadata after it's been
7133 * set is permissible, however. So only force a
7134 * modeset if we're entering or exiting HDR.
7136 new_crtc_state->mode_changed =
7137 !old_con_state->hdr_output_metadata ||
7138 !new_con_state->hdr_output_metadata;
7144 static const struct drm_connector_helper_funcs
7145 amdgpu_dm_connector_helper_funcs = {
7147 * If hotplugging a second, bigger display in FB Con mode, bigger resolution
7148 * modes will be filtered by drm_mode_validate_size(), and those modes
7149 * are missing after the user starts lightdm. So we need to renew the modes
7150 * list in the get_modes callback, not just return the modes count
7152 .get_modes = get_modes,
7153 .mode_valid = amdgpu_dm_connector_mode_valid,
7154 .atomic_check = amdgpu_dm_connector_atomic_check,
7157 static void dm_crtc_helper_disable(struct drm_crtc *crtc)
7161 static int count_crtc_active_planes(struct drm_crtc_state *new_crtc_state)
7163 struct drm_atomic_state *state = new_crtc_state->state;
7164 struct drm_plane *plane;
7167 drm_for_each_plane_mask(plane, state->dev, new_crtc_state->plane_mask) {
7168 struct drm_plane_state *new_plane_state;
7170 /* Cursor planes are "fake". */
7171 if (plane->type == DRM_PLANE_TYPE_CURSOR)
7174 new_plane_state = drm_atomic_get_new_plane_state(state, plane);
7176 if (!new_plane_state) {
7178 * The plane is enabled on the CRTC and hasn't changed
7179 * state. This means that it previously passed
7180 * validation and is therefore enabled.
7186 /* We need a framebuffer to be considered enabled. */
7187 num_active += (new_plane_state->fb != NULL);
7193 static void dm_update_crtc_active_planes(struct drm_crtc *crtc,
7194 struct drm_crtc_state *new_crtc_state)
7196 struct dm_crtc_state *dm_new_crtc_state =
7197 to_dm_crtc_state(new_crtc_state);
7199 dm_new_crtc_state->active_planes = 0;
7201 if (!dm_new_crtc_state->stream)
7204 dm_new_crtc_state->active_planes =
7205 count_crtc_active_planes(new_crtc_state);
7208 static int dm_crtc_helper_atomic_check(struct drm_crtc *crtc,
7209 struct drm_atomic_state *state)
7211 struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state,
7213 struct amdgpu_device *adev = drm_to_adev(crtc->dev);
7214 struct dc *dc = adev->dm.dc;
7215 struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
7218 trace_amdgpu_dm_crtc_atomic_check(crtc_state);
7220 dm_update_crtc_active_planes(crtc, crtc_state);
7222 if (WARN_ON(unlikely(!dm_crtc_state->stream &&
7223 modeset_required(crtc_state, NULL, dm_crtc_state->stream)))) {
7228 * We require the primary plane to be enabled whenever the CRTC is, otherwise
7229 * drm_mode_cursor_universal may end up trying to enable the cursor plane while all other
7230 * planes are disabled, which is not supported by the hardware. And there is legacy
7231 * userspace which stops using the HW cursor altogether in response to the resulting EINVAL.
7233 if (crtc_state->enable &&
7234 !(crtc_state->plane_mask & drm_plane_mask(crtc->primary))) {
7235 DRM_DEBUG_ATOMIC("Can't enable a CRTC without enabling the primary plane\n");
7239 /* In some use cases, like reset, no stream is attached */
7240 if (!dm_crtc_state->stream)
7243 if (dc_validate_stream(dc, dm_crtc_state->stream) == DC_OK)
7246 DRM_DEBUG_ATOMIC("Failed DC stream validation\n");
7250 static bool dm_crtc_helper_mode_fixup(struct drm_crtc *crtc,
7251 const struct drm_display_mode *mode,
7252 struct drm_display_mode *adjusted_mode)
7257 static const struct drm_crtc_helper_funcs amdgpu_dm_crtc_helper_funcs = {
7258 .disable = dm_crtc_helper_disable,
7259 .atomic_check = dm_crtc_helper_atomic_check,
7260 .mode_fixup = dm_crtc_helper_mode_fixup,
7261 .get_scanout_position = amdgpu_crtc_get_scanout_position,
7264 static void dm_encoder_helper_disable(struct drm_encoder *encoder)
7269 static int convert_dc_color_depth_into_bpc (enum dc_color_depth display_color_depth)
7271 switch (display_color_depth) {
7272 case COLOR_DEPTH_666:
7274 case COLOR_DEPTH_888:
7276 case COLOR_DEPTH_101010:
7278 case COLOR_DEPTH_121212:
7280 case COLOR_DEPTH_141414:
7282 case COLOR_DEPTH_161616:
7290 static int dm_encoder_helper_atomic_check(struct drm_encoder *encoder,
7291 struct drm_crtc_state *crtc_state,
7292 struct drm_connector_state *conn_state)
7294 struct drm_atomic_state *state = crtc_state->state;
7295 struct drm_connector *connector = conn_state->connector;
7296 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
7297 struct dm_connector_state *dm_new_connector_state = to_dm_connector_state(conn_state);
7298 const struct drm_display_mode *adjusted_mode = &crtc_state->adjusted_mode;
7299 struct drm_dp_mst_topology_mgr *mst_mgr;
7300 struct drm_dp_mst_port *mst_port;
7301 enum dc_color_depth color_depth;
7303 bool is_y420 = false;
7305 if (!aconnector->port || !aconnector->dc_sink)
7308 mst_port = aconnector->port;
7309 mst_mgr = &aconnector->mst_port->mst_mgr;
7311 if (!crtc_state->connectors_changed && !crtc_state->mode_changed)
7314 if (!state->duplicated) {
7315 int max_bpc = conn_state->max_requested_bpc;
7316 is_y420 = drm_mode_is_420_also(&connector->display_info, adjusted_mode) &&
7317 aconnector->force_yuv420_output;
7318 color_depth = convert_color_depth_from_display_info(connector,
7321 bpp = convert_dc_color_depth_into_bpc(color_depth) * 3;
7322 clock = adjusted_mode->clock;
7323 dm_new_connector_state->pbn = drm_dp_calc_pbn_mode(clock, bpp, false);
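/*
 * bpp is bits per pixel (3 components * bpc); drm_dp_calc_pbn_mode()
 * converts the pixel clock and bpp into DP MST Payload Bandwidth
 * Number units, which are turned into VC time slots below.
 */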
7325 dm_new_connector_state->vcpi_slots = drm_dp_atomic_find_vcpi_slots(state,
7328 dm_new_connector_state->pbn,
7329 dm_mst_get_pbn_divider(aconnector->dc_link));
7330 if (dm_new_connector_state->vcpi_slots < 0) {
7331 DRM_DEBUG_ATOMIC("failed finding vcpi slots: %d\n", (int)dm_new_connector_state->vcpi_slots);
7332 return dm_new_connector_state->vcpi_slots;
7337 const struct drm_encoder_helper_funcs amdgpu_dm_encoder_helper_funcs = {
7338 .disable = dm_encoder_helper_disable,
7339 .atomic_check = dm_encoder_helper_atomic_check
7342 #if defined(CONFIG_DRM_AMD_DC_DCN)
7343 static int dm_update_mst_vcpi_slots_for_dsc(struct drm_atomic_state *state,
7344 struct dc_state *dc_state,
7345 struct dsc_mst_fairness_vars *vars)
7347 struct dc_stream_state *stream = NULL;
7348 struct drm_connector *connector;
7349 struct drm_connector_state *new_con_state;
7350 struct amdgpu_dm_connector *aconnector;
7351 struct dm_connector_state *dm_conn_state;
7353 int vcpi, pbn_div, pbn, slot_num = 0;
7355 for_each_new_connector_in_state(state, connector, new_con_state, i) {
7357 aconnector = to_amdgpu_dm_connector(connector);
7359 if (!aconnector->port)
7362 if (!new_con_state || !new_con_state->crtc)
7365 dm_conn_state = to_dm_connector_state(new_con_state);
7367 for (j = 0; j < dc_state->stream_count; j++) {
7368 stream = dc_state->streams[j];
7372 if ((struct amdgpu_dm_connector*)stream->dm_stream_context == aconnector)
7381 pbn_div = dm_mst_get_pbn_divider(stream->link);
7382 /* pbn is calculated by compute_mst_dsc_configs_for_state */
7383 for (j = 0; j < dc_state->stream_count; j++) {
7384 if (vars[j].aconnector == aconnector) {
7390 if (j == dc_state->stream_count)
7393 slot_num = DIV_ROUND_UP(pbn, pbn_div);
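/*
 * pbn_div is the PBN capacity of a single MST time slot on this link,
 * so slot_num is the number of time slots the stream needs.
 */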
7395 if (stream->timing.flags.DSC != 1) {
7396 dm_conn_state->pbn = pbn;
7397 dm_conn_state->vcpi_slots = slot_num;
7399 drm_dp_mst_atomic_enable_dsc(state,
7407 vcpi = drm_dp_mst_atomic_enable_dsc(state,
7414 dm_conn_state->pbn = pbn;
7415 dm_conn_state->vcpi_slots = vcpi;
7421 static void dm_drm_plane_reset(struct drm_plane *plane)
7423 struct dm_plane_state *amdgpu_state = NULL;
7426 plane->funcs->atomic_destroy_state(plane, plane->state);
7428 amdgpu_state = kzalloc(sizeof(*amdgpu_state), GFP_KERNEL);
7429 WARN_ON(amdgpu_state == NULL);
7432 __drm_atomic_helper_plane_reset(plane, &amdgpu_state->base);
7435 static struct drm_plane_state *
7436 dm_drm_plane_duplicate_state(struct drm_plane *plane)
7438 struct dm_plane_state *dm_plane_state, *old_dm_plane_state;
7440 old_dm_plane_state = to_dm_plane_state(plane->state);
7441 dm_plane_state = kzalloc(sizeof(*dm_plane_state), GFP_KERNEL);
7442 if (!dm_plane_state)
7445 __drm_atomic_helper_plane_duplicate_state(plane, &dm_plane_state->base);
7447 if (old_dm_plane_state->dc_state) {
7448 dm_plane_state->dc_state = old_dm_plane_state->dc_state;
7449 dc_plane_state_retain(dm_plane_state->dc_state);
7452 return &dm_plane_state->base;
7455 static void dm_drm_plane_destroy_state(struct drm_plane *plane,
7456 struct drm_plane_state *state)
7458 struct dm_plane_state *dm_plane_state = to_dm_plane_state(state);
7460 if (dm_plane_state->dc_state)
7461 dc_plane_state_release(dm_plane_state->dc_state);
7463 drm_atomic_helper_plane_destroy_state(plane, state);
7466 static const struct drm_plane_funcs dm_plane_funcs = {
7467 .update_plane = drm_atomic_helper_update_plane,
7468 .disable_plane = drm_atomic_helper_disable_plane,
7469 .destroy = drm_primary_helper_destroy,
7470 .reset = dm_drm_plane_reset,
7471 .atomic_duplicate_state = dm_drm_plane_duplicate_state,
7472 .atomic_destroy_state = dm_drm_plane_destroy_state,
7473 .format_mod_supported = dm_plane_format_mod_supported,
7476 static int dm_plane_helper_prepare_fb(struct drm_plane *plane,
7477 struct drm_plane_state *new_state)
7479 struct amdgpu_framebuffer *afb;
7480 struct drm_gem_object *obj;
7481 struct amdgpu_device *adev;
7482 struct amdgpu_bo *rbo;
7483 struct dm_plane_state *dm_plane_state_new, *dm_plane_state_old;
7484 struct list_head list;
7485 struct ttm_validate_buffer tv;
7486 struct ww_acquire_ctx ticket;
7490 if (!new_state->fb) {
7491 DRM_DEBUG_KMS("No FB bound\n");
7495 afb = to_amdgpu_framebuffer(new_state->fb);
7496 obj = new_state->fb->obj[0];
7497 rbo = gem_to_amdgpu_bo(obj);
7498 adev = amdgpu_ttm_adev(rbo->tbo.bdev);
7499 INIT_LIST_HEAD(&list);
7503 list_add(&tv.head, &list);
7505 r = ttm_eu_reserve_buffers(&ticket, &list, false, NULL);
7507 dev_err(adev->dev, "fail to reserve bo (%d)\n", r);
7511 if (plane->type != DRM_PLANE_TYPE_CURSOR)
7512 domain = amdgpu_display_supported_domains(adev, rbo->flags);
7514 domain = AMDGPU_GEM_DOMAIN_VRAM;
7516 r = amdgpu_bo_pin(rbo, domain);
7517 if (unlikely(r != 0)) {
7518 if (r != -ERESTARTSYS)
7519 DRM_ERROR("Failed to pin framebuffer with error %d\n", r);
7520 ttm_eu_backoff_reservation(&ticket, &list);
7524 r = amdgpu_ttm_alloc_gart(&rbo->tbo);
7525 if (unlikely(r != 0)) {
7526 amdgpu_bo_unpin(rbo);
7527 ttm_eu_backoff_reservation(&ticket, &list);
7528 DRM_ERROR("%p bind failed\n", rbo);
7532 ttm_eu_backoff_reservation(&ticket, &list);
7534 afb->address = amdgpu_bo_gpu_offset(rbo);
7539 * We don't do surface updates on planes that have been newly created,
7540 * but we also don't have the afb->address during atomic check.
7542 * Fill in buffer attributes depending on the address here, but only on
7543 * newly created planes since they're not being used by DC yet and this
7544 * won't modify global state.
7546 dm_plane_state_old = to_dm_plane_state(plane->state);
7547 dm_plane_state_new = to_dm_plane_state(new_state);
7549 if (dm_plane_state_new->dc_state &&
7550 dm_plane_state_old->dc_state != dm_plane_state_new->dc_state) {
7551 struct dc_plane_state *plane_state =
7552 dm_plane_state_new->dc_state;
7553 bool force_disable_dcc = !plane_state->dcc.enable;
7555 fill_plane_buffer_attributes(
7556 adev, afb, plane_state->format, plane_state->rotation,
7558 &plane_state->tiling_info, &plane_state->plane_size,
7559 &plane_state->dcc, &plane_state->address,
7560 afb->tmz_surface, force_disable_dcc);
7566 static void dm_plane_helper_cleanup_fb(struct drm_plane *plane,
7567 struct drm_plane_state *old_state)
7569 struct amdgpu_bo *rbo;
7575 rbo = gem_to_amdgpu_bo(old_state->fb->obj[0]);
7576 r = amdgpu_bo_reserve(rbo, false);
7578 DRM_ERROR("failed to reserve rbo before unpin\n");
7582 amdgpu_bo_unpin(rbo);
7583 amdgpu_bo_unreserve(rbo);
7584 amdgpu_bo_unref(&rbo);
7587 static int dm_plane_helper_check_state(struct drm_plane_state *state,
7588 struct drm_crtc_state *new_crtc_state)
7590 struct drm_framebuffer *fb = state->fb;
7591 int min_downscale, max_upscale;
7593 int max_scale = INT_MAX;
7595 /* Plane enabled? Validate viewport and get scaling factors from plane caps. */
7596 if (fb && state->crtc) {
7597 /* Validate viewport to cover the case when only the position changes */
7598 if (state->plane->type != DRM_PLANE_TYPE_CURSOR) {
7599 int viewport_width = state->crtc_w;
7600 int viewport_height = state->crtc_h;
7602 if (state->crtc_x < 0)
7603 viewport_width += state->crtc_x;
7604 else if (state->crtc_x + state->crtc_w > new_crtc_state->mode.crtc_hdisplay)
7605 viewport_width = new_crtc_state->mode.crtc_hdisplay - state->crtc_x;
7607 if (state->crtc_y < 0)
7608 viewport_height += state->crtc_y;
7609 else if (state->crtc_y + state->crtc_h > new_crtc_state->mode.crtc_vdisplay)
7610 viewport_height = new_crtc_state->mode.crtc_vdisplay - state->crtc_y;
7612 if (viewport_width < 0 || viewport_height < 0) {
7613 DRM_DEBUG_ATOMIC("Plane completely outside of screen\n");
7615 } else if (viewport_width < MIN_VIEWPORT_SIZE*2) { /* x2 for width is because of pipe-split. */
7616 DRM_DEBUG_ATOMIC("Viewport width %d smaller than %d\n", viewport_width, MIN_VIEWPORT_SIZE*2);
7618 } else if (viewport_height < MIN_VIEWPORT_SIZE) {
7619 DRM_DEBUG_ATOMIC("Viewport height %d smaller than %d\n", viewport_height, MIN_VIEWPORT_SIZE);
7625 /* Get min/max allowed scaling factors from plane caps. */
7626 get_min_max_dc_plane_scaling(state->crtc->dev, fb,
7627 &min_downscale, &max_upscale);
7629 * Convert to drm convention: 16.16 fixed point, instead of dc's
7630 * 1.0 == 1000. Also drm scaling is src/dst instead of dc's
7631 * dst/src, so min_scale = 1.0 / max_upscale, etc.
7633 min_scale = (1000 << 16) / max_upscale;
7634 max_scale = (1000 << 16) / min_downscale;
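/*
 * Worked example: a max_upscale cap of 16000 (16x in DC's 1.0 == 1000
 * convention) gives min_scale = (1000 << 16) / 16000 = 0x1000, i.e.
 * 1/16 in drm's 16.16 fixed point.
 */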
7637 return drm_atomic_helper_check_plane_state(
7638 state, new_crtc_state, min_scale, max_scale, true, true);
7641 static int dm_plane_atomic_check(struct drm_plane *plane,
7642 struct drm_atomic_state *state)
7644 struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state,
7646 struct amdgpu_device *adev = drm_to_adev(plane->dev);
7647 struct dc *dc = adev->dm.dc;
7648 struct dm_plane_state *dm_plane_state;
7649 struct dc_scaling_info scaling_info;
7650 struct drm_crtc_state *new_crtc_state;
7653 trace_amdgpu_dm_plane_atomic_check(new_plane_state);
7655 dm_plane_state = to_dm_plane_state(new_plane_state);
7657 if (!dm_plane_state->dc_state)
7661 drm_atomic_get_new_crtc_state(state,
7662 new_plane_state->crtc);
7663 if (!new_crtc_state)
7666 ret = dm_plane_helper_check_state(new_plane_state, new_crtc_state);
7670 ret = fill_dc_scaling_info(adev, new_plane_state, &scaling_info);
7674 if (dc_validate_plane(dc, dm_plane_state->dc_state) == DC_OK)
7680 static int dm_plane_atomic_async_check(struct drm_plane *plane,
7681 struct drm_atomic_state *state)
7683 /* Only support async updates on cursor planes. */
7684 if (plane->type != DRM_PLANE_TYPE_CURSOR)
7690 static void dm_plane_atomic_async_update(struct drm_plane *plane,
7691 struct drm_atomic_state *state)
7693 struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state,
7695 struct drm_plane_state *old_state =
7696 drm_atomic_get_old_plane_state(state, plane);
7698 trace_amdgpu_dm_atomic_update_cursor(new_state);
7700 swap(plane->state->fb, new_state->fb);
7702 plane->state->src_x = new_state->src_x;
7703 plane->state->src_y = new_state->src_y;
7704 plane->state->src_w = new_state->src_w;
7705 plane->state->src_h = new_state->src_h;
7706 plane->state->crtc_x = new_state->crtc_x;
7707 plane->state->crtc_y = new_state->crtc_y;
7708 plane->state->crtc_w = new_state->crtc_w;
7709 plane->state->crtc_h = new_state->crtc_h;
7711 handle_cursor_update(plane, old_state);
7714 static const struct drm_plane_helper_funcs dm_plane_helper_funcs = {
7715 .prepare_fb = dm_plane_helper_prepare_fb,
7716 .cleanup_fb = dm_plane_helper_cleanup_fb,
7717 .atomic_check = dm_plane_atomic_check,
7718 .atomic_async_check = dm_plane_atomic_async_check,
7719 .atomic_async_update = dm_plane_atomic_async_update
7723 * TODO: these are currently initialized to rgb formats only.
7724 * For future use cases we should either initialize them dynamically based on
7725 * plane capabilities, or initialize this array to all formats, so internal drm
7726 * check will succeed, and let DC implement the proper checks
7728 static const uint32_t rgb_formats[] = {
7729 DRM_FORMAT_XRGB8888,
7730 DRM_FORMAT_ARGB8888,
7731 DRM_FORMAT_RGBA8888,
7732 DRM_FORMAT_XRGB2101010,
7733 DRM_FORMAT_XBGR2101010,
7734 DRM_FORMAT_ARGB2101010,
7735 DRM_FORMAT_ABGR2101010,
7736 DRM_FORMAT_XRGB16161616,
7737 DRM_FORMAT_XBGR16161616,
7738 DRM_FORMAT_ARGB16161616,
7739 DRM_FORMAT_ABGR16161616,
7740 DRM_FORMAT_XBGR8888,
7741 DRM_FORMAT_ABGR8888,
7745 static const uint32_t overlay_formats[] = {
7746 DRM_FORMAT_XRGB8888,
7747 DRM_FORMAT_ARGB8888,
7748 DRM_FORMAT_RGBA8888,
7749 DRM_FORMAT_XBGR8888,
7750 DRM_FORMAT_ABGR8888,
7754 static const u32 cursor_formats[] = {
7758 static int get_plane_formats(const struct drm_plane *plane,
7759 const struct dc_plane_cap *plane_cap,
7760 uint32_t *formats, int max_formats)
7762 int i, num_formats = 0;
7765 * TODO: Query support for each group of formats directly from
7766 * DC plane caps. This will require adding more formats to the
7770 switch (plane->type) {
7771 case DRM_PLANE_TYPE_PRIMARY:
7772 for (i = 0; i < ARRAY_SIZE(rgb_formats); ++i) {
7773 if (num_formats >= max_formats)
7776 formats[num_formats++] = rgb_formats[i];
7779 if (plane_cap && plane_cap->pixel_format_support.nv12)
7780 formats[num_formats++] = DRM_FORMAT_NV12;
7781 if (plane_cap && plane_cap->pixel_format_support.p010)
7782 formats[num_formats++] = DRM_FORMAT_P010;
7783 if (plane_cap && plane_cap->pixel_format_support.fp16) {
7784 formats[num_formats++] = DRM_FORMAT_XRGB16161616F;
7785 formats[num_formats++] = DRM_FORMAT_ARGB16161616F;
7786 formats[num_formats++] = DRM_FORMAT_XBGR16161616F;
7787 formats[num_formats++] = DRM_FORMAT_ABGR16161616F;
7791 case DRM_PLANE_TYPE_OVERLAY:
7792 for (i = 0; i < ARRAY_SIZE(overlay_formats); ++i) {
7793 if (num_formats >= max_formats)
7796 formats[num_formats++] = overlay_formats[i];
7800 case DRM_PLANE_TYPE_CURSOR:
7801 for (i = 0; i < ARRAY_SIZE(cursor_formats); ++i) {
7802 if (num_formats >= max_formats)
7805 formats[num_formats++] = cursor_formats[i];
7813 static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
7814 struct drm_plane *plane,
7815 unsigned long possible_crtcs,
7816 const struct dc_plane_cap *plane_cap)
7818 uint32_t formats[32];
7821 unsigned int supported_rotations;
7822 uint64_t *modifiers = NULL;
7824 num_formats = get_plane_formats(plane, plane_cap, formats,
7825 ARRAY_SIZE(formats));
7827 res = get_plane_modifiers(dm->adev, plane->type, &modifiers);
7831 if (modifiers == NULL)
7832 adev_to_drm(dm->adev)->mode_config.fb_modifiers_not_supported = true;
7834 res = drm_universal_plane_init(adev_to_drm(dm->adev), plane, possible_crtcs,
7835 &dm_plane_funcs, formats, num_formats,
7836 modifiers, plane->type, NULL);
7841 if (plane->type == DRM_PLANE_TYPE_OVERLAY &&
7842 plane_cap && plane_cap->per_pixel_alpha) {
7843 unsigned int blend_caps = BIT(DRM_MODE_BLEND_PIXEL_NONE) |
7844 BIT(DRM_MODE_BLEND_PREMULTI);
7846 drm_plane_create_alpha_property(plane);
7847 drm_plane_create_blend_mode_property(plane, blend_caps);
7850 if (plane->type == DRM_PLANE_TYPE_PRIMARY &&
7852 (plane_cap->pixel_format_support.nv12 ||
7853 plane_cap->pixel_format_support.p010)) {
7854 /* This only affects YUV formats. */
7855 drm_plane_create_color_properties(
7857 BIT(DRM_COLOR_YCBCR_BT601) |
7858 BIT(DRM_COLOR_YCBCR_BT709) |
7859 BIT(DRM_COLOR_YCBCR_BT2020),
7860 BIT(DRM_COLOR_YCBCR_LIMITED_RANGE) |
7861 BIT(DRM_COLOR_YCBCR_FULL_RANGE),
7862 DRM_COLOR_YCBCR_BT709, DRM_COLOR_YCBCR_LIMITED_RANGE);
7865 supported_rotations =
7866 DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_90 |
7867 DRM_MODE_ROTATE_180 | DRM_MODE_ROTATE_270;
7869 if (dm->adev->asic_type >= CHIP_BONAIRE &&
7870 plane->type != DRM_PLANE_TYPE_CURSOR)
7871 drm_plane_create_rotation_property(plane, DRM_MODE_ROTATE_0,
7872 supported_rotations);
7874 drm_plane_helper_add(plane, &dm_plane_helper_funcs);
7876 /* Create (reset) the plane state */
7877 if (plane->funcs->reset)
7878 plane->funcs->reset(plane);
7883 static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
7884 struct drm_plane *plane,
7885 uint32_t crtc_index)
7887 struct amdgpu_crtc *acrtc = NULL;
7888 struct drm_plane *cursor_plane;
7892 cursor_plane = kzalloc(sizeof(*cursor_plane), GFP_KERNEL);
7896 cursor_plane->type = DRM_PLANE_TYPE_CURSOR;
7897 res = amdgpu_dm_plane_init(dm, cursor_plane, 0, NULL);
7899 acrtc = kzalloc(sizeof(struct amdgpu_crtc), GFP_KERNEL);
7903 res = drm_crtc_init_with_planes(
7908 &amdgpu_dm_crtc_funcs, NULL);
7913 drm_crtc_helper_add(&acrtc->base, &amdgpu_dm_crtc_helper_funcs);
7915 /* Create (reset) the plane state */
7916 if (acrtc->base.funcs->reset)
7917 acrtc->base.funcs->reset(&acrtc->base);
7919 acrtc->max_cursor_width = dm->adev->dm.dc->caps.max_cursor_size;
7920 acrtc->max_cursor_height = dm->adev->dm.dc->caps.max_cursor_size;
7922 acrtc->crtc_id = crtc_index;
7923 acrtc->base.enabled = false;
7924 acrtc->otg_inst = -1;
7926 dm->adev->mode_info.crtcs[crtc_index] = acrtc;
7927 drm_crtc_enable_color_mgmt(&acrtc->base, MAX_COLOR_LUT_ENTRIES,
7928 true, MAX_COLOR_LUT_ENTRIES);
7929 drm_mode_crtc_set_gamma_size(&acrtc->base, MAX_COLOR_LEGACY_LUT_ENTRIES);
7935 kfree(cursor_plane);
7940 static int to_drm_connector_type(enum signal_type st)
7943 case SIGNAL_TYPE_HDMI_TYPE_A:
7944 return DRM_MODE_CONNECTOR_HDMIA;
7945 case SIGNAL_TYPE_EDP:
7946 return DRM_MODE_CONNECTOR_eDP;
7947 case SIGNAL_TYPE_LVDS:
7948 return DRM_MODE_CONNECTOR_LVDS;
7949 case SIGNAL_TYPE_RGB:
7950 return DRM_MODE_CONNECTOR_VGA;
7951 case SIGNAL_TYPE_DISPLAY_PORT:
7952 case SIGNAL_TYPE_DISPLAY_PORT_MST:
7953 return DRM_MODE_CONNECTOR_DisplayPort;
7954 case SIGNAL_TYPE_DVI_DUAL_LINK:
7955 case SIGNAL_TYPE_DVI_SINGLE_LINK:
7956 return DRM_MODE_CONNECTOR_DVID;
7957 case SIGNAL_TYPE_VIRTUAL:
7958 return DRM_MODE_CONNECTOR_VIRTUAL;
7961 return DRM_MODE_CONNECTOR_Unknown;
7965 static struct drm_encoder *amdgpu_dm_connector_to_encoder(struct drm_connector *connector)
7967 struct drm_encoder *encoder;
7969 /* There is only one encoder per connector */
7970 drm_connector_for_each_possible_encoder(connector, encoder)
7976 static void amdgpu_dm_get_native_mode(struct drm_connector *connector)
7978 struct drm_encoder *encoder;
7979 struct amdgpu_encoder *amdgpu_encoder;
7981 encoder = amdgpu_dm_connector_to_encoder(connector);
7983 if (encoder == NULL)
7986 amdgpu_encoder = to_amdgpu_encoder(encoder);
7988 amdgpu_encoder->native_mode.clock = 0;
7990 if (!list_empty(&connector->probed_modes)) {
7991 struct drm_display_mode *preferred_mode = NULL;
7993 list_for_each_entry(preferred_mode,
7994 &connector->probed_modes,
7996 if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED)
7997 amdgpu_encoder->native_mode = *preferred_mode;
8005 static struct drm_display_mode *
8006 amdgpu_dm_create_common_mode(struct drm_encoder *encoder,
8008 int hdisplay, int vdisplay)
8010 struct drm_device *dev = encoder->dev;
8011 struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
8012 struct drm_display_mode *mode = NULL;
8013 struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
8015 mode = drm_mode_duplicate(dev, native_mode);
8020 mode->hdisplay = hdisplay;
8021 mode->vdisplay = vdisplay;
8022 mode->type &= ~DRM_MODE_TYPE_PREFERRED;
8023 strscpy(mode->name, name, DRM_DISPLAY_MODE_LEN);
8029 static void amdgpu_dm_connector_add_common_modes(struct drm_encoder *encoder,
8030 struct drm_connector *connector)
8032 struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
8033 struct drm_display_mode *mode = NULL;
8034 struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
8035 struct amdgpu_dm_connector *amdgpu_dm_connector =
8036 to_amdgpu_dm_connector(connector);
8040 char name[DRM_DISPLAY_MODE_LEN];
8043 } common_modes[] = {
8044 { "640x480", 640, 480},
8045 { "800x600", 800, 600},
8046 { "1024x768", 1024, 768},
8047 { "1280x720", 1280, 720},
8048 { "1280x800", 1280, 800},
8049 {"1280x1024", 1280, 1024},
8050 { "1440x900", 1440, 900},
8051 {"1680x1050", 1680, 1050},
8052 {"1600x1200", 1600, 1200},
8053 {"1920x1080", 1920, 1080},
8054 {"1920x1200", 1920, 1200}
8057 n = ARRAY_SIZE(common_modes);
8059 for (i = 0; i < n; i++) {
8060 struct drm_display_mode *curmode = NULL;
8061 bool mode_existed = false;
8063 if (common_modes[i].w > native_mode->hdisplay ||
8064 common_modes[i].h > native_mode->vdisplay ||
8065 (common_modes[i].w == native_mode->hdisplay &&
8066 common_modes[i].h == native_mode->vdisplay))
8069 list_for_each_entry(curmode, &connector->probed_modes, head) {
8070 if (common_modes[i].w == curmode->hdisplay &&
8071 common_modes[i].h == curmode->vdisplay) {
8072 mode_existed = true;
8080 mode = amdgpu_dm_create_common_mode(encoder,
8081 common_modes[i].name, common_modes[i].w,
8083 drm_mode_probed_add(connector, mode);
8084 amdgpu_dm_connector->num_modes++;
8088 static void amdgpu_set_panel_orientation(struct drm_connector *connector)
8090 struct drm_encoder *encoder;
8091 struct amdgpu_encoder *amdgpu_encoder;
8092 const struct drm_display_mode *native_mode;
8094 if (connector->connector_type != DRM_MODE_CONNECTOR_eDP &&
8095 connector->connector_type != DRM_MODE_CONNECTOR_LVDS)
8098 encoder = amdgpu_dm_connector_to_encoder(connector);
8102 amdgpu_encoder = to_amdgpu_encoder(encoder);
8104 native_mode = &amdgpu_encoder->native_mode;
8105 if (native_mode->hdisplay == 0 || native_mode->vdisplay == 0)
8108 drm_connector_set_panel_orientation_with_quirk(connector,
8109 DRM_MODE_PANEL_ORIENTATION_UNKNOWN,
8110 native_mode->hdisplay,
8111 native_mode->vdisplay);
8114 static void amdgpu_dm_connector_ddc_get_modes(struct drm_connector *connector,
8117 struct amdgpu_dm_connector *amdgpu_dm_connector =
8118 to_amdgpu_dm_connector(connector);
8121 /* empty probed_modes */
8122 INIT_LIST_HEAD(&connector->probed_modes);
8123 amdgpu_dm_connector->num_modes =
8124 drm_add_edid_modes(connector, edid);
8126 /* Sort the probed modes before calling
8127 * amdgpu_dm_get_native_mode(), since an EDID can have
8128 * more than one preferred mode. Modes that appear
8129 * later in the probed mode list could be of a higher,
8130 * preferred resolution. For example, a 3840x2160
8131 * resolution in the base EDID preferred timing and a 4096x2160
8132 * preferred resolution in a DID extension block later.
8134 drm_mode_sort(&connector->probed_modes);
8135 amdgpu_dm_get_native_mode(connector);
8137 /* Freesync capabilities are reset by calling
8138 * drm_add_edid_modes() and need to be
8141 amdgpu_dm_update_freesync_caps(connector, edid);
8143 amdgpu_set_panel_orientation(connector);
8145 amdgpu_dm_connector->num_modes = 0;
8149 static bool is_duplicate_mode(struct amdgpu_dm_connector *aconnector,
8150 struct drm_display_mode *mode)
8152 struct drm_display_mode *m;
8154 list_for_each_entry (m, &aconnector->base.probed_modes, head) {
8155 if (drm_mode_equal(m, mode))
8162 static uint add_fs_modes(struct amdgpu_dm_connector *aconnector)
8164 const struct drm_display_mode *m;
8165 struct drm_display_mode *new_mode;
8167 uint32_t new_modes_count = 0;
8169 /* Standard FPS values
8178 * 60 - Commonly used
8179 * 48,72,96,120 - Multiples of 24
8181 static const uint32_t common_rates[] = {
8182 23976, 24000, 25000, 29970, 30000,
8183 48000, 50000, 60000, 72000, 96000, 120000
8187 * Find the mode with the highest refresh rate at the same resolution
8188 * as the preferred mode. Some monitors report a preferred mode with a
8189 * lower resolution than the mode with the highest supported refresh rate.
8192 m = get_highest_refresh_rate_mode(aconnector, true);
8196 for (i = 0; i < ARRAY_SIZE(common_rates); i++) {
8197 uint64_t target_vtotal, target_vtotal_diff;
8200 if (drm_mode_vrefresh(m) * 1000 < common_rates[i])
8203 if (common_rates[i] < aconnector->min_vfreq * 1000 ||
8204 common_rates[i] > aconnector->max_vfreq * 1000)
8207 num = (unsigned long long)m->clock * 1000 * 1000;
8208 den = common_rates[i] * (unsigned long long)m->htotal;
8209 target_vtotal = div_u64(num, den);
8210 target_vtotal_diff = target_vtotal - m->vtotal;
8212 /* Check for illegal modes */
8213 if (m->vsync_start + target_vtotal_diff < m->vdisplay ||
8214 m->vsync_end + target_vtotal_diff < m->vsync_start ||
8215 m->vtotal + target_vtotal_diff < m->vsync_end)
8218 new_mode = drm_mode_duplicate(aconnector->base.dev, m);
8222 new_mode->vtotal += (u16)target_vtotal_diff;
8223 new_mode->vsync_start += (u16)target_vtotal_diff;
8224 new_mode->vsync_end += (u16)target_vtotal_diff;
8225 new_mode->type &= ~DRM_MODE_TYPE_PREFERRED;
8226 new_mode->type |= DRM_MODE_TYPE_DRIVER;
8228 if (!is_duplicate_mode(aconnector, new_mode)) {
8229 drm_mode_probed_add(&aconnector->base, new_mode);
8230 new_modes_count += 1;
8232 drm_mode_destroy(aconnector->base.dev, new_mode);
8235 return new_modes_count;
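/*
 * Worked sketch (not part of the driver) of the vtotal arithmetic in
 * add_fs_modes() above: the target vtotal for a fixed refresh rate is derived
 * from the base mode's pixel clock (kHz) and htotal, with the rate given in
 * millihertz as in common_rates[]. For example, a 1920x1080 timing with
 * clock = 148500 kHz and htotal = 2200 retargeted to 48000 mHz (48 Hz) gives
 * 148500 * 1000 * 1000 / (48000 * 2200) = 1406 lines. The helper name is made
 * up for the example.
 */
static u64 __maybe_unused fs_target_vtotal_sketch(u32 clock_khz, u32 htotal,
						  u32 refresh_in_mhz)
{
	return div64_u64((u64)clock_khz * 1000 * 1000,
			 (u64)refresh_in_mhz * htotal);
}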
8238 static void amdgpu_dm_connector_add_freesync_modes(struct drm_connector *connector,
8241 struct amdgpu_dm_connector *amdgpu_dm_connector =
8242 to_amdgpu_dm_connector(connector);
8244 if (!(amdgpu_freesync_vid_mode && edid))
8247 if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10)
8248 amdgpu_dm_connector->num_modes +=
8249 add_fs_modes(amdgpu_dm_connector);
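/*
 * amdgpu_dm_connector_get_modes() below is wired up as the .get_modes hook of
 * amdgpu_dm_connector_helper_funcs: with a valid EDID it populates the probed
 * modes from the EDID, then appends the common and FreeSync-derived modes;
 * without one it falls back to drm_add_modes_noedid() with a 640x480 limit.
 */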
8252 static int amdgpu_dm_connector_get_modes(struct drm_connector *connector)
8254 struct amdgpu_dm_connector *amdgpu_dm_connector =
8255 to_amdgpu_dm_connector(connector);
8256 struct drm_encoder *encoder;
8257 struct edid *edid = amdgpu_dm_connector->edid;
8259 encoder = amdgpu_dm_connector_to_encoder(connector);
8261 if (!drm_edid_is_valid(edid)) {
8262 amdgpu_dm_connector->num_modes =
8263 drm_add_modes_noedid(connector, 640, 480);
8265 amdgpu_dm_connector_ddc_get_modes(connector, edid);
8266 amdgpu_dm_connector_add_common_modes(encoder, connector);
8267 amdgpu_dm_connector_add_freesync_modes(connector, edid);
8269 amdgpu_dm_fbc_init(connector);
8271 return amdgpu_dm_connector->num_modes;
8274 void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
8275 struct amdgpu_dm_connector *aconnector,
8277 struct dc_link *link,
8280 struct amdgpu_device *adev = drm_to_adev(dm->ddev);
8283 * Some of the properties below require access to state, like bpc.
8284 * Allocate some default initial connector state with our reset helper.
8286 if (aconnector->base.funcs->reset)
8287 aconnector->base.funcs->reset(&aconnector->base);
8289 aconnector->connector_id = link_index;
8290 aconnector->dc_link = link;
8291 aconnector->base.interlace_allowed = false;
8292 aconnector->base.doublescan_allowed = false;
8293 aconnector->base.stereo_allowed = false;
8294 aconnector->base.dpms = DRM_MODE_DPMS_OFF;
8295 aconnector->hpd.hpd = AMDGPU_HPD_NONE; /* not used */
8296 aconnector->audio_inst = -1;
8297 mutex_init(&aconnector->hpd_lock);
8300 * Configure HPD hot-plug support: connector->polled defaults to 0,
8301 * which means HPD hot plug is not supported.
8303 switch (connector_type) {
8304 case DRM_MODE_CONNECTOR_HDMIA:
8305 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
8306 aconnector->base.ycbcr_420_allowed =
8307 link->link_enc->features.hdmi_ycbcr420_supported ? true : false;
8309 case DRM_MODE_CONNECTOR_DisplayPort:
8310 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
8311 if (link->is_dig_mapping_flexible &&
8312 link->dc->res_pool->funcs->link_encs_assign) {
8314 link_enc_cfg_get_link_enc_used_by_link(link->ctx->dc, link);
8315 if (!link->link_enc)
8317 link_enc_cfg_get_next_avail_link_enc(link->ctx->dc);
8321 aconnector->base.ycbcr_420_allowed =
8322 link->link_enc->features.dp_ycbcr420_supported ? true : false;
8324 case DRM_MODE_CONNECTOR_DVID:
8325 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
8331 drm_object_attach_property(&aconnector->base.base,
8332 dm->ddev->mode_config.scaling_mode_property,
8333 DRM_MODE_SCALE_NONE);
8335 drm_object_attach_property(&aconnector->base.base,
8336 adev->mode_info.underscan_property,
8338 drm_object_attach_property(&aconnector->base.base,
8339 adev->mode_info.underscan_hborder_property,
8341 drm_object_attach_property(&aconnector->base.base,
8342 adev->mode_info.underscan_vborder_property,
8345 if (!aconnector->mst_port)
8346 drm_connector_attach_max_bpc_property(&aconnector->base, 8, 16);
8348 /* This defaults to the max in the range, but we want 8bpc for non-edp. */
8349 aconnector->base.state->max_bpc = (connector_type == DRM_MODE_CONNECTOR_eDP) ? 16 : 8;
8350 aconnector->base.state->max_requested_bpc = aconnector->base.state->max_bpc;
8352 if (connector_type == DRM_MODE_CONNECTOR_eDP &&
8353 (dc_is_dmcu_initialized(adev->dm.dc) || adev->dm.dc->ctx->dmub_srv)) {
8354 drm_object_attach_property(&aconnector->base.base,
8355 adev->mode_info.abm_level_property, 0);
8358 if (connector_type == DRM_MODE_CONNECTOR_HDMIA ||
8359 connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
8360 connector_type == DRM_MODE_CONNECTOR_eDP) {
8361 drm_connector_attach_hdr_output_metadata_property(&aconnector->base);
8363 if (!aconnector->mst_port)
8364 drm_connector_attach_vrr_capable_property(&aconnector->base);
8366 #ifdef CONFIG_DRM_AMD_DC_HDCP
8367 if (adev->dm.hdcp_workqueue)
8368 drm_connector_attach_content_protection_property(&aconnector->base, true);
8373 static int amdgpu_dm_i2c_xfer(struct i2c_adapter *i2c_adap,
8374 struct i2c_msg *msgs, int num)
8376 struct amdgpu_i2c_adapter *i2c = i2c_get_adapdata(i2c_adap);
8377 struct ddc_service *ddc_service = i2c->ddc_service;
8378 struct i2c_command cmd;
8382 cmd.payloads = kcalloc(num, sizeof(struct i2c_payload), GFP_KERNEL);
8387 cmd.number_of_payloads = num;
8388 cmd.engine = I2C_COMMAND_ENGINE_DEFAULT;
8391 for (i = 0; i < num; i++) {
8392 cmd.payloads[i].write = !(msgs[i].flags & I2C_M_RD);
8393 cmd.payloads[i].address = msgs[i].addr;
8394 cmd.payloads[i].length = msgs[i].len;
8395 cmd.payloads[i].data = msgs[i].buf;
8399 ddc_service->ctx->dc,
8400 ddc_service->ddc_pin->hw_info.ddc_channel,
8404 kfree(cmd.payloads);
8408 static u32 amdgpu_dm_i2c_func(struct i2c_adapter *adap)
8410 return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
8413 static const struct i2c_algorithm amdgpu_dm_i2c_algo = {
8414 .master_xfer = amdgpu_dm_i2c_xfer,
8415 .functionality = amdgpu_dm_i2c_func,
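/*
 * Minimal usage sketch (not part of the driver): how a caller could issue a
 * DDC-style write-then-read through an adapter backed by amdgpu_dm_i2c_algo
 * above. The 0x50 slave address and single-byte offset are assumptions made
 * purely for this example; amdgpu_dm_i2c_xfer() turns each i2c_msg into a dc
 * i2c_payload and submits the whole command to DC.
 */
static int __maybe_unused amdgpu_dm_i2c_example_read(struct i2c_adapter *adap,
						     u8 offset, u8 *buf, u16 len)
{
	struct i2c_msg msgs[] = {
		{ .addr = 0x50, .flags = 0,        .len = 1,   .buf = &offset },
		{ .addr = 0x50, .flags = I2C_M_RD, .len = len, .buf = buf },
	};

	return i2c_transfer(adap, msgs, ARRAY_SIZE(msgs)) == ARRAY_SIZE(msgs) ?
		0 : -EIO;
}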
8418 static struct amdgpu_i2c_adapter *
8419 create_i2c(struct ddc_service *ddc_service,
8423 struct amdgpu_device *adev = ddc_service->ctx->driver_context;
8424 struct amdgpu_i2c_adapter *i2c;
8426 i2c = kzalloc(sizeof(struct amdgpu_i2c_adapter), GFP_KERNEL);
8429 i2c->base.owner = THIS_MODULE;
8430 i2c->base.class = I2C_CLASS_DDC;
8431 i2c->base.dev.parent = &adev->pdev->dev;
8432 i2c->base.algo = &amdgpu_dm_i2c_algo;
8433 snprintf(i2c->base.name, sizeof(i2c->base.name), "AMDGPU DM i2c hw bus %d", link_index);
8434 i2c_set_adapdata(&i2c->base, i2c);
8435 i2c->ddc_service = ddc_service;
8436 if (i2c->ddc_service->ddc_pin)
8437 i2c->ddc_service->ddc_pin->hw_info.ddc_channel = link_index;
8444 * Note: this function assumes that dc_link_detect() was called for the
8445 * dc_link which will be represented by this aconnector.
8447 static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
8448 struct amdgpu_dm_connector *aconnector,
8449 uint32_t link_index,
8450 struct amdgpu_encoder *aencoder)
8454 struct dc *dc = dm->dc;
8455 struct dc_link *link = dc_get_link_at_index(dc, link_index);
8456 struct amdgpu_i2c_adapter *i2c;
8458 link->priv = aconnector;
8460 DRM_DEBUG_DRIVER("%s()\n", __func__);
8462 i2c = create_i2c(link->ddc, link->link_index, &res);
8464 DRM_ERROR("Failed to create i2c adapter data\n");
8468 aconnector->i2c = i2c;
8469 res = i2c_add_adapter(&i2c->base);
8472 DRM_ERROR("Failed to register hw i2c %d\n", link->link_index);
8476 connector_type = to_drm_connector_type(link->connector_signal);
8478 res = drm_connector_init_with_ddc(
8481 &amdgpu_dm_connector_funcs,
8486 DRM_ERROR("connector_init failed\n");
8487 aconnector->connector_id = -1;
8491 drm_connector_helper_add(
8493 &amdgpu_dm_connector_helper_funcs);
8495 amdgpu_dm_connector_init_helper(
8502 drm_connector_attach_encoder(
8503 &aconnector->base, &aencoder->base);
8505 if (connector_type == DRM_MODE_CONNECTOR_DisplayPort
8506 || connector_type == DRM_MODE_CONNECTOR_eDP)
8507 amdgpu_dm_initialize_dp_connector(dm, aconnector, link->link_index);
8512 aconnector->i2c = NULL;
8517 int amdgpu_dm_get_encoder_crtc_mask(struct amdgpu_device *adev)
8519 switch (adev->mode_info.num_crtc) {
8536 static int amdgpu_dm_encoder_init(struct drm_device *dev,
8537 struct amdgpu_encoder *aencoder,
8538 uint32_t link_index)
8540 struct amdgpu_device *adev = drm_to_adev(dev);
8542 int res = drm_encoder_init(dev,
8544 &amdgpu_dm_encoder_funcs,
8545 DRM_MODE_ENCODER_TMDS,
8548 aencoder->base.possible_crtcs = amdgpu_dm_get_encoder_crtc_mask(adev);
8551 aencoder->encoder_id = link_index;
8553 aencoder->encoder_id = -1;
8555 drm_encoder_helper_add(&aencoder->base, &amdgpu_dm_encoder_helper_funcs);
8560 static void manage_dm_interrupts(struct amdgpu_device *adev,
8561 struct amdgpu_crtc *acrtc,
8565 * We have no guarantee that the frontend index maps to the same
8566 * backend index - some even map to more than one.
8568 * TODO: Use a different interrupt or check DC itself for the mapping.
8571 amdgpu_display_crtc_idx_to_irq_type(
8576 drm_crtc_vblank_on(&acrtc->base);
8579 &adev->pageflip_irq,
8581 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
8588 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
8596 &adev->pageflip_irq,
8598 drm_crtc_vblank_off(&acrtc->base);
8602 static void dm_update_pflip_irq_state(struct amdgpu_device *adev,
8603 struct amdgpu_crtc *acrtc)
8606 amdgpu_display_crtc_idx_to_irq_type(adev, acrtc->crtc_id);
8609 * This reads the current state for the IRQ and forcibly reapplies
8610 * the setting to hardware.
8612 amdgpu_irq_update(adev, &adev->pageflip_irq, irq_type);
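/*
 * is_scaling_state_different() below reports whether the scaling mode or the
 * underscan enable/border settings changed between the old and new connector
 * state; the commit tail uses it to decide whether the stream's src/dst
 * rectangles need to be recomputed.
 */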
8616 is_scaling_state_different(const struct dm_connector_state *dm_state,
8617 const struct dm_connector_state *old_dm_state)
8619 if (dm_state->scaling != old_dm_state->scaling)
8621 if (!dm_state->underscan_enable && old_dm_state->underscan_enable) {
8622 if (old_dm_state->underscan_hborder != 0 && old_dm_state->underscan_vborder != 0)
8624 } else if (dm_state->underscan_enable && !old_dm_state->underscan_enable) {
8625 if (dm_state->underscan_hborder != 0 && dm_state->underscan_vborder != 0)
8627 } else if (dm_state->underscan_hborder != old_dm_state->underscan_hborder ||
8628 dm_state->underscan_vborder != old_dm_state->underscan_vborder)
8633 #ifdef CONFIG_DRM_AMD_DC_HDCP
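/*
 * is_content_protection_different() below decides whether hdcp_update_display()
 * needs to run for this connector. It first normalizes the requested
 * content_protection value for the special cases documented inline (type
 * change, re-enable, S3 resume, stream re-enable, hotplug/dpms) and then
 * compares the old and new values.
 */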
8634 static bool is_content_protection_different(struct drm_connector_state *state,
8635 const struct drm_connector_state *old_state,
8636 const struct drm_connector *connector, struct hdcp_workqueue *hdcp_w)
8638 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
8639 struct dm_connector_state *dm_con_state = to_dm_connector_state(connector->state);
8641 /* Handle: Type0/1 change */
8642 if (old_state->hdcp_content_type != state->hdcp_content_type &&
8643 state->content_protection != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
8644 state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
8648 /* CP is being re-enabled, ignore this
8650 * Handles: ENABLED -> DESIRED
8652 if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED &&
8653 state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) {
8654 state->content_protection = DRM_MODE_CONTENT_PROTECTION_ENABLED;
8658 /* S3 resume case, since old state will always be 0 (UNDESIRED) and the restored state will be ENABLED
8660 * Handles: UNDESIRED -> ENABLED
8662 if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_UNDESIRED &&
8663 state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
8664 state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
8666 /* Stream removed and re-enabled
8668 * Can sometimes overlap with the HPD case,
8669 * thus set update_hdcp to false to avoid
8670 * setting HDCP multiple times.
8672 * Handles: DESIRED -> DESIRED (Special case)
8674 if (!(old_state->crtc && old_state->crtc->enabled) &&
8675 state->crtc && state->crtc->enabled &&
8676 connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) {
8677 dm_con_state->update_hdcp = false;
8681 /* Hot-plug, headless s3, dpms
8683 * Only start HDCP if the display is connected/enabled.
8684 * update_hdcp flag will be set to false until the next
8687 * Handles: DESIRED -> DESIRED (Special case)
8689 if (dm_con_state->update_hdcp && state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED &&
8690 connector->dpms == DRM_MODE_DPMS_ON && aconnector->dc_sink != NULL) {
8691 dm_con_state->update_hdcp = false;
8696 * Handles: UNDESIRED -> UNDESIRED
8697 * DESIRED -> DESIRED
8698 * ENABLED -> ENABLED
8700 if (old_state->content_protection == state->content_protection)
8704 * Handles: UNDESIRED -> DESIRED
8705 * DESIRED -> UNDESIRED
8706 * ENABLED -> UNDESIRED
8708 if (state->content_protection != DRM_MODE_CONTENT_PROTECTION_ENABLED)
8712 * Handles: DESIRED -> ENABLED
8718 static void remove_stream(struct amdgpu_device *adev,
8719 struct amdgpu_crtc *acrtc,
8720 struct dc_stream_state *stream)
8722 /* this is the update mode case */
8724 acrtc->otg_inst = -1;
8725 acrtc->enabled = false;
8728 static int get_cursor_position(struct drm_plane *plane, struct drm_crtc *crtc,
8729 struct dc_cursor_position *position)
8731 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
8733 int xorigin = 0, yorigin = 0;
8735 if (!crtc || !plane->state->fb)
8738 if ((plane->state->crtc_w > amdgpu_crtc->max_cursor_width) ||
8739 (plane->state->crtc_h > amdgpu_crtc->max_cursor_height)) {
8740 DRM_ERROR("%s: bad cursor width or height %d x %d\n",
8742 plane->state->crtc_w,
8743 plane->state->crtc_h);
8747 x = plane->state->crtc_x;
8748 y = plane->state->crtc_y;
8750 if (x <= -amdgpu_crtc->max_cursor_width ||
8751 y <= -amdgpu_crtc->max_cursor_height)
8755 xorigin = min(-x, amdgpu_crtc->max_cursor_width - 1);
8759 yorigin = min(-y, amdgpu_crtc->max_cursor_height - 1);
8762 position->enable = true;
8763 position->translate_by_source = true;
8766 position->x_hotspot = xorigin;
8767 position->y_hotspot = yorigin;
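/*
 * Minimal sketch (not part of the driver) of the clamping performed above:
 * when the cursor plane hangs off the top/left edge (negative crtc_x/crtc_y),
 * the programmed position stays at 0 and the overhang is folded into the
 * hotspot, capped at max_cursor_{width,height} - 1. The helper name is made
 * up for the example.
 */
static inline int cursor_hotspot_for_edge_sketch(int crtc_pos, int max_cursor_dim)
{
	return crtc_pos < 0 ? min(-crtc_pos, max_cursor_dim - 1) : 0;
}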
8772 static void handle_cursor_update(struct drm_plane *plane,
8773 struct drm_plane_state *old_plane_state)
8775 struct amdgpu_device *adev = drm_to_adev(plane->dev);
8776 struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(plane->state->fb);
8777 struct drm_crtc *crtc = afb ? plane->state->crtc : old_plane_state->crtc;
8778 struct dm_crtc_state *crtc_state = crtc ? to_dm_crtc_state(crtc->state) : NULL;
8779 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
8780 uint64_t address = afb ? afb->address : 0;
8781 struct dc_cursor_position position = {0};
8782 struct dc_cursor_attributes attributes;
8785 if (!plane->state->fb && !old_plane_state->fb)
8788 DC_LOG_CURSOR("%s: crtc_id=%d with size %d to %d\n",
8790 amdgpu_crtc->crtc_id,
8791 plane->state->crtc_w,
8792 plane->state->crtc_h);
8794 ret = get_cursor_position(plane, crtc, &position);
8798 if (!position.enable) {
8799 /* turn off cursor */
8800 if (crtc_state && crtc_state->stream) {
8801 mutex_lock(&adev->dm.dc_lock);
8802 dc_stream_set_cursor_position(crtc_state->stream,
8804 mutex_unlock(&adev->dm.dc_lock);
8809 amdgpu_crtc->cursor_width = plane->state->crtc_w;
8810 amdgpu_crtc->cursor_height = plane->state->crtc_h;
8812 memset(&attributes, 0, sizeof(attributes));
8813 attributes.address.high_part = upper_32_bits(address);
8814 attributes.address.low_part = lower_32_bits(address);
8815 attributes.width = plane->state->crtc_w;
8816 attributes.height = plane->state->crtc_h;
8817 attributes.color_format = CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA;
8818 attributes.rotation_angle = 0;
8819 attributes.attribute_flags.value = 0;
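/*
 * dc_cursor_attributes.pitch is taken in pixels here: convert the
 * framebuffer's byte pitch using its bytes-per-pixel (cpp).
 */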
8821 attributes.pitch = afb->base.pitches[0] / afb->base.format->cpp[0];
8823 if (crtc_state->stream) {
8824 mutex_lock(&adev->dm.dc_lock);
8825 if (!dc_stream_set_cursor_attributes(crtc_state->stream,
8827 DRM_ERROR("DC failed to set cursor attributes\n");
8829 if (!dc_stream_set_cursor_position(crtc_state->stream,
8831 DRM_ERROR("DC failed to set cursor position\n");
8832 mutex_unlock(&adev->dm.dc_lock);
8836 static void prepare_flip_isr(struct amdgpu_crtc *acrtc)
8839 assert_spin_locked(&acrtc->base.dev->event_lock);
8840 WARN_ON(acrtc->event);
8842 acrtc->event = acrtc->base.state->event;
8844 /* Set the flip status */
8845 acrtc->pflip_status = AMDGPU_FLIP_SUBMITTED;
8847 /* Mark this event as consumed */
8848 acrtc->base.state->event = NULL;
8850 DC_LOG_PFLIP("crtc:%d, pflip_stat:AMDGPU_FLIP_SUBMITTED\n",
8854 static void update_freesync_state_on_stream(
8855 struct amdgpu_display_manager *dm,
8856 struct dm_crtc_state *new_crtc_state,
8857 struct dc_stream_state *new_stream,
8858 struct dc_plane_state *surface,
8859 u32 flip_timestamp_in_us)
8861 struct mod_vrr_params vrr_params;
8862 struct dc_info_packet vrr_infopacket = {0};
8863 struct amdgpu_device *adev = dm->adev;
8864 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_crtc_state->base.crtc);
8865 unsigned long flags;
8866 bool pack_sdp_v1_3 = false;
8872 * TODO: Determine why min/max totals and vrefresh can be 0 here.
8873 * For now it's sufficient to just guard against these conditions.
8876 if (!new_stream->timing.h_total || !new_stream->timing.v_total)
8879 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
8880 vrr_params = acrtc->dm_irq_params.vrr_params;
8883 mod_freesync_handle_preflip(
8884 dm->freesync_module,
8887 flip_timestamp_in_us,
8890 if (adev->family < AMDGPU_FAMILY_AI &&
8891 amdgpu_dm_vrr_active(new_crtc_state)) {
8892 mod_freesync_handle_v_update(dm->freesync_module,
8893 new_stream, &vrr_params);
8895 /* Need to call this before the frame ends. */
8896 dc_stream_adjust_vmin_vmax(dm->dc,
8897 new_crtc_state->stream,
8898 &vrr_params.adjust);
8902 mod_freesync_build_vrr_infopacket(
8903 dm->freesync_module,
8907 TRANSFER_FUNC_UNKNOWN,
8911 new_crtc_state->freesync_timing_changed |=
8912 (memcmp(&acrtc->dm_irq_params.vrr_params.adjust,
8914 sizeof(vrr_params.adjust)) != 0);
8916 new_crtc_state->freesync_vrr_info_changed |=
8917 (memcmp(&new_crtc_state->vrr_infopacket,
8919 sizeof(vrr_infopacket)) != 0);
8921 acrtc->dm_irq_params.vrr_params = vrr_params;
8922 new_crtc_state->vrr_infopacket = vrr_infopacket;
8924 new_stream->adjust = acrtc->dm_irq_params.vrr_params.adjust;
8925 new_stream->vrr_infopacket = vrr_infopacket;
8927 if (new_crtc_state->freesync_vrr_info_changed)
8928 DRM_DEBUG_KMS("VRR packet update: crtc=%u enabled=%d state=%d",
8929 new_crtc_state->base.crtc->base.id,
8930 (int)new_crtc_state->base.vrr_enabled,
8931 (int)vrr_params.state);
8933 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
8936 static void update_stream_irq_parameters(
8937 struct amdgpu_display_manager *dm,
8938 struct dm_crtc_state *new_crtc_state)
8940 struct dc_stream_state *new_stream = new_crtc_state->stream;
8941 struct mod_vrr_params vrr_params;
8942 struct mod_freesync_config config = new_crtc_state->freesync_config;
8943 struct amdgpu_device *adev = dm->adev;
8944 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_crtc_state->base.crtc);
8945 unsigned long flags;
8951 * TODO: Determine why min/max totals and vrefresh can be 0 here.
8952 * For now it's sufficient to just guard against these conditions.
8954 if (!new_stream->timing.h_total || !new_stream->timing.v_total)
8957 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
8958 vrr_params = acrtc->dm_irq_params.vrr_params;
8960 if (new_crtc_state->vrr_supported &&
8961 config.min_refresh_in_uhz &&
8962 config.max_refresh_in_uhz) {
8964 * if freesync compatible mode was set, config.state will be set
8967 if (config.state == VRR_STATE_ACTIVE_FIXED && config.fixed_refresh_in_uhz &&
8968 (!drm_atomic_crtc_needs_modeset(&new_crtc_state->base) ||
8969 new_crtc_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED)) {
8970 vrr_params.max_refresh_in_uhz = config.max_refresh_in_uhz;
8971 vrr_params.min_refresh_in_uhz = config.min_refresh_in_uhz;
8972 vrr_params.fixed_refresh_in_uhz = config.fixed_refresh_in_uhz;
8973 vrr_params.state = VRR_STATE_ACTIVE_FIXED;
8975 config.state = new_crtc_state->base.vrr_enabled ?
8976 VRR_STATE_ACTIVE_VARIABLE :
8980 config.state = VRR_STATE_UNSUPPORTED;
8983 mod_freesync_build_vrr_params(dm->freesync_module,
8985 &config, &vrr_params);
8987 new_crtc_state->freesync_timing_changed |=
8988 (memcmp(&acrtc->dm_irq_params.vrr_params.adjust,
8989 &vrr_params.adjust, sizeof(vrr_params.adjust)) != 0);
8991 new_crtc_state->freesync_config = config;
8992 /* Copy state for access from DM IRQ handler */
8993 acrtc->dm_irq_params.freesync_config = config;
8994 acrtc->dm_irq_params.active_planes = new_crtc_state->active_planes;
8995 acrtc->dm_irq_params.vrr_params = vrr_params;
8996 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
8999 static void amdgpu_dm_handle_vrr_transition(struct dm_crtc_state *old_state,
9000 struct dm_crtc_state *new_state)
9002 bool old_vrr_active = amdgpu_dm_vrr_active(old_state);
9003 bool new_vrr_active = amdgpu_dm_vrr_active(new_state);
9005 if (!old_vrr_active && new_vrr_active) {
9006 /* Transition VRR inactive -> active:
9007 * While VRR is active, we must not disable vblank irq, as a
9008 * re-enable after a disable would compute bogus vblank/pflip
9009 * timestamps if it happened inside the display front porch.
9011 * We also need vupdate irq for the actual core vblank handling
9014 dm_set_vupdate_irq(new_state->base.crtc, true);
9015 drm_crtc_vblank_get(new_state->base.crtc);
9016 DRM_DEBUG_DRIVER("%s: crtc=%u VRR off->on: Get vblank ref\n",
9017 __func__, new_state->base.crtc->base.id);
9018 } else if (old_vrr_active && !new_vrr_active) {
9019 /* Transition VRR active -> inactive:
9020 * Allow vblank irq disable again for fixed refresh rate.
9022 dm_set_vupdate_irq(new_state->base.crtc, false);
9023 drm_crtc_vblank_put(new_state->base.crtc);
9024 DRM_DEBUG_DRIVER("%s: crtc=%u VRR on->off: Drop vblank ref\n",
9025 __func__, new_state->base.crtc->base.id);
9029 static void amdgpu_dm_commit_cursors(struct drm_atomic_state *state)
9031 struct drm_plane *plane;
9032 struct drm_plane_state *old_plane_state;
9036 * TODO: Make this per-stream so we don't issue redundant updates for
9037 * commits with multiple streams.
9039 for_each_old_plane_in_state(state, plane, old_plane_state, i)
9040 if (plane->type == DRM_PLANE_TYPE_CURSOR)
9041 handle_cursor_update(plane, old_plane_state);
9044 static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
9045 struct dc_state *dc_state,
9046 struct drm_device *dev,
9047 struct amdgpu_display_manager *dm,
9048 struct drm_crtc *pcrtc,
9049 bool wait_for_vblank)
9052 uint64_t timestamp_ns;
9053 struct drm_plane *plane;
9054 struct drm_plane_state *old_plane_state, *new_plane_state;
9055 struct amdgpu_crtc *acrtc_attach = to_amdgpu_crtc(pcrtc);
9056 struct drm_crtc_state *new_pcrtc_state =
9057 drm_atomic_get_new_crtc_state(state, pcrtc);
9058 struct dm_crtc_state *acrtc_state = to_dm_crtc_state(new_pcrtc_state);
9059 struct dm_crtc_state *dm_old_crtc_state =
9060 to_dm_crtc_state(drm_atomic_get_old_crtc_state(state, pcrtc));
9061 int planes_count = 0, vpos, hpos;
9063 unsigned long flags;
9064 struct amdgpu_bo *abo;
9065 uint32_t target_vblank, last_flip_vblank;
9066 bool vrr_active = amdgpu_dm_vrr_active(acrtc_state);
9067 bool pflip_present = false;
9069 struct dc_surface_update surface_updates[MAX_SURFACES];
9070 struct dc_plane_info plane_infos[MAX_SURFACES];
9071 struct dc_scaling_info scaling_infos[MAX_SURFACES];
9072 struct dc_flip_addrs flip_addrs[MAX_SURFACES];
9073 struct dc_stream_update stream_update;
9076 bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
9079 dm_error("Failed to allocate update bundle\n");
9084 * Disable the cursor first if we're disabling all the planes.
9085 * It'll remain on the screen after the planes are re-enabled
9088 if (acrtc_state->active_planes == 0)
9089 amdgpu_dm_commit_cursors(state);
9091 /* update planes when needed */
9092 for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
9093 struct drm_crtc *crtc = new_plane_state->crtc;
9094 struct drm_crtc_state *new_crtc_state;
9095 struct drm_framebuffer *fb = new_plane_state->fb;
9096 struct amdgpu_framebuffer *afb = (struct amdgpu_framebuffer *)fb;
9097 bool plane_needs_flip;
9098 struct dc_plane_state *dc_plane;
9099 struct dm_plane_state *dm_new_plane_state = to_dm_plane_state(new_plane_state);
9101 /* Cursor plane is handled after stream updates */
9102 if (plane->type == DRM_PLANE_TYPE_CURSOR)
9105 if (!fb || !crtc || pcrtc != crtc)
9108 new_crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
9109 if (!new_crtc_state->active)
9112 dc_plane = dm_new_plane_state->dc_state;
9114 bundle->surface_updates[planes_count].surface = dc_plane;
9115 if (new_pcrtc_state->color_mgmt_changed) {
9116 bundle->surface_updates[planes_count].gamma = dc_plane->gamma_correction;
9117 bundle->surface_updates[planes_count].in_transfer_func = dc_plane->in_transfer_func;
9118 bundle->surface_updates[planes_count].gamut_remap_matrix = &dc_plane->gamut_remap_matrix;
9121 fill_dc_scaling_info(dm->adev, new_plane_state,
9122 &bundle->scaling_infos[planes_count]);
9124 bundle->surface_updates[planes_count].scaling_info =
9125 &bundle->scaling_infos[planes_count];
9127 plane_needs_flip = old_plane_state->fb && new_plane_state->fb;
9129 pflip_present = pflip_present || plane_needs_flip;
9131 if (!plane_needs_flip) {
9136 abo = gem_to_amdgpu_bo(fb->obj[0]);
9139 * Wait for all fences on this FB. Do limited wait to avoid
9140 * deadlock during GPU reset when this fence will not signal
9141 * but we hold reservation lock for the BO.
9143 r = dma_resv_wait_timeout(abo->tbo.base.resv, true, false,
9144 msecs_to_jiffies(5000));
9145 if (unlikely(r <= 0))
9146 DRM_ERROR("Waiting for fences timed out!");
9148 fill_dc_plane_info_and_addr(
9149 dm->adev, new_plane_state,
9151 &bundle->plane_infos[planes_count],
9152 &bundle->flip_addrs[planes_count].address,
9153 afb->tmz_surface, false);
9155 DRM_DEBUG_ATOMIC("plane: id=%d dcc_en=%d\n",
9156 new_plane_state->plane->index,
9157 bundle->plane_infos[planes_count].dcc.enable);
9159 bundle->surface_updates[planes_count].plane_info =
9160 &bundle->plane_infos[planes_count];
9163 * Only allow immediate flips for fast updates that don't
9164 * change FB pitch, DCC state, rotation or mirroring.
9166 bundle->flip_addrs[planes_count].flip_immediate =
9167 crtc->state->async_flip &&
9168 acrtc_state->update_type == UPDATE_TYPE_FAST;
9170 timestamp_ns = ktime_get_ns();
9171 bundle->flip_addrs[planes_count].flip_timestamp_in_us = div_u64(timestamp_ns, 1000);
9172 bundle->surface_updates[planes_count].flip_addr = &bundle->flip_addrs[planes_count];
9173 bundle->surface_updates[planes_count].surface = dc_plane;
9175 if (!bundle->surface_updates[planes_count].surface) {
9176 DRM_ERROR("No surface for CRTC: id=%d\n",
9177 acrtc_attach->crtc_id);
9181 if (plane == pcrtc->primary)
9182 update_freesync_state_on_stream(
9185 acrtc_state->stream,
9187 bundle->flip_addrs[planes_count].flip_timestamp_in_us);
9189 DRM_DEBUG_ATOMIC("%s Flipping to hi: 0x%x, low: 0x%x\n",
9191 bundle->flip_addrs[planes_count].address.grph.addr.high_part,
9192 bundle->flip_addrs[planes_count].address.grph.addr.low_part);
9198 if (pflip_present) {
9200 /* Use old throttling in non-vrr fixed refresh rate mode
9201 * to keep flip scheduling based on target vblank counts
9202 * working in a backwards compatible way, e.g., for
9203 * clients using the GLX_OML_sync_control extension or
9204 * DRI3/Present extension with defined target_msc.
9206 last_flip_vblank = amdgpu_get_vblank_counter_kms(pcrtc);
9209 /* For variable refresh rate mode only:
9210 * Get vblank of last completed flip to avoid > 1 vrr
9211 * flips per video frame by use of throttling, but allow
9212 * flip programming anywhere in the possibly large
9213 * variable vrr vblank interval for fine-grained flip
9214 * timing control and more opportunity to avoid stutter
9215 * on late submission of flips.
9217 spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
9218 last_flip_vblank = acrtc_attach->dm_irq_params.last_flip_vblank;
9219 spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
9222 target_vblank = last_flip_vblank + wait_for_vblank;
9225 * Wait until we're out of the vertical blank period before the one
9226 * targeted by the flip
9228 while ((acrtc_attach->enabled &&
9229 (amdgpu_display_get_crtc_scanoutpos(dm->ddev, acrtc_attach->crtc_id,
9230 0, &vpos, &hpos, NULL,
9231 NULL, &pcrtc->hwmode)
9232 & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK)) ==
9233 (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK) &&
9234 (int)(target_vblank -
9235 amdgpu_get_vblank_counter_kms(pcrtc)) > 0)) {
9236 usleep_range(1000, 1100);
9240 * Prepare the flip event for the pageflip interrupt to handle.
9242 * This only works in the case where we've already turned on the
9243 * appropriate hardware blocks (eg. HUBP) so in the transition case
9244 * from 0 -> n planes we have to skip a hardware generated event
9245 * and rely on sending it from software.
9247 if (acrtc_attach->base.state->event &&
9248 acrtc_state->active_planes > 0 &&
9249 !acrtc_state->force_dpms_off) {
9250 drm_crtc_vblank_get(pcrtc);
9252 spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
9254 WARN_ON(acrtc_attach->pflip_status != AMDGPU_FLIP_NONE);
9255 prepare_flip_isr(acrtc_attach);
9257 spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
9260 if (acrtc_state->stream) {
9261 if (acrtc_state->freesync_vrr_info_changed)
9262 bundle->stream_update.vrr_infopacket =
9263 &acrtc_state->stream->vrr_infopacket;
9267 /* Update the planes if changed or disable if we don't have any. */
9268 if ((planes_count || acrtc_state->active_planes == 0) &&
9269 acrtc_state->stream) {
9270 #if defined(CONFIG_DRM_AMD_DC_DCN)
9272 * If PSR or idle optimizations are enabled then flush out
9273 * any pending work before hardware programming.
9275 if (dm->vblank_control_workqueue)
9276 flush_workqueue(dm->vblank_control_workqueue);
9279 bundle->stream_update.stream = acrtc_state->stream;
9280 if (new_pcrtc_state->mode_changed) {
9281 bundle->stream_update.src = acrtc_state->stream->src;
9282 bundle->stream_update.dst = acrtc_state->stream->dst;
9285 if (new_pcrtc_state->color_mgmt_changed) {
9287 * TODO: This isn't fully correct since we've actually
9288 * already modified the stream in place.
9290 bundle->stream_update.gamut_remap =
9291 &acrtc_state->stream->gamut_remap_matrix;
9292 bundle->stream_update.output_csc_transform =
9293 &acrtc_state->stream->csc_color_matrix;
9294 bundle->stream_update.out_transfer_func =
9295 acrtc_state->stream->out_transfer_func;
9298 acrtc_state->stream->abm_level = acrtc_state->abm_level;
9299 if (acrtc_state->abm_level != dm_old_crtc_state->abm_level)
9300 bundle->stream_update.abm_level = &acrtc_state->abm_level;
9303 * If FreeSync state on the stream has changed then we need to
9304 * re-adjust the min/max bounds now that DC doesn't handle this
9305 * as part of commit.
9307 if (is_dc_timing_adjust_needed(dm_old_crtc_state, acrtc_state)) {
9308 spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
9309 dc_stream_adjust_vmin_vmax(
9310 dm->dc, acrtc_state->stream,
9311 &acrtc_attach->dm_irq_params.vrr_params.adjust);
9312 spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
9314 mutex_lock(&dm->dc_lock);
9315 if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
9316 acrtc_state->stream->link->psr_settings.psr_allow_active)
9317 amdgpu_dm_psr_disable(acrtc_state->stream);
9319 dc_commit_updates_for_stream(dm->dc,
9320 bundle->surface_updates,
9322 acrtc_state->stream,
9323 &bundle->stream_update,
9327 * Enable or disable the interrupts on the backend.
9329 * Most pipes are put into power gating when unused.
9331 * When power gating is enabled on a pipe we lose the
9332 * interrupt enablement state when power gating is disabled.
9334 * So we need to update the IRQ control state in hardware
9335 * whenever the pipe turns on (since it could be previously
9336 * power gated) or off (since some pipes can't be power gated
9339 if (dm_old_crtc_state->active_planes != acrtc_state->active_planes)
9340 dm_update_pflip_irq_state(drm_to_adev(dev),
9343 if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
9344 acrtc_state->stream->link->psr_settings.psr_version != DC_PSR_VERSION_UNSUPPORTED &&
9345 !acrtc_state->stream->link->psr_settings.psr_feature_enabled)
9346 amdgpu_dm_link_setup_psr(acrtc_state->stream);
9348 /* Decrement skip count when PSR is enabled and we're doing fast updates. */
9349 if (acrtc_state->update_type == UPDATE_TYPE_FAST &&
9350 acrtc_state->stream->link->psr_settings.psr_feature_enabled) {
9351 struct amdgpu_dm_connector *aconn =
9352 (struct amdgpu_dm_connector *)acrtc_state->stream->dm_stream_context;
9354 if (aconn->psr_skip_count > 0)
9355 aconn->psr_skip_count--;
9357 /* Allow PSR when skip count is 0. */
9358 acrtc_attach->dm_irq_params.allow_psr_entry = !aconn->psr_skip_count;
9360 acrtc_attach->dm_irq_params.allow_psr_entry = false;
9363 mutex_unlock(&dm->dc_lock);
9367 * Update cursor state *after* programming all the planes.
9368 * This avoids redundant programming in the case where we're going
9369 * to be disabling a single plane - those pipes are being disabled.
9371 if (acrtc_state->active_planes)
9372 amdgpu_dm_commit_cursors(state);
9378 static void amdgpu_dm_commit_audio(struct drm_device *dev,
9379 struct drm_atomic_state *state)
9381 struct amdgpu_device *adev = drm_to_adev(dev);
9382 struct amdgpu_dm_connector *aconnector;
9383 struct drm_connector *connector;
9384 struct drm_connector_state *old_con_state, *new_con_state;
9385 struct drm_crtc_state *new_crtc_state;
9386 struct dm_crtc_state *new_dm_crtc_state;
9387 const struct dc_stream_status *status;
9390 /* Notify device removals. */
9391 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
9392 if (old_con_state->crtc != new_con_state->crtc) {
9393 /* CRTC changes require notification. */
9397 if (!new_con_state->crtc)
9400 new_crtc_state = drm_atomic_get_new_crtc_state(
9401 state, new_con_state->crtc);
9403 if (!new_crtc_state)
9406 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
9410 aconnector = to_amdgpu_dm_connector(connector);
9412 mutex_lock(&adev->dm.audio_lock);
9413 inst = aconnector->audio_inst;
9414 aconnector->audio_inst = -1;
9415 mutex_unlock(&adev->dm.audio_lock);
9417 amdgpu_dm_audio_eld_notify(adev, inst);
9420 /* Notify audio device additions. */
9421 for_each_new_connector_in_state(state, connector, new_con_state, i) {
9422 if (!new_con_state->crtc)
9425 new_crtc_state = drm_atomic_get_new_crtc_state(
9426 state, new_con_state->crtc);
9428 if (!new_crtc_state)
9431 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
9434 new_dm_crtc_state = to_dm_crtc_state(new_crtc_state);
9435 if (!new_dm_crtc_state->stream)
9438 status = dc_stream_get_status(new_dm_crtc_state->stream);
9442 aconnector = to_amdgpu_dm_connector(connector);
9444 mutex_lock(&adev->dm.audio_lock);
9445 inst = status->audio_inst;
9446 aconnector->audio_inst = inst;
9447 mutex_unlock(&adev->dm.audio_lock);
9449 amdgpu_dm_audio_eld_notify(adev, inst);
9454 * amdgpu_dm_crtc_copy_transient_flags - copy mirrored flags from DRM to DC
9455 * @crtc_state: the DRM CRTC state
9456 * @stream_state: the DC stream state.
9458 * Copy the mirrored transient state flags from DRM, to DC. It is used to bring
9459 * a dc_stream_state's flags in sync with a drm_crtc_state's flags.
9461 static void amdgpu_dm_crtc_copy_transient_flags(struct drm_crtc_state *crtc_state,
9462 struct dc_stream_state *stream_state)
9464 stream_state->mode_changed = drm_atomic_crtc_needs_modeset(crtc_state);
9468 * amdgpu_dm_atomic_commit_tail() - AMDgpu DM's commit tail implementation.
9469 * @state: The atomic state to commit
9471 * This will tell DC to commit the constructed DC state from atomic_check,
9472 * programming the hardware. Any failures here implies a hardware failure, since
9473 * atomic check should have filtered anything non-kosher.
9475 static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
9477 struct drm_device *dev = state->dev;
9478 struct amdgpu_device *adev = drm_to_adev(dev);
9479 struct amdgpu_display_manager *dm = &adev->dm;
9480 struct dm_atomic_state *dm_state;
9481 struct dc_state *dc_state = NULL, *dc_state_temp = NULL;
9483 struct drm_crtc *crtc;
9484 struct drm_crtc_state *old_crtc_state, *new_crtc_state;
9485 unsigned long flags;
9486 bool wait_for_vblank = true;
9487 struct drm_connector *connector;
9488 struct drm_connector_state *old_con_state, *new_con_state;
9489 struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
9490 int crtc_disable_count = 0;
9491 bool mode_set_reset_required = false;
9493 trace_amdgpu_dm_atomic_commit_tail_begin(state);
9495 drm_atomic_helper_update_legacy_modeset_state(dev, state);
9497 dm_state = dm_atomic_get_new_state(state);
9498 if (dm_state && dm_state->context) {
9499 dc_state = dm_state->context;
9501 /* No state changes, retain current state. */
9502 dc_state_temp = dc_create_state(dm->dc);
9503 ASSERT(dc_state_temp);
9504 dc_state = dc_state_temp;
9505 dc_resource_state_copy_construct_current(dm->dc, dc_state);
9508 for_each_oldnew_crtc_in_state (state, crtc, old_crtc_state,
9509 new_crtc_state, i) {
9510 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
9512 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9514 if (old_crtc_state->active &&
9515 (!new_crtc_state->active ||
9516 drm_atomic_crtc_needs_modeset(new_crtc_state))) {
9517 manage_dm_interrupts(adev, acrtc, false);
9518 dc_stream_release(dm_old_crtc_state->stream);
9522 drm_atomic_helper_calc_timestamping_constants(state);
9524 /* update changed items */
9525 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
9526 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
9528 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9529 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9532 "amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
9533 "planes_changed:%d, mode_changed:%d,active_changed:%d,"
9534 "connectors_changed:%d\n",
9536 new_crtc_state->enable,
9537 new_crtc_state->active,
9538 new_crtc_state->planes_changed,
9539 new_crtc_state->mode_changed,
9540 new_crtc_state->active_changed,
9541 new_crtc_state->connectors_changed);
9543 /* Disable cursor if disabling crtc */
9544 if (old_crtc_state->active && !new_crtc_state->active) {
9545 struct dc_cursor_position position;
9547 memset(&position, 0, sizeof(position));
9548 mutex_lock(&dm->dc_lock);
9549 dc_stream_set_cursor_position(dm_old_crtc_state->stream, &position);
9550 mutex_unlock(&dm->dc_lock);
9553 /* Copy all transient state flags into dc state */
9554 if (dm_new_crtc_state->stream) {
9555 amdgpu_dm_crtc_copy_transient_flags(&dm_new_crtc_state->base,
9556 dm_new_crtc_state->stream);
9559 /* handles headless hotplug case, updating new_state and
9560 * aconnector as needed
9563 if (modeset_required(new_crtc_state, dm_new_crtc_state->stream, dm_old_crtc_state->stream)) {
9565 DRM_DEBUG_ATOMIC("Atomic commit: SET crtc id %d: [%p]\n", acrtc->crtc_id, acrtc);
9567 if (!dm_new_crtc_state->stream) {
9569 * This could happen because of issues with
9570 * delivery of userspace notifications:
9571 * userspace tries to set a mode on a display
9572 * which is in fact disconnected, so dc_sink
9573 * is NULL on the aconnector in this case.
9574 * We expect a mode reset to come soon.
9576 * This can also happen when an unplug occurs
9577 * while the resume sequence is completing.
9579 * In that case, we want to pretend we still
9580 * have a sink to keep the pipe running so that
9581 * hw state stays consistent with the sw state.
9583 DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
9584 __func__, acrtc->base.base.id);
9588 if (dm_old_crtc_state->stream)
9589 remove_stream(adev, acrtc, dm_old_crtc_state->stream);
9591 pm_runtime_get_noresume(dev->dev);
9593 acrtc->enabled = true;
9594 acrtc->hw_mode = new_crtc_state->mode;
9595 crtc->hwmode = new_crtc_state->mode;
9596 mode_set_reset_required = true;
9597 } else if (modereset_required(new_crtc_state)) {
9598 DRM_DEBUG_ATOMIC("Atomic commit: RESET. crtc id %d:[%p]\n", acrtc->crtc_id, acrtc);
9599 /* i.e. reset mode */
9600 if (dm_old_crtc_state->stream)
9601 remove_stream(adev, acrtc, dm_old_crtc_state->stream);
9603 mode_set_reset_required = true;
9605 } /* for_each_crtc_in_state() */
9608 /* if there was a mode set or reset, disable eDP PSR */
9609 if (mode_set_reset_required) {
9610 #if defined(CONFIG_DRM_AMD_DC_DCN)
9611 if (dm->vblank_control_workqueue)
9612 flush_workqueue(dm->vblank_control_workqueue);
9614 amdgpu_dm_psr_disable_all(dm);
9617 dm_enable_per_frame_crtc_master_sync(dc_state);
9618 mutex_lock(&dm->dc_lock);
9619 WARN_ON(!dc_commit_state(dm->dc, dc_state));
9620 #if defined(CONFIG_DRM_AMD_DC_DCN)
9621 /* Allow idle optimization when vblank count is 0 for display off */
9622 if (dm->active_vblank_irq_count == 0)
9623 dc_allow_idle_optimizations(dm->dc, true);
9625 mutex_unlock(&dm->dc_lock);
9628 for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
9629 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
9631 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9633 if (dm_new_crtc_state->stream != NULL) {
9634 const struct dc_stream_status *status =
9635 dc_stream_get_status(dm_new_crtc_state->stream);
9638 status = dc_stream_get_status_from_state(dc_state,
9639 dm_new_crtc_state->stream);
9641 DC_ERR("got no status for stream %p on acrtc%p\n", dm_new_crtc_state->stream, acrtc);
9643 acrtc->otg_inst = status->primary_otg_inst;
9646 #ifdef CONFIG_DRM_AMD_DC_HDCP
9647 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
9648 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
9649 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
9650 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
9652 new_crtc_state = NULL;
9655 new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
9657 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9659 if (dm_new_crtc_state && dm_new_crtc_state->stream == NULL &&
9660 connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED) {
9661 hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
9662 new_con_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
9663 dm_new_con_state->update_hdcp = true;
9667 if (is_content_protection_different(new_con_state, old_con_state, connector, adev->dm.hdcp_workqueue))
9668 hdcp_update_display(
9669 adev->dm.hdcp_workqueue, aconnector->dc_link->link_index, aconnector,
9670 new_con_state->hdcp_content_type,
9671 new_con_state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED);
9675 /* Handle connector state changes */
9676 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
9677 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
9678 struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
9679 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
9680 struct dc_surface_update dummy_updates[MAX_SURFACES];
9681 struct dc_stream_update stream_update;
9682 struct dc_info_packet hdr_packet;
9683 struct dc_stream_status *status = NULL;
9684 bool abm_changed, hdr_changed, scaling_changed;
9686 memset(&dummy_updates, 0, sizeof(dummy_updates));
9687 memset(&stream_update, 0, sizeof(stream_update));
9690 new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
9691 old_crtc_state = drm_atomic_get_old_crtc_state(state, &acrtc->base);
9694 /* Skip any modesets/resets */
9695 if (!acrtc || drm_atomic_crtc_needs_modeset(new_crtc_state))
9698 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9699 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9701 scaling_changed = is_scaling_state_different(dm_new_con_state,
9704 abm_changed = dm_new_crtc_state->abm_level !=
9705 dm_old_crtc_state->abm_level;
9708 !drm_connector_atomic_hdr_metadata_equal(old_con_state, new_con_state);
9710 if (!scaling_changed && !abm_changed && !hdr_changed)
9713 stream_update.stream = dm_new_crtc_state->stream;
9714 if (scaling_changed) {
9715 update_stream_scaling_settings(&dm_new_con_state->base.crtc->mode,
9716 dm_new_con_state, dm_new_crtc_state->stream);
9718 stream_update.src = dm_new_crtc_state->stream->src;
9719 stream_update.dst = dm_new_crtc_state->stream->dst;
9723 dm_new_crtc_state->stream->abm_level = dm_new_crtc_state->abm_level;
9725 stream_update.abm_level = &dm_new_crtc_state->abm_level;
9729 fill_hdr_info_packet(new_con_state, &hdr_packet);
9730 stream_update.hdr_static_metadata = &hdr_packet;
9733 status = dc_stream_get_status(dm_new_crtc_state->stream);
9735 if (WARN_ON(!status))
9738 WARN_ON(!status->plane_count);
9741 * TODO: DC refuses to perform stream updates without a dc_surface_update.
9742 * Here we create an empty update on each plane.
9743 * To fix this, DC should permit updating only stream properties.
9745 for (j = 0; j < status->plane_count; j++)
9746 dummy_updates[j].surface = status->plane_states[0];
9749 mutex_lock(&dm->dc_lock);
9750 dc_commit_updates_for_stream(dm->dc,
9752 status->plane_count,
9753 dm_new_crtc_state->stream,
9756 mutex_unlock(&dm->dc_lock);
9759 /* Count number of newly disabled CRTCs for dropping PM refs later. */
9760 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
9761 new_crtc_state, i) {
9762 if (old_crtc_state->active && !new_crtc_state->active)
9763 crtc_disable_count++;
9765 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9766 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9768 /* Update the freesync config on the crtc state and the params used by the irq handler */
9769 update_stream_irq_parameters(dm, dm_new_crtc_state);
9771 /* Handle vrr on->off / off->on transitions */
9772 amdgpu_dm_handle_vrr_transition(dm_old_crtc_state,
9777 * Enable interrupts for CRTCs that are newly enabled or went through
9778 * a modeset. This was intentionally deferred until after the front end
9779 * state was modified, to wait until the OTG was on, so that the IRQ
9780 * handlers don't access stale or invalid state.
9782 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
9783 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
9784 #ifdef CONFIG_DEBUG_FS
9785 bool configure_crc = false;
9786 enum amdgpu_dm_pipe_crc_source cur_crc_src;
9787 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
9788 struct crc_rd_work *crc_rd_wrk = dm->crc_rd_wrk;
9790 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
9791 cur_crc_src = acrtc->dm_irq_params.crc_src;
9792 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
9794 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9796 if (new_crtc_state->active &&
9797 (!old_crtc_state->active ||
9798 drm_atomic_crtc_needs_modeset(new_crtc_state))) {
9799 dc_stream_retain(dm_new_crtc_state->stream);
9800 acrtc->dm_irq_params.stream = dm_new_crtc_state->stream;
9801 manage_dm_interrupts(adev, acrtc, true);
9803 #ifdef CONFIG_DEBUG_FS
9805 * Frontend may have changed so reapply the CRC capture
9806 * settings for the stream.
9808 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9810 if (amdgpu_dm_is_valid_crc_source(cur_crc_src)) {
9811 configure_crc = true;
9812 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
9813 if (amdgpu_dm_crc_window_is_activated(crtc)) {
9814 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
9815 acrtc->dm_irq_params.crc_window.update_win = true;
9816 acrtc->dm_irq_params.crc_window.skip_frame_cnt = 2;
9817 spin_lock_irq(&crc_rd_wrk->crc_rd_work_lock);
9818 crc_rd_wrk->crtc = crtc;
9819 spin_unlock_irq(&crc_rd_wrk->crc_rd_work_lock);
9820 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
9826 if (amdgpu_dm_crtc_configure_crc_source(
9827 crtc, dm_new_crtc_state, cur_crc_src))
9828 DRM_DEBUG_DRIVER("Failed to configure crc source");
9833 for_each_new_crtc_in_state(state, crtc, new_crtc_state, j)
9834 if (new_crtc_state->async_flip)
9835 wait_for_vblank = false;
9837 /* update planes when needed per crtc */
9838 for_each_new_crtc_in_state(state, crtc, new_crtc_state, j) {
9839 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9841 if (dm_new_crtc_state->stream)
9842 amdgpu_dm_commit_planes(state, dc_state, dev,
9843 dm, crtc, wait_for_vblank);
9846 /* Update audio instances for each connector. */
9847 amdgpu_dm_commit_audio(dev, state);
9849 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) || \
9850 defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
9851 /* restore the backlight level */
9852 for (i = 0; i < dm->num_of_edps; i++) {
9853 if (dm->backlight_dev[i] &&
9854 (amdgpu_dm_backlight_get_level(dm, i) != dm->brightness[i]))
9855 amdgpu_dm_backlight_set_level(dm, i, dm->brightness[i]);
9859 * Send a vblank event for all events not handled in the flip path, and
9860 * mark the consumed events for drm_atomic_helper_commit_hw_done().
9862 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
9863 for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
9865 if (new_crtc_state->event)
9866 drm_send_event_locked(dev, &new_crtc_state->event->base);
9868 new_crtc_state->event = NULL;
9870 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
9872 /* Signal HW programming completion */
9873 drm_atomic_helper_commit_hw_done(state);
9875 if (wait_for_vblank)
9876 drm_atomic_helper_wait_for_flip_done(dev, state);
9878 drm_atomic_helper_cleanup_planes(dev, state);
9880 /* return the stolen vga memory back to VRAM */
9881 if (!adev->mman.keep_stolen_vga_memory)
9882 amdgpu_bo_free_kernel(&adev->mman.stolen_vga_memory, NULL, NULL);
9883 amdgpu_bo_free_kernel(&adev->mman.stolen_extended_memory, NULL, NULL);
9886 * Finally, drop a runtime PM reference for each newly disabled CRTC,
9887 * so we can put the GPU into runtime suspend if we're not driving any
9890 for (i = 0; i < crtc_disable_count; i++)
9891 pm_runtime_put_autosuspend(dev->dev);
9892 pm_runtime_mark_last_busy(dev->dev);
9895 dc_release_state(dc_state_temp);
9899 static int dm_force_atomic_commit(struct drm_connector *connector)
9902 struct drm_device *ddev = connector->dev;
9903 struct drm_atomic_state *state = drm_atomic_state_alloc(ddev);
9904 struct amdgpu_crtc *disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
9905 struct drm_plane *plane = disconnected_acrtc->base.primary;
9906 struct drm_connector_state *conn_state;
9907 struct drm_crtc_state *crtc_state;
9908 struct drm_plane_state *plane_state;
9913 state->acquire_ctx = ddev->mode_config.acquire_ctx;
9915 /* Construct an atomic state to restore previous display setting */
9918 * Attach connectors to drm_atomic_state
9920 conn_state = drm_atomic_get_connector_state(state, connector);
9922 ret = PTR_ERR_OR_ZERO(conn_state);
9926 /* Attach crtc to drm_atomic_state */
9927 crtc_state = drm_atomic_get_crtc_state(state, &disconnected_acrtc->base);
9929 ret = PTR_ERR_OR_ZERO(crtc_state);
9933 /* force a restore */
9934 crtc_state->mode_changed = true;
9936 /* Attach plane to drm_atomic_state */
9937 plane_state = drm_atomic_get_plane_state(state, plane);
9939 ret = PTR_ERR_OR_ZERO(plane_state);
9943 /* Call commit internally with the state we just constructed */
9944 ret = drm_atomic_commit(state);
9947 drm_atomic_state_put(state);
9949 DRM_ERROR("Restoring old state failed with %i\n", ret);
9955 * This function handles all cases when a set mode does not come upon hotplug.
9956 * This includes when a display is unplugged and then plugged back into the
9957 * same port, and when running without usermode desktop manager support.
9959 void dm_restore_drm_connector_state(struct drm_device *dev,
9960 struct drm_connector *connector)
9962 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
9963 struct amdgpu_crtc *disconnected_acrtc;
9964 struct dm_crtc_state *acrtc_state;
9966 if (!aconnector->dc_sink || !connector->state || !connector->encoder)
9969 disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
9970 if (!disconnected_acrtc)
9973 acrtc_state = to_dm_crtc_state(disconnected_acrtc->base.state);
9974 if (!acrtc_state->stream)
9978 * If the previous sink has not been released and differs from the current
9979 * one, we deduce we are in a state where we cannot rely on a usermode call
9980 * to turn on the display, so we do it here.
9982 if (acrtc_state->stream->sink != aconnector->dc_sink)
9983 dm_force_atomic_commit(&aconnector->base);
9987 * Grabs all modesetting locks to serialize against any blocking commits,
9988 * and waits for completion of all non-blocking commits.
9990 static int do_aquire_global_lock(struct drm_device *dev,
9991 struct drm_atomic_state *state)
9993 struct drm_crtc *crtc;
9994 struct drm_crtc_commit *commit;
9998 * Adding all modeset locks to acquire_ctx will
9999 * ensure that when the framework releases it, the
10000 * extra locks we are taking here will get released too.
10002 ret = drm_modeset_lock_all_ctx(dev, state->acquire_ctx);
10006 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
10007 spin_lock(&crtc->commit_lock);
10008 commit = list_first_entry_or_null(&crtc->commit_list,
10009 struct drm_crtc_commit, commit_entry);
10011 drm_crtc_commit_get(commit);
10012 spin_unlock(&crtc->commit_lock);
10018 * Make sure all pending HW programming completed and
10021 ret = wait_for_completion_interruptible_timeout(&commit->hw_done, 10*HZ);
10024 ret = wait_for_completion_interruptible_timeout(
10025 &commit->flip_done, 10*HZ);
10028 DRM_ERROR("[CRTC:%d:%s] hw_done or flip_done "
10029 "timed out\n", crtc->base.id, crtc->name);
10031 drm_crtc_commit_put(commit);
10034 return ret < 0 ? ret : 0;
10037 static void get_freesync_config_for_crtc(
10038 struct dm_crtc_state *new_crtc_state,
10039 struct dm_connector_state *new_con_state)
10041 struct mod_freesync_config config = {0};
10042 struct amdgpu_dm_connector *aconnector =
10043 to_amdgpu_dm_connector(new_con_state->base.connector);
10044 struct drm_display_mode *mode = &new_crtc_state->base.mode;
10045 int vrefresh = drm_mode_vrefresh(mode);
10046 bool fs_vid_mode = false;
10048 new_crtc_state->vrr_supported = new_con_state->freesync_capable &&
10049 vrefresh >= aconnector->min_vfreq &&
10050 vrefresh <= aconnector->max_vfreq;
10052 if (new_crtc_state->vrr_supported) {
10053 new_crtc_state->stream->ignore_msa_timing_param = true;
10054 fs_vid_mode = new_crtc_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED;
10056 config.min_refresh_in_uhz = aconnector->min_vfreq * 1000000;
10057 config.max_refresh_in_uhz = aconnector->max_vfreq * 1000000;
10058 config.vsif_supported = true;
10062 config.state = VRR_STATE_ACTIVE_FIXED;
10063 config.fixed_refresh_in_uhz = new_crtc_state->freesync_config.fixed_refresh_in_uhz;
10065 } else if (new_crtc_state->base.vrr_enabled) {
10066 config.state = VRR_STATE_ACTIVE_VARIABLE;
10068 config.state = VRR_STATE_INACTIVE;
10072 new_crtc_state->freesync_config = config;
10075 static void reset_freesync_config_for_crtc(
10076 struct dm_crtc_state *new_crtc_state)
10078 new_crtc_state->vrr_supported = false;
10080 memset(&new_crtc_state->vrr_infopacket, 0,
10081 sizeof(new_crtc_state->vrr_infopacket));
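/*
 * is_timing_unchanged_for_freesync() below treats two timings as equivalent
 * for FreeSync purposes when everything except the vertical blank placement
 * matches: same pixel clock, horizontal timing and vsync pulse width, but a
 * different vtotal/vsync position. Such modes can be reached by front-porch
 * stretching without a full modeset.
 */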
10085 is_timing_unchanged_for_freesync(struct drm_crtc_state *old_crtc_state,
10086 struct drm_crtc_state *new_crtc_state)
10088 struct drm_display_mode old_mode, new_mode;
10090 if (!old_crtc_state || !new_crtc_state)
10093 old_mode = old_crtc_state->mode;
10094 new_mode = new_crtc_state->mode;
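/*
 * The timing is considered unchanged for freesync video when only the
 * vertical total and vsync position differ (e.g. a front porch
 * adjustment) while the vsync pulse width and all horizontal timing
 * parameters stay the same.
 */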
10096 if (old_mode.clock == new_mode.clock &&
10097 old_mode.hdisplay == new_mode.hdisplay &&
10098 old_mode.vdisplay == new_mode.vdisplay &&
10099 old_mode.htotal == new_mode.htotal &&
10100 old_mode.vtotal != new_mode.vtotal &&
10101 old_mode.hsync_start == new_mode.hsync_start &&
10102 old_mode.vsync_start != new_mode.vsync_start &&
10103 old_mode.hsync_end == new_mode.hsync_end &&
10104 old_mode.vsync_end != new_mode.vsync_end &&
10105 old_mode.hskew == new_mode.hskew &&
10106 old_mode.vscan == new_mode.vscan &&
10107 (old_mode.vsync_end - old_mode.vsync_start) ==
10108 (new_mode.vsync_end - new_mode.vsync_start))
10114 static void set_freesync_fixed_config(struct dm_crtc_state *dm_new_crtc_state) {
10115 uint64_t num, den, res;
10116 struct drm_crtc_state *new_crtc_state = &dm_new_crtc_state->base;
10118 dm_new_crtc_state->freesync_config.state = VRR_STATE_ACTIVE_FIXED;
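/*
 * mode.clock is in kHz: convert to Hz (* 1000), scale to uHz
 * (* 1,000,000) and divide by the pixels per frame (htotal * vtotal)
 * to obtain the nominal refresh rate in uHz.
 */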
10120 num = (unsigned long long)new_crtc_state->mode.clock * 1000 * 1000000;
10121 den = (unsigned long long)new_crtc_state->mode.htotal *
10122 (unsigned long long)new_crtc_state->mode.vtotal;
10124 res = div_u64(num, den);
10125 dm_new_crtc_state->freesync_config.fixed_refresh_in_uhz = res;
10128 static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
10129 struct drm_atomic_state *state,
10130 struct drm_crtc *crtc,
10131 struct drm_crtc_state *old_crtc_state,
10132 struct drm_crtc_state *new_crtc_state,
10134 bool *lock_and_validation_needed)
10136 struct dm_atomic_state *dm_state = NULL;
10137 struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
10138 struct dc_stream_state *new_stream;
10142 * TODO Move this code into dm_crtc_atomic_check once we get rid of dc_validation_set
10143 * and update only the changed items.
10145 struct amdgpu_crtc *acrtc = NULL;
10146 struct amdgpu_dm_connector *aconnector = NULL;
10147 struct drm_connector_state *drm_new_conn_state = NULL, *drm_old_conn_state = NULL;
10148 struct dm_connector_state *dm_new_conn_state = NULL, *dm_old_conn_state = NULL;
10152 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
10153 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
10154 acrtc = to_amdgpu_crtc(crtc);
10155 aconnector = amdgpu_dm_find_first_crtc_matching_connector(state, crtc);
10157 /* TODO This hack should go away */
10158 if (aconnector && enable) {
10159 /* Make sure fake sink is created in plug-in scenario */
10160 drm_new_conn_state = drm_atomic_get_new_connector_state(state,
10161 &aconnector->base);
10162 drm_old_conn_state = drm_atomic_get_old_connector_state(state,
10163 &aconnector->base);
10165 if (IS_ERR(drm_new_conn_state)) {
10166 ret = PTR_ERR_OR_ZERO(drm_new_conn_state);
10170 dm_new_conn_state = to_dm_connector_state(drm_new_conn_state);
10171 dm_old_conn_state = to_dm_connector_state(drm_old_conn_state);
10173 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
10176 new_stream = create_validate_stream_for_sink(aconnector,
10177 &new_crtc_state->mode,
10179 dm_old_crtc_state->stream);
10182 * We can have no stream on ACTION_SET if a display
10183 * was disconnected during S3. In this case it is not an
10184 * error; the OS will be updated after detection and
10185 * will do the right thing on the next atomic commit.
10189 DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
10190 __func__, acrtc->base.base.id);
10196 * TODO: Check VSDB bits to decide whether this should
10197 * be enabled or not.
10199 new_stream->triggered_crtc_reset.enabled =
10200 dm->force_timing_sync;
10202 dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
10204 ret = fill_hdr_info_packet(drm_new_conn_state,
10205 &new_stream->hdr_static_metadata);
10210 * If we already removed the old stream from the context
10211 * (and set the new stream to NULL) then we can't reuse
10212 * the old stream even if the stream and scaling are unchanged.
10213 * We'll hit the BUG_ON below and get a black screen.
10215 * TODO: Refactor this function to allow this check to work
10216 * in all conditions.
10218 if (amdgpu_freesync_vid_mode &&
10219 dm_new_crtc_state->stream &&
10220 is_timing_unchanged_for_freesync(new_crtc_state, old_crtc_state))
10223 if (dm_new_crtc_state->stream &&
10224 dc_is_stream_unchanged(new_stream, dm_old_crtc_state->stream) &&
10225 dc_is_stream_scaling_unchanged(new_stream, dm_old_crtc_state->stream)) {
10226 new_crtc_state->mode_changed = false;
10227 DRM_DEBUG_DRIVER("Mode change not required, setting mode_changed to %d",
10228 new_crtc_state->mode_changed);
10232 /* mode_changed flag may get updated above, need to check again */
10233 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
10237 "amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
10238 "planes_changed:%d, mode_changed:%d, active_changed:%d, "
10239 "connectors_changed:%d\n",
10241 new_crtc_state->enable,
10242 new_crtc_state->active,
10243 new_crtc_state->planes_changed,
10244 new_crtc_state->mode_changed,
10245 new_crtc_state->active_changed,
10246 new_crtc_state->connectors_changed);
10248 /* Remove stream for any changed/disabled CRTC */
10251 if (!dm_old_crtc_state->stream)
10254 if (amdgpu_freesync_vid_mode && dm_new_crtc_state->stream &&
10255 is_timing_unchanged_for_freesync(new_crtc_state,
10257 new_crtc_state->mode_changed = false;
10259 "Mode change not required for front porch change, "
10260 "setting mode_changed to %d",
10261 new_crtc_state->mode_changed);
10263 set_freesync_fixed_config(dm_new_crtc_state);
10266 } else if (amdgpu_freesync_vid_mode && aconnector &&
10267 is_freesync_video_mode(&new_crtc_state->mode,
10269 struct drm_display_mode *high_mode;
10271 high_mode = get_highest_refresh_rate_mode(aconnector, false);
10272 if (!drm_mode_equal(&new_crtc_state->mode, high_mode)) {
10273 set_freesync_fixed_config(dm_new_crtc_state);
10277 ret = dm_atomic_get_state(state, &dm_state);
10281 DRM_DEBUG_DRIVER("Disabling DRM crtc: %d\n",
10284 /* i.e. reset mode */
10285 if (dc_remove_stream_from_ctx(
10288 dm_old_crtc_state->stream) != DC_OK) {
10293 dc_stream_release(dm_old_crtc_state->stream);
10294 dm_new_crtc_state->stream = NULL;
10296 reset_freesync_config_for_crtc(dm_new_crtc_state);
10298 *lock_and_validation_needed = true;
10300 } else {/* Add stream for any updated/enabled CRTC */
10302 * Quick fix to prevent a NULL pointer dereference on new_stream when
10303 * added MST connectors are not found in the existing crtc_state in chained mode.
10304 * TODO: need to dig out the root cause of that.
10306 if (!aconnector || (!aconnector->dc_sink && aconnector->mst_port))
10309 if (modereset_required(new_crtc_state))
10312 if (modeset_required(new_crtc_state, new_stream,
10313 dm_old_crtc_state->stream)) {
10315 WARN_ON(dm_new_crtc_state->stream);
10317 ret = dm_atomic_get_state(state, &dm_state);
10321 dm_new_crtc_state->stream = new_stream;
10323 dc_stream_retain(new_stream);
10325 DRM_DEBUG_ATOMIC("Enabling DRM crtc: %d\n",
10328 if (dc_add_stream_to_ctx(
10331 dm_new_crtc_state->stream) != DC_OK) {
10336 *lock_and_validation_needed = true;
10341 /* Release extra reference */
10343 dc_stream_release(new_stream);
10346 * We want to do dc stream updates that do not require a
10347 * full modeset below.
10349 if (!(enable && aconnector && new_crtc_state->active))
10352 * Given above conditions, the dc state cannot be NULL because:
10353 * 1. We're in the process of enabling CRTCs (just been added
10354 * to the dc context, or already is on the context)
10355 * 2. Has a valid connector attached, and
10356 * 3. Is currently active and enabled.
10357 * => The dc stream state currently exists.
10359 BUG_ON(dm_new_crtc_state->stream == NULL);
10361 /* Scaling or underscan settings */
10362 if (is_scaling_state_different(dm_old_conn_state, dm_new_conn_state) ||
10363 drm_atomic_crtc_needs_modeset(new_crtc_state))
10364 update_stream_scaling_settings(
10365 &new_crtc_state->mode, dm_new_conn_state, dm_new_crtc_state->stream);
10368 dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
10371 * Color management settings. We also update color properties
10372 * when a modeset is needed, to ensure it gets reprogrammed.
10374 if (dm_new_crtc_state->base.color_mgmt_changed ||
10375 drm_atomic_crtc_needs_modeset(new_crtc_state)) {
10376 ret = amdgpu_dm_update_crtc_color_mgmt(dm_new_crtc_state);
10381 /* Update Freesync settings. */
10382 get_freesync_config_for_crtc(dm_new_crtc_state,
10383 dm_new_conn_state);
10389 dc_stream_release(new_stream);
10393 static bool should_reset_plane(struct drm_atomic_state *state,
10394 struct drm_plane *plane,
10395 struct drm_plane_state *old_plane_state,
10396 struct drm_plane_state *new_plane_state)
10398 struct drm_plane *other;
10399 struct drm_plane_state *old_other_state, *new_other_state;
10400 struct drm_crtc_state *new_crtc_state;
10404 * TODO: Remove this hack once the checks below are sufficient
10405 * to determine when we need to reset all the planes on a CRTC.
10408 if (state->allow_modeset)
10411 /* Exit early if we know that we're adding or removing the plane. */
10412 if (old_plane_state->crtc != new_plane_state->crtc)
10415 /* old crtc == new_crtc == NULL, plane not in context. */
10416 if (!new_plane_state->crtc)
10420 drm_atomic_get_new_crtc_state(state, new_plane_state->crtc);
10422 if (!new_crtc_state)
10425 /* CRTC Degamma changes currently require us to recreate planes. */
10426 if (new_crtc_state->color_mgmt_changed)
10429 if (drm_atomic_crtc_needs_modeset(new_crtc_state))
10433 * If there are any new primary or overlay planes being added or
10434 * removed then the z-order can potentially change. To ensure
10435 * correct z-order and pipe acquisition the current DC architecture
10436 * requires us to remove and recreate all existing planes.
10438 * TODO: Come up with a more elegant solution for this.
10440 for_each_oldnew_plane_in_state(state, other, old_other_state, new_other_state, i) {
10441 struct amdgpu_framebuffer *old_afb, *new_afb;
10442 if (other->type == DRM_PLANE_TYPE_CURSOR)
10445 if (old_other_state->crtc != new_plane_state->crtc &&
10446 new_other_state->crtc != new_plane_state->crtc)
10449 if (old_other_state->crtc != new_other_state->crtc)
10452 /* Src/dst size and scaling updates. */
10453 if (old_other_state->src_w != new_other_state->src_w ||
10454 old_other_state->src_h != new_other_state->src_h ||
10455 old_other_state->crtc_w != new_other_state->crtc_w ||
10456 old_other_state->crtc_h != new_other_state->crtc_h)
10459 /* Rotation / mirroring updates. */
10460 if (old_other_state->rotation != new_other_state->rotation)
10463 /* Blending updates. */
10464 if (old_other_state->pixel_blend_mode !=
10465 new_other_state->pixel_blend_mode)
10468 /* Alpha updates. */
10469 if (old_other_state->alpha != new_other_state->alpha)
10472 /* Colorspace changes. */
10473 if (old_other_state->color_range != new_other_state->color_range ||
10474 old_other_state->color_encoding != new_other_state->color_encoding)
10477 /* Framebuffer checks fall at the end. */
10478 if (!old_other_state->fb || !new_other_state->fb)
10481 /* Pixel format changes can require bandwidth updates. */
10482 if (old_other_state->fb->format != new_other_state->fb->format)
10485 old_afb = (struct amdgpu_framebuffer *)old_other_state->fb;
10486 new_afb = (struct amdgpu_framebuffer *)new_other_state->fb;
10488 /* Tiling and DCC changes also require bandwidth updates. */
10489 if (old_afb->tiling_flags != new_afb->tiling_flags ||
10490 old_afb->base.modifier != new_afb->base.modifier)
10497 static int dm_check_cursor_fb(struct amdgpu_crtc *new_acrtc,
10498 struct drm_plane_state *new_plane_state,
10499 struct drm_framebuffer *fb)
10501 struct amdgpu_device *adev = drm_to_adev(new_acrtc->base.dev);
10502 struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(fb);
10503 unsigned int pitch;
10506 if (fb->width > new_acrtc->max_cursor_width ||
10507 fb->height > new_acrtc->max_cursor_height) {
10508 DRM_DEBUG_ATOMIC("Bad cursor FB size %dx%d\n",
10509 new_plane_state->fb->width,
10510 new_plane_state->fb->height);
10513 if (new_plane_state->src_w != fb->width << 16 ||
10514 new_plane_state->src_h != fb->height << 16) {
10515 DRM_DEBUG_ATOMIC("Cropping not supported for cursor plane\n");
10519 /* Pitch in pixels */
10520 pitch = fb->pitches[0] / fb->format->cpp[0];
10522 if (fb->width != pitch) {
10523 DRM_DEBUG_ATOMIC("Cursor FB width %d doesn't match pitch %d",
10532 /* FB pitch is supported by cursor plane */
10535 DRM_DEBUG_ATOMIC("Bad cursor FB pitch %d px\n", pitch);
10539 /* Core DRM takes care of checking FB modifiers, so we only need to
10540 * check tiling flags when the FB doesn't have a modifier. */
10541 if (!(fb->flags & DRM_MODE_FB_MODIFIERS)) {
10542 if (adev->family < AMDGPU_FAMILY_AI) {
10543 linear = AMDGPU_TILING_GET(afb->tiling_flags, ARRAY_MODE) != DC_ARRAY_2D_TILED_THIN1 &&
10544 AMDGPU_TILING_GET(afb->tiling_flags, ARRAY_MODE) != DC_ARRAY_1D_TILED_THIN1 &&
10545 AMDGPU_TILING_GET(afb->tiling_flags, MICRO_TILE_MODE) == 0;
10547 linear = AMDGPU_TILING_GET(afb->tiling_flags, SWIZZLE_MODE) == 0;
10550 DRM_DEBUG_ATOMIC("Cursor FB not linear");
10558 static int dm_update_plane_state(struct dc *dc,
10559 struct drm_atomic_state *state,
10560 struct drm_plane *plane,
10561 struct drm_plane_state *old_plane_state,
10562 struct drm_plane_state *new_plane_state,
10564 bool *lock_and_validation_needed)
10567 struct dm_atomic_state *dm_state = NULL;
10568 struct drm_crtc *new_plane_crtc, *old_plane_crtc;
10569 struct drm_crtc_state *old_crtc_state, *new_crtc_state;
10570 struct dm_crtc_state *dm_new_crtc_state, *dm_old_crtc_state;
10571 struct dm_plane_state *dm_new_plane_state, *dm_old_plane_state;
10572 struct amdgpu_crtc *new_acrtc;
10577 new_plane_crtc = new_plane_state->crtc;
10578 old_plane_crtc = old_plane_state->crtc;
10579 dm_new_plane_state = to_dm_plane_state(new_plane_state);
10580 dm_old_plane_state = to_dm_plane_state(old_plane_state);
10582 if (plane->type == DRM_PLANE_TYPE_CURSOR) {
10583 if (!enable || !new_plane_crtc ||
10584 drm_atomic_plane_disabling(plane->state, new_plane_state))
10587 new_acrtc = to_amdgpu_crtc(new_plane_crtc);
10589 if (new_plane_state->src_x != 0 || new_plane_state->src_y != 0) {
10590 DRM_DEBUG_ATOMIC("Cropping not supported for cursor plane\n");
10594 if (new_plane_state->fb) {
10595 ret = dm_check_cursor_fb(new_acrtc, new_plane_state,
10596 new_plane_state->fb);
10604 needs_reset = should_reset_plane(state, plane, old_plane_state,
10607 /* Remove any changed/removed planes */
10612 if (!old_plane_crtc)
10615 old_crtc_state = drm_atomic_get_old_crtc_state(
10616 state, old_plane_crtc);
10617 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
10619 if (!dm_old_crtc_state->stream)
10622 DRM_DEBUG_ATOMIC("Disabling DRM plane: %d on DRM crtc %d\n",
10623 plane->base.id, old_plane_crtc->base.id);
10625 ret = dm_atomic_get_state(state, &dm_state);
10629 if (!dc_remove_plane_from_context(
10631 dm_old_crtc_state->stream,
10632 dm_old_plane_state->dc_state,
10633 dm_state->context)) {
10639 dc_plane_state_release(dm_old_plane_state->dc_state);
10640 dm_new_plane_state->dc_state = NULL;
10642 *lock_and_validation_needed = true;
10644 } else { /* Add new planes */
10645 struct dc_plane_state *dc_new_plane_state;
10647 if (drm_atomic_plane_disabling(plane->state, new_plane_state))
10650 if (!new_plane_crtc)
10653 new_crtc_state = drm_atomic_get_new_crtc_state(state, new_plane_crtc);
10654 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
10656 if (!dm_new_crtc_state->stream)
10662 ret = dm_plane_helper_check_state(new_plane_state, new_crtc_state);
10666 WARN_ON(dm_new_plane_state->dc_state);
10668 dc_new_plane_state = dc_create_plane_state(dc);
10669 if (!dc_new_plane_state)
10672 DRM_DEBUG_ATOMIC("Enabling DRM plane: %d on DRM crtc %d\n",
10673 plane->base.id, new_plane_crtc->base.id);
10675 ret = fill_dc_plane_attributes(
10676 drm_to_adev(new_plane_crtc->dev),
10677 dc_new_plane_state,
10681 dc_plane_state_release(dc_new_plane_state);
10685 ret = dm_atomic_get_state(state, &dm_state);
10687 dc_plane_state_release(dc_new_plane_state);
10692 * Any atomic check errors that occur after this will
10693 * not need a release. The plane state will be attached
10694 * to the stream, and therefore part of the atomic
10695 * state. It'll be released when the atomic state is
10698 if (!dc_add_plane_to_context(
10700 dm_new_crtc_state->stream,
10701 dc_new_plane_state,
10702 dm_state->context)) {
10704 dc_plane_state_release(dc_new_plane_state);
10708 dm_new_plane_state->dc_state = dc_new_plane_state;
10710 /* Tell DC to do a full surface update every time there
10711 * is a plane change. Inefficient, but works for now.
10713 dm_new_plane_state->dc_state->update_flags.bits.full_update = 1;
10715 *lock_and_validation_needed = true;
10722 static int dm_check_crtc_cursor(struct drm_atomic_state *state,
10723 struct drm_crtc *crtc,
10724 struct drm_crtc_state *new_crtc_state)
10726 struct drm_plane *cursor = crtc->cursor, *underlying;
10727 struct drm_plane_state *new_cursor_state, *new_underlying_state;
10729 int cursor_scale_w, cursor_scale_h, underlying_scale_w, underlying_scale_h;
10731 /* On DCE and DCN there is no dedicated hardware cursor plane. We get a
10732 * cursor per pipe, but it's going to inherit the scaling and
10733 * positioning from the underlying pipe. Check that the cursor plane's
10734 * blending properties match the underlying planes'. */
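/*
 * crtc_w/crtc_h are the on-screen size in pixels and src_w/src_h are
 * 16.16 fixed point, so ">> 16" yields source pixels. Scale factors
 * are kept in thousandths so they can be compared as integers.
 */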
10736 new_cursor_state = drm_atomic_get_new_plane_state(state, cursor);
10737 if (!new_cursor_state || !new_cursor_state->fb) {
10741 cursor_scale_w = new_cursor_state->crtc_w * 1000 /
10742 (new_cursor_state->src_w >> 16);
10743 cursor_scale_h = new_cursor_state->crtc_h * 1000 /
10744 (new_cursor_state->src_h >> 16);
10746 for_each_new_plane_in_state_reverse(state, underlying, new_underlying_state, i) {
10747 /* Narrow down to non-cursor planes on the same CRTC as the cursor */
10748 if (new_underlying_state->crtc != crtc || underlying == crtc->cursor)
10751 /* Ignore disabled planes */
10752 if (!new_underlying_state->fb)
10755 underlying_scale_w = new_underlying_state->crtc_w * 1000 /
10756 (new_underlying_state->src_w >> 16);
10757 underlying_scale_h = new_underlying_state->crtc_h * 1000 /
10758 (new_underlying_state->src_h >> 16);
10760 if (cursor_scale_w != underlying_scale_w ||
10761 cursor_scale_h != underlying_scale_h) {
10762 drm_dbg_atomic(crtc->dev,
10763 "Cursor [PLANE:%d:%s] scaling doesn't match underlying [PLANE:%d:%s]\n",
10764 cursor->base.id, cursor->name, underlying->base.id, underlying->name);
10768 /* If this plane covers the whole CRTC, no need to check planes underneath */
10769 if (new_underlying_state->crtc_x <= 0 &&
10770 new_underlying_state->crtc_y <= 0 &&
10771 new_underlying_state->crtc_x + new_underlying_state->crtc_w >= new_crtc_state->mode.hdisplay &&
10772 new_underlying_state->crtc_y + new_underlying_state->crtc_h >= new_crtc_state->mode.vdisplay)
10779 #if defined(CONFIG_DRM_AMD_DC_DCN)
10780 static int add_affected_mst_dsc_crtcs(struct drm_atomic_state *state, struct drm_crtc *crtc)
10782 struct drm_connector *connector;
10783 struct drm_connector_state *conn_state;
10784 struct amdgpu_dm_connector *aconnector = NULL;
10786 for_each_new_connector_in_state(state, connector, conn_state, i) {
10787 if (conn_state->crtc != crtc)
10790 aconnector = to_amdgpu_dm_connector(connector);
10791 if (!aconnector->port || !aconnector->mst_port)
10800 return drm_dp_mst_add_affected_dsc_crtcs(state, &aconnector->mst_port->mst_mgr);
10805 * amdgpu_dm_atomic_check() - Atomic check implementation for AMDgpu DM.
10806 * @dev: The DRM device
10807 * @state: The atomic state to commit
10809 * Validate that the given atomic state is programmable by DC into hardware.
10810 * This involves constructing a &struct dc_state reflecting the new hardware
10811 * state we wish to commit, then querying DC to see if it is programmable. It's
10812 * important not to modify the existing DC state. Otherwise, atomic_check
10813 * may unexpectedly commit hardware changes.
10815 * When validating the DC state, it's important that the right locks are
10816 * acquired. For the full-update case, which removes/adds/updates streams on one
10817 * CRTC while flipping on another CRTC, acquiring the global lock guarantees
10818 * that any such full-update commit will wait for completion of any outstanding
10819 * flip using DRM's synchronization events.
10821 * Note that DM adds the affected connectors for all CRTCs in the state, even when that
10822 * might not seem necessary. This is because DC stream creation requires the
10823 * DC sink, which is tied to the DRM connector state. Cleaning this up should
10824 * be possible but non-trivial - a possible TODO item.
10826 * Return: 0 on success, negative error code if validation failed.
10828 static int amdgpu_dm_atomic_check(struct drm_device *dev,
10829 struct drm_atomic_state *state)
10831 struct amdgpu_device *adev = drm_to_adev(dev);
10832 struct dm_atomic_state *dm_state = NULL;
10833 struct dc *dc = adev->dm.dc;
10834 struct drm_connector *connector;
10835 struct drm_connector_state *old_con_state, *new_con_state;
10836 struct drm_crtc *crtc;
10837 struct drm_crtc_state *old_crtc_state, *new_crtc_state;
10838 struct drm_plane *plane;
10839 struct drm_plane_state *old_plane_state, *new_plane_state;
10840 enum dc_status status;
10842 bool lock_and_validation_needed = false;
10843 struct dm_crtc_state *dm_old_crtc_state;
10844 #if defined(CONFIG_DRM_AMD_DC_DCN)
10845 struct dsc_mst_fairness_vars vars[MAX_PIPES];
10846 struct drm_dp_mst_topology_state *mst_state;
10847 struct drm_dp_mst_topology_mgr *mgr;
10850 trace_amdgpu_dm_atomic_check_begin(state);
10852 ret = drm_atomic_helper_check_modeset(dev, state);
10854 DRM_DEBUG_DRIVER("drm_atomic_helper_check_modeset() failed\n");
10858 /* Check connector changes */
10859 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
10860 struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
10861 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
10863 /* Skip connectors that are disabled or part of modeset already. */
10864 if (!old_con_state->crtc && !new_con_state->crtc)
10867 if (!new_con_state->crtc)
10870 new_crtc_state = drm_atomic_get_crtc_state(state, new_con_state->crtc);
10871 if (IS_ERR(new_crtc_state)) {
10872 DRM_DEBUG_DRIVER("drm_atomic_get_crtc_state() failed\n");
10873 ret = PTR_ERR(new_crtc_state);
10877 if (dm_old_con_state->abm_level !=
10878 dm_new_con_state->abm_level)
10879 new_crtc_state->connectors_changed = true;
10882 #if defined(CONFIG_DRM_AMD_DC_DCN)
10883 if (dc_resource_is_dsc_encoding_supported(dc)) {
10884 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
10885 if (drm_atomic_crtc_needs_modeset(new_crtc_state)) {
10886 ret = add_affected_mst_dsc_crtcs(state, crtc);
10888 DRM_DEBUG_DRIVER("add_affected_mst_dsc_crtcs() failed\n");
10895 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
10896 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
10898 if (!drm_atomic_crtc_needs_modeset(new_crtc_state) &&
10899 !new_crtc_state->color_mgmt_changed &&
10900 old_crtc_state->vrr_enabled == new_crtc_state->vrr_enabled &&
10901 dm_old_crtc_state->dsc_force_changed == false)
10904 ret = amdgpu_dm_verify_lut_sizes(new_crtc_state);
10906 DRM_DEBUG_DRIVER("amdgpu_dm_verify_lut_sizes() failed\n");
10910 if (!new_crtc_state->enable)
10913 ret = drm_atomic_add_affected_connectors(state, crtc);
10915 DRM_DEBUG_DRIVER("drm_atomic_add_affected_connectors() failed\n");
10919 ret = drm_atomic_add_affected_planes(state, crtc);
10921 DRM_DEBUG_DRIVER("drm_atomic_add_affected_planes() failed\n");
10925 if (dm_old_crtc_state->dsc_force_changed)
10926 new_crtc_state->mode_changed = true;
10930 * Add all primary and overlay planes on the CRTC to the state
10931 * whenever a plane is enabled to maintain correct z-ordering
10932 * and to enable fast surface updates.
10934 drm_for_each_crtc(crtc, dev) {
10935 bool modified = false;
10937 for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
10938 if (plane->type == DRM_PLANE_TYPE_CURSOR)
10941 if (new_plane_state->crtc == crtc ||
10942 old_plane_state->crtc == crtc) {
10951 drm_for_each_plane_mask(plane, state->dev, crtc->state->plane_mask) {
10952 if (plane->type == DRM_PLANE_TYPE_CURSOR)
10956 drm_atomic_get_plane_state(state, plane);
10958 if (IS_ERR(new_plane_state)) {
10959 ret = PTR_ERR(new_plane_state);
10960 DRM_DEBUG_DRIVER("new_plane_state is BAD\n");
10966 /* Remove existing planes if they are modified */
10967 for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
10968 ret = dm_update_plane_state(dc, state, plane,
10972 &lock_and_validation_needed);
10974 DRM_DEBUG_DRIVER("dm_update_plane_state() failed\n");
10979 /* Disable all crtcs which require disable */
10980 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
10981 ret = dm_update_crtc_state(&adev->dm, state, crtc,
10985 &lock_and_validation_needed);
10987 DRM_DEBUG_DRIVER("DISABLE: dm_update_crtc_state() failed\n");
10992 /* Enable all crtcs which require enable */
10993 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
10994 ret = dm_update_crtc_state(&adev->dm, state, crtc,
10998 &lock_and_validation_needed);
11000 DRM_DEBUG_DRIVER("ENABLE: dm_update_crtc_state() failed\n");
11005 /* Add new/modified planes */
11006 for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
11007 ret = dm_update_plane_state(dc, state, plane,
11011 &lock_and_validation_needed);
11013 DRM_DEBUG_DRIVER("dm_update_plane_state() failed\n");
11018 /* Run this here since we want to validate the streams we created */
11019 ret = drm_atomic_helper_check_planes(dev, state);
11021 DRM_DEBUG_DRIVER("drm_atomic_helper_check_planes() failed\n");
11025 /* Check cursor planes scaling */
11026 for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
11027 ret = dm_check_crtc_cursor(state, crtc, new_crtc_state);
11029 DRM_DEBUG_DRIVER("dm_check_crtc_cursor() failed\n");
11034 if (state->legacy_cursor_update) {
11036 * This is a fast cursor update coming from the plane update
11037 * helper; check if it can be done asynchronously for better performance.
11040 state->async_update =
11041 !drm_atomic_helper_async_check(dev, state);
11044 * Skip the remaining global validation if this is an async
11045 * update. Cursor updates can be done without affecting
11046 * state or bandwidth calcs and this avoids the performance
11047 * penalty of locking the private state object and
11048 * allocating a new dc_state.
11050 if (state->async_update)
11054 /* Check scaling and underscan changes */
11055 /* TODO Removed scaling changes validation due to inability to commit
11056 * new stream into context w/o causing full reset. Need to
11057 * decide how to handle.
11059 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
11060 struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
11061 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
11062 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
11064 /* Skip any modesets/resets */
11065 if (!acrtc || drm_atomic_crtc_needs_modeset(
11066 drm_atomic_get_new_crtc_state(state, &acrtc->base)))
11069 /* Skip anything that is not a scaling or underscan change */
11070 if (!is_scaling_state_different(dm_new_con_state, dm_old_con_state))
11073 lock_and_validation_needed = true;
11076 #if defined(CONFIG_DRM_AMD_DC_DCN)
11077 /* set the slot info for each mst_state based on the link encoding format */
11078 for_each_new_mst_mgr_in_state(state, mgr, mst_state, i) {
11079 struct amdgpu_dm_connector *aconnector;
11080 struct drm_connector *connector;
11081 struct drm_connector_list_iter iter;
11082 u8 link_coding_cap;
11084 if (!mgr->mst_state)
11087 drm_connector_list_iter_begin(dev, &iter);
11088 drm_for_each_connector_iter(connector, &iter) {
11089 int id = connector->index;
11091 if (id == mst_state->mgr->conn_base_id) {
11092 aconnector = to_amdgpu_dm_connector(connector);
11093 link_coding_cap = dc_link_dp_mst_decide_link_encoding_format(aconnector->dc_link);
11094 drm_dp_mst_update_slots(mst_state, link_coding_cap);
11099 drm_connector_list_iter_end(&iter);
11104 * Streams and planes are reset when there are changes that affect
11105 * bandwidth. Anything that affects bandwidth needs to go through
11106 * DC global validation to ensure that the configuration can be applied to hardware.
11109 * We have to currently stall out here in atomic_check for outstanding
11110 * commits to finish in this case because our IRQ handlers reference
11111 * DRM state directly - we can end up disabling interrupts too early otherwise.
11114 * TODO: Remove this stall and drop DM state private objects.
11116 if (lock_and_validation_needed) {
11117 ret = dm_atomic_get_state(state, &dm_state);
11119 DRM_DEBUG_DRIVER("dm_atomic_get_state() failed\n");
11123 ret = do_aquire_global_lock(dev, state);
11125 DRM_DEBUG_DRIVER("do_aquire_global_lock() failed\n");
11129 #if defined(CONFIG_DRM_AMD_DC_DCN)
11130 if (!compute_mst_dsc_configs_for_state(state, dm_state->context, vars)) {
11131 DRM_DEBUG_DRIVER("compute_mst_dsc_configs_for_state() failed\n");
11135 ret = dm_update_mst_vcpi_slots_for_dsc(state, dm_state->context, vars);
11137 DRM_DEBUG_DRIVER("dm_update_mst_vcpi_slots_for_dsc() failed\n");
11143 * Perform validation of MST topology in the state:
11144 * We need to perform MST atomic check before calling
11145 * dc_validate_global_state(), or we may get stuck in
11146 * an infinite loop and eventually hang.
11148 ret = drm_dp_mst_atomic_check(state);
11150 DRM_DEBUG_DRIVER("drm_dp_mst_atomic_check() failed\n");
11153 status = dc_validate_global_state(dc, dm_state->context, true);
11154 if (status != DC_OK) {
11155 DRM_DEBUG_DRIVER("DC global validation failure: %s (%d)",
11156 dc_status_to_str(status), status);
11162 * The commit is a fast update. Fast updates shouldn't change
11163 * the DC context, affect global validation, and can have their
11164 * commit work done in parallel with other commits not touching
11165 * the same resource. If we have a new DC context as part of
11166 * the DM atomic state from validation we need to free it and
11167 * retain the existing one instead.
11169 * Furthermore, since the DM atomic state only contains the DC
11170 * context and can safely be annulled, we can free the state
11171 * and clear the associated private object now to free
11172 * some memory and avoid a possible use-after-free later.
11175 for (i = 0; i < state->num_private_objs; i++) {
11176 struct drm_private_obj *obj = state->private_objs[i].ptr;
11178 if (obj->funcs == adev->dm.atomic_obj.funcs) {
11179 int j = state->num_private_objs-1;
11181 dm_atomic_destroy_state(obj,
11182 state->private_objs[i].state);
11184 /* If i is not at the end of the array then the
11185 * last element needs to be moved to where i was
11186 * before the array can safely be truncated.
11189 state->private_objs[i] =
11190 state->private_objs[j];
11192 state->private_objs[j].ptr = NULL;
11193 state->private_objs[j].state = NULL;
11194 state->private_objs[j].old_state = NULL;
11195 state->private_objs[j].new_state = NULL;
11197 state->num_private_objs = j;
11203 /* Store the overall update type for use later in atomic check. */
11204 for_each_new_crtc_in_state (state, crtc, new_crtc_state, i) {
11205 struct dm_crtc_state *dm_new_crtc_state =
11206 to_dm_crtc_state(new_crtc_state);
11208 dm_new_crtc_state->update_type = lock_and_validation_needed ?
11213 /* Must be success */
11216 trace_amdgpu_dm_atomic_check_finish(state, ret);
11221 if (ret == -EDEADLK)
11222 DRM_DEBUG_DRIVER("Atomic check stopped to avoid deadlock.\n");
11223 else if (ret == -EINTR || ret == -EAGAIN || ret == -ERESTARTSYS)
11224 DRM_DEBUG_DRIVER("Atomic check stopped due to signal.\n");
11226 DRM_DEBUG_DRIVER("Atomic check failed with err: %d\n", ret);
11228 trace_amdgpu_dm_atomic_check_finish(state, ret);
11233 static bool is_dp_capable_without_timing_msa(struct dc *dc,
11234 struct amdgpu_dm_connector *amdgpu_dm_connector)
11237 bool capable = false;
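/*
 * The DP_MSA_TIMING_PAR_IGNORED bit in the DP_DOWN_STREAM_PORT_COUNT
 * DPCD register indicates the sink can ignore MSA timing parameters,
 * which is a prerequisite for adaptive sync over DisplayPort.
 */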
11239 if (amdgpu_dm_connector->dc_link &&
11240 dm_helpers_dp_read_dpcd(
11242 amdgpu_dm_connector->dc_link,
11243 DP_DOWN_STREAM_PORT_COUNT,
11245 sizeof(dpcd_data))) {
11246 capable = (dpcd_data & DP_MSA_TIMING_PAR_IGNORED) ? true : false;
11252 static bool dm_edid_parser_send_cea(struct amdgpu_display_manager *dm,
11253 unsigned int offset,
11254 unsigned int total_length,
11256 unsigned int length,
11257 struct amdgpu_hdmi_vsdb_info *vsdb)
11260 union dmub_rb_cmd cmd;
11261 struct dmub_cmd_send_edid_cea *input;
11262 struct dmub_cmd_edid_cea_output *output;
11264 if (length > DMUB_EDID_CEA_DATA_CHUNK_BYTES)
11267 memset(&cmd, 0, sizeof(cmd));
11269 input = &cmd.edid_cea.data.input;
11271 cmd.edid_cea.header.type = DMUB_CMD__EDID_CEA;
11272 cmd.edid_cea.header.sub_type = 0;
11273 cmd.edid_cea.header.payload_bytes =
11274 sizeof(cmd.edid_cea) - sizeof(cmd.edid_cea.header);
11275 input->offset = offset;
11276 input->length = length;
11277 input->total_length = total_length;
11278 memcpy(input->payload, data, length);
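/*
 * The EDID CEA block is streamed to the DMUB firmware in chunks of at
 * most DMUB_EDID_CEA_DATA_CHUNK_BYTES; offset and total_length allow
 * the firmware to reassemble the full extension block before parsing.
 */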
11280 res = dc_dmub_srv_cmd_with_reply_data(dm->dc->ctx->dmub_srv, &cmd);
11282 DRM_ERROR("EDID CEA parser failed\n");
11286 output = &cmd.edid_cea.data.output;
11288 if (output->type == DMUB_CMD__EDID_CEA_ACK) {
11289 if (!output->ack.success) {
11290 DRM_ERROR("EDID CEA ack failed at offset %d\n",
11291 output->ack.offset);
11293 } else if (output->type == DMUB_CMD__EDID_CEA_AMD_VSDB) {
11294 if (!output->amd_vsdb.vsdb_found)
11297 vsdb->freesync_supported = output->amd_vsdb.freesync_supported;
11298 vsdb->amd_vsdb_version = output->amd_vsdb.amd_vsdb_version;
11299 vsdb->min_refresh_rate_hz = output->amd_vsdb.min_frame_rate;
11300 vsdb->max_refresh_rate_hz = output->amd_vsdb.max_frame_rate;
11302 DRM_WARN("Unknown EDID CEA parser results\n");
11309 static bool parse_edid_cea_dmcu(struct amdgpu_display_manager *dm,
11310 uint8_t *edid_ext, int len,
11311 struct amdgpu_hdmi_vsdb_info *vsdb_info)
11315 /* send extension block to DMCU for parsing */
11316 for (i = 0; i < len; i += 8) {
11320 /* send 8 bytes at a time */
11321 if (!dc_edid_parser_send_cea(dm->dc, i, len, &edid_ext[i], 8))
11325 /* EDID block transfer completed, expect result */
11326 int version, min_rate, max_rate;
11328 res = dc_edid_parser_recv_amd_vsdb(dm->dc, &version, &min_rate, &max_rate);
11330 /* amd vsdb found */
11331 vsdb_info->freesync_supported = 1;
11332 vsdb_info->amd_vsdb_version = version;
11333 vsdb_info->min_refresh_rate_hz = min_rate;
11334 vsdb_info->max_refresh_rate_hz = max_rate;
11342 res = dc_edid_parser_recv_cea_ack(dm->dc, &offset);
11350 static bool parse_edid_cea_dmub(struct amdgpu_display_manager *dm,
11351 uint8_t *edid_ext, int len,
11352 struct amdgpu_hdmi_vsdb_info *vsdb_info)
11356 /* send extension block to DMUB for parsing */
11357 for (i = 0; i < len; i += 8) {
11358 /* send 8 bytes at a time */
11359 if (!dm_edid_parser_send_cea(dm, i, len, &edid_ext[i], 8, vsdb_info))
11363 return vsdb_info->freesync_supported;
11366 static bool parse_edid_cea(struct amdgpu_dm_connector *aconnector,
11367 uint8_t *edid_ext, int len,
11368 struct amdgpu_hdmi_vsdb_info *vsdb_info)
11370 struct amdgpu_device *adev = drm_to_adev(aconnector->base.dev);
11372 if (adev->dm.dmub_srv)
11373 return parse_edid_cea_dmub(&adev->dm, edid_ext, len, vsdb_info);
11375 return parse_edid_cea_dmcu(&adev->dm, edid_ext, len, vsdb_info);
11378 static int parse_hdmi_amd_vsdb(struct amdgpu_dm_connector *aconnector,
11379 struct edid *edid, struct amdgpu_hdmi_vsdb_info *vsdb_info)
11381 uint8_t *edid_ext = NULL;
11383 bool valid_vsdb_found = false;
11385 /*----- drm_find_cea_extension() -----*/
11386 /* No EDID or EDID extensions */
11387 if (edid == NULL || edid->extensions == 0)
11390 /* Find CEA extension */
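/*
 * The base EDID block is EDID_LENGTH (128) bytes; extension block i
 * starts at byte offset EDID_LENGTH * (i + 1).
 */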
11391 for (i = 0; i < edid->extensions; i++) {
11392 edid_ext = (uint8_t *)edid + EDID_LENGTH * (i + 1);
11393 if (edid_ext[0] == CEA_EXT)
11397 if (i == edid->extensions)
11400 /*----- cea_db_offsets() -----*/
11401 if (edid_ext[0] != CEA_EXT)
11404 valid_vsdb_found = parse_edid_cea(aconnector, edid_ext, EDID_LENGTH, vsdb_info);
11406 return valid_vsdb_found ? i : -ENODEV;
11409 void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,
11413 struct detailed_timing *timing;
11414 struct detailed_non_pixel *data;
11415 struct detailed_data_monitor_range *range;
11416 struct amdgpu_dm_connector *amdgpu_dm_connector =
11417 to_amdgpu_dm_connector(connector);
11418 struct dm_connector_state *dm_con_state = NULL;
11419 struct dc_sink *sink;
11421 struct drm_device *dev = connector->dev;
11422 struct amdgpu_device *adev = drm_to_adev(dev);
11423 bool freesync_capable = false;
11424 struct amdgpu_hdmi_vsdb_info vsdb_info = {0};
11426 if (!connector->state) {
11427 DRM_ERROR("%s - Connector has no state", __func__);
11431 sink = amdgpu_dm_connector->dc_sink ?
11432 amdgpu_dm_connector->dc_sink :
11433 amdgpu_dm_connector->dc_em_sink;
11435 if (!edid || !sink) {
11436 dm_con_state = to_dm_connector_state(connector->state);
11438 amdgpu_dm_connector->min_vfreq = 0;
11439 amdgpu_dm_connector->max_vfreq = 0;
11440 amdgpu_dm_connector->pixel_clock_mhz = 0;
11441 connector->display_info.monitor_range.min_vfreq = 0;
11442 connector->display_info.monitor_range.max_vfreq = 0;
11443 freesync_capable = false;
11448 dm_con_state = to_dm_connector_state(connector->state);
11450 if (!adev->dm.freesync_module)
11454 if (sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT
11455 || sink->sink_signal == SIGNAL_TYPE_EDP) {
11456 bool edid_check_required = false;
11459 edid_check_required = is_dp_capable_without_timing_msa(
11461 amdgpu_dm_connector);
11464 if (edid_check_required == true && (edid->version > 1 ||
11465 (edid->version == 1 && edid->revision > 1))) {
11466 for (i = 0; i < 4; i++) {
11468 timing = &edid->detailed_timings[i];
11469 data = &timing->data.other_data;
11470 range = &data->data.range;
11472 * Check if monitor has continuous frequency mode
11474 if (data->type != EDID_DETAIL_MONITOR_RANGE)
11477 * Check for flag range limits only. If flag == 1 then
11478 * no additional timing information provided.
11479 * Default GTF, GTF Secondary curve and CVT are not supported.
11482 if (range->flags != 1)
11485 amdgpu_dm_connector->min_vfreq = range->min_vfreq;
11486 amdgpu_dm_connector->max_vfreq = range->max_vfreq;
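/* The EDID range-limits descriptor encodes the max pixel clock in 10 MHz units. */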
11487 amdgpu_dm_connector->pixel_clock_mhz =
11488 range->pixel_clock_mhz * 10;
11490 connector->display_info.monitor_range.min_vfreq = range->min_vfreq;
11491 connector->display_info.monitor_range.max_vfreq = range->max_vfreq;
11496 if (amdgpu_dm_connector->max_vfreq -
11497 amdgpu_dm_connector->min_vfreq > 10) {
11499 freesync_capable = true;
11502 } else if (edid && sink->sink_signal == SIGNAL_TYPE_HDMI_TYPE_A) {
11503 i = parse_hdmi_amd_vsdb(amdgpu_dm_connector, edid, &vsdb_info);
11504 if (i >= 0 && vsdb_info.freesync_supported) {
11505 timing = &edid->detailed_timings[i];
11506 data = &timing->data.other_data;
11508 amdgpu_dm_connector->min_vfreq = vsdb_info.min_refresh_rate_hz;
11509 amdgpu_dm_connector->max_vfreq = vsdb_info.max_refresh_rate_hz;
11510 if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10)
11511 freesync_capable = true;
11513 connector->display_info.monitor_range.min_vfreq = vsdb_info.min_refresh_rate_hz;
11514 connector->display_info.monitor_range.max_vfreq = vsdb_info.max_refresh_rate_hz;
11520 dm_con_state->freesync_capable = freesync_capable;
11522 if (connector->vrr_capable_property)
11523 drm_connector_set_vrr_capable_property(connector,
11527 void amdgpu_dm_trigger_timing_sync(struct drm_device *dev)
11529 struct amdgpu_device *adev = drm_to_adev(dev);
11530 struct dc *dc = adev->dm.dc;
11533 mutex_lock(&adev->dm.dc_lock);
11534 if (dc->current_state) {
11535 for (i = 0; i < dc->current_state->stream_count; ++i)
11536 dc->current_state->streams[i]
11537 ->triggered_crtc_reset.enabled =
11538 adev->dm.force_timing_sync;
11540 dm_enable_per_frame_crtc_master_sync(dc->current_state);
11541 dc_trigger_sync(dc, dc->current_state);
11543 mutex_unlock(&adev->dm.dc_lock);
11546 void dm_write_reg_func(const struct dc_context *ctx, uint32_t address,
11547 uint32_t value, const char *func_name)
11549 #ifdef DM_CHECK_ADDR_0
11550 if (address == 0) {
11551 DC_ERR("invalid register write. address = 0\n");
11555 cgs_write_register(ctx->cgs_device, address, value);
11556 trace_amdgpu_dc_wreg(&ctx->perf_trace->write_count, address, value);
11559 uint32_t dm_read_reg_func(const struct dc_context *ctx, uint32_t address,
11560 const char *func_name)
11563 #ifdef DM_CHECK_ADDR_0
11564 if (address == 0) {
11565 DC_ERR("invalid register read; address = 0\n");
11570 if (ctx->dmub_srv &&
11571 ctx->dmub_srv->reg_helper_offload.gather_in_progress &&
11572 !ctx->dmub_srv->reg_helper_offload.should_burst_write) {
11577 value = cgs_read_register(ctx->cgs_device, address);
11579 trace_amdgpu_dc_rreg(&ctx->perf_trace->read_count, address, value);
11584 int amdgpu_dm_set_dmub_async_sync_status(bool is_cmd_aux, struct dc_context *ctx,
11585 uint8_t status_type, uint32_t *operation_result)
11587 struct amdgpu_device *adev = ctx->driver_context;
11588 int return_status = -1;
11589 struct dmub_notification *p_notify = adev->dm.dmub_notify;
11592 if (status_type == DMUB_ASYNC_TO_SYNC_ACCESS_SUCCESS) {
11593 return_status = p_notify->aux_reply.length;
11594 *operation_result = p_notify->result;
11595 } else if (status_type == DMUB_ASYNC_TO_SYNC_ACCESS_TIMEOUT) {
11596 *operation_result = AUX_RET_ERROR_TIMEOUT;
11597 } else if (status_type == DMUB_ASYNC_TO_SYNC_ACCESS_FAIL) {
11598 *operation_result = AUX_RET_ERROR_ENGINE_ACQUIRE;
11600 *operation_result = AUX_RET_ERROR_UNKNOWN;
11603 if (status_type == DMUB_ASYNC_TO_SYNC_ACCESS_SUCCESS) {
11605 *operation_result = p_notify->sc_status;
11607 *operation_result = SET_CONFIG_UNKNOWN_ERROR;
11611 return return_status;
11614 int amdgpu_dm_process_dmub_aux_transfer_sync(bool is_cmd_aux, struct dc_context *ctx,
11615 unsigned int link_index, void *cmd_payload, void *operation_result)
11617 struct amdgpu_device *adev = ctx->driver_context;
11621 dc_process_dmub_aux_transfer_async(ctx->dc,
11622 link_index, (struct aux_payload *)cmd_payload);
11623 } else if (dc_process_dmub_set_config_async(ctx->dc, link_index,
11624 (struct set_config_cmd_payload *)cmd_payload,
11625 adev->dm.dmub_notify)) {
11626 return amdgpu_dm_set_dmub_async_sync_status(is_cmd_aux,
11627 ctx, DMUB_ASYNC_TO_SYNC_ACCESS_SUCCESS,
11628 (uint32_t *)operation_result);
11631 ret = wait_for_completion_timeout(&adev->dm.dmub_aux_transfer_done, 10 * HZ);
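/* wait_for_completion_timeout() returns 0 on timeout and the remaining jiffies otherwise. */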
11633 DRM_ERROR("wait_for_completion_timeout timeout!\n");
11634 return amdgpu_dm_set_dmub_async_sync_status(is_cmd_aux,
11635 ctx, DMUB_ASYNC_TO_SYNC_ACCESS_TIMEOUT,
11636 (uint32_t *)operation_result);
11640 if (adev->dm.dmub_notify->result == AUX_RET_SUCCESS) {
11641 struct aux_payload *payload = (struct aux_payload *)cmd_payload;
11643 payload->reply[0] = adev->dm.dmub_notify->aux_reply.command;
11644 if (!payload->write && adev->dm.dmub_notify->aux_reply.length &&
11645 payload->reply[0] == AUX_TRANSACTION_REPLY_AUX_ACK) {
11646 memcpy(payload->data, adev->dm.dmub_notify->aux_reply.data,
11647 adev->dm.dmub_notify->aux_reply.length);
11652 return amdgpu_dm_set_dmub_async_sync_status(is_cmd_aux,
11653 ctx, DMUB_ASYNC_TO_SYNC_ACCESS_SUCCESS,
11654 (uint32_t *)operation_result);
11658 * Check whether seamless boot is supported.
11660 * So far we only support seamless boot on CHIP_VANGOGH.
11661 * If everything goes well, we may consider expanding
11662 * seamless boot to other ASICs.
11664 bool check_seamless_boot_capability(struct amdgpu_device *adev)
11666 switch (adev->asic_type) {
11668 if (!adev->mman.keep_stolen_vga_memory)