2 * Copyright 2015 Advanced Micro Devices, Inc.
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
26 /* The caprices of the preprocessor require that this be declared right here */
27 #define CREATE_TRACE_POINTS
29 #include "dm_services_types.h"
31 #include "dc_link_dp.h"
32 #include "link_enc_cfg.h"
33 #include "dc/inc/core_types.h"
34 #include "dal_asic_id.h"
35 #include "dmub/dmub_srv.h"
36 #include "dc/inc/hw/dmcu.h"
37 #include "dc/inc/hw/abm.h"
38 #include "dc/dc_dmub_srv.h"
39 #include "dc/dc_edid_parser.h"
40 #include "dc/dc_stat.h"
41 #include "amdgpu_dm_trace.h"
45 #include "amdgpu_display.h"
46 #include "amdgpu_ucode.h"
48 #include "amdgpu_dm.h"
49 #ifdef CONFIG_DRM_AMD_DC_HDCP
50 #include "amdgpu_dm_hdcp.h"
51 #include <drm/drm_hdcp.h>
53 #include "amdgpu_pm.h"
55 #include "amd_shared.h"
56 #include "amdgpu_dm_irq.h"
57 #include "dm_helpers.h"
58 #include "amdgpu_dm_mst_types.h"
59 #if defined(CONFIG_DEBUG_FS)
60 #include "amdgpu_dm_debugfs.h"
62 #include "amdgpu_dm_psr.h"
64 #include "ivsrcid/ivsrcid_vislands30.h"
66 #include "i2caux_interface.h"
67 #include <linux/module.h>
68 #include <linux/moduleparam.h>
69 #include <linux/types.h>
70 #include <linux/pm_runtime.h>
71 #include <linux/pci.h>
72 #include <linux/firmware.h>
73 #include <linux/component.h>
75 #include <drm/drm_atomic.h>
76 #include <drm/drm_atomic_uapi.h>
77 #include <drm/drm_atomic_helper.h>
78 #include <drm/drm_dp_mst_helper.h>
79 #include <drm/drm_fb_helper.h>
80 #include <drm/drm_fourcc.h>
81 #include <drm/drm_edid.h>
82 #include <drm/drm_vblank.h>
83 #include <drm/drm_audio_component.h>
85 #if defined(CONFIG_DRM_AMD_DC_DCN)
86 #include "ivsrcid/dcn/irqsrcs_dcn_1_0.h"
88 #include "dcn/dcn_1_0_offset.h"
89 #include "dcn/dcn_1_0_sh_mask.h"
90 #include "soc15_hw_ip.h"
91 #include "vega10_ip_offset.h"
93 #include "soc15_common.h"
96 #include "modules/inc/mod_freesync.h"
97 #include "modules/power/power_helpers.h"
98 #include "modules/inc/mod_info_packet.h"
100 #define FIRMWARE_RENOIR_DMUB "amdgpu/renoir_dmcub.bin"
101 MODULE_FIRMWARE(FIRMWARE_RENOIR_DMUB);
102 #define FIRMWARE_SIENNA_CICHLID_DMUB "amdgpu/sienna_cichlid_dmcub.bin"
103 MODULE_FIRMWARE(FIRMWARE_SIENNA_CICHLID_DMUB);
104 #define FIRMWARE_NAVY_FLOUNDER_DMUB "amdgpu/navy_flounder_dmcub.bin"
105 MODULE_FIRMWARE(FIRMWARE_NAVY_FLOUNDER_DMUB);
106 #define FIRMWARE_GREEN_SARDINE_DMUB "amdgpu/green_sardine_dmcub.bin"
107 MODULE_FIRMWARE(FIRMWARE_GREEN_SARDINE_DMUB);
108 #define FIRMWARE_VANGOGH_DMUB "amdgpu/vangogh_dmcub.bin"
109 MODULE_FIRMWARE(FIRMWARE_VANGOGH_DMUB);
110 #define FIRMWARE_DIMGREY_CAVEFISH_DMUB "amdgpu/dimgrey_cavefish_dmcub.bin"
111 MODULE_FIRMWARE(FIRMWARE_DIMGREY_CAVEFISH_DMUB);
112 #define FIRMWARE_BEIGE_GOBY_DMUB "amdgpu/beige_goby_dmcub.bin"
113 MODULE_FIRMWARE(FIRMWARE_BEIGE_GOBY_DMUB);
114 #define FIRMWARE_YELLOW_CARP_DMUB "amdgpu/yellow_carp_dmcub.bin"
115 MODULE_FIRMWARE(FIRMWARE_YELLOW_CARP_DMUB);
117 #define FIRMWARE_RAVEN_DMCU "amdgpu/raven_dmcu.bin"
118 MODULE_FIRMWARE(FIRMWARE_RAVEN_DMCU);
120 #define FIRMWARE_NAVI12_DMCU "amdgpu/navi12_dmcu.bin"
121 MODULE_FIRMWARE(FIRMWARE_NAVI12_DMCU);
123 /* Number of bytes in PSP header for firmware. */
124 #define PSP_HEADER_BYTES 0x100
126 /* Number of bytes in PSP footer for firmware. */
127 #define PSP_FOOTER_BYTES 0x100
132 * The AMDgpu display manager, **amdgpu_dm** (or even simpler,
133 * **dm**) sits between DRM and DC. It acts as a liaison, converting DRM
134 * requests into DC requests, and DC responses into DRM responses.
136 * The root control structure is &struct amdgpu_display_manager.
139 /* basic init/fini API */
140 static int amdgpu_dm_init(struct amdgpu_device *adev);
141 static void amdgpu_dm_fini(struct amdgpu_device *adev);
142 static bool is_freesync_video_mode(const struct drm_display_mode *mode, struct amdgpu_dm_connector *aconnector);
144 static enum drm_mode_subconnector get_subconnector_type(struct dc_link *link)
146 switch (link->dpcd_caps.dongle_type) {
147 case DISPLAY_DONGLE_NONE:
148 return DRM_MODE_SUBCONNECTOR_Native;
149 case DISPLAY_DONGLE_DP_VGA_CONVERTER:
150 return DRM_MODE_SUBCONNECTOR_VGA;
151 case DISPLAY_DONGLE_DP_DVI_CONVERTER:
152 case DISPLAY_DONGLE_DP_DVI_DONGLE:
153 return DRM_MODE_SUBCONNECTOR_DVID;
154 case DISPLAY_DONGLE_DP_HDMI_CONVERTER:
155 case DISPLAY_DONGLE_DP_HDMI_DONGLE:
156 return DRM_MODE_SUBCONNECTOR_HDMIA;
157 case DISPLAY_DONGLE_DP_HDMI_MISMATCHED_DONGLE:
159 return DRM_MODE_SUBCONNECTOR_Unknown;
163 static void update_subconnector_property(struct amdgpu_dm_connector *aconnector)
165 struct dc_link *link = aconnector->dc_link;
166 struct drm_connector *connector = &aconnector->base;
167 enum drm_mode_subconnector subconnector = DRM_MODE_SUBCONNECTOR_Unknown;
169 if (connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort)
172 if (aconnector->dc_sink)
173 subconnector = get_subconnector_type(link);
175 drm_object_property_set_value(&connector->base,
176 connector->dev->mode_config.dp_subconnector_property,
181 * initializes drm_device display related structures, based on the information
182 * provided by DAL. The drm structures are: drm_crtc, drm_connector,
183 * drm_encoder, drm_mode_config
185 * Returns 0 on success
187 static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev);
188 /* removes and deallocates the drm structures, created by the above function */
189 static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm);
191 static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
192 struct drm_plane *plane,
193 unsigned long possible_crtcs,
194 const struct dc_plane_cap *plane_cap);
195 static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
196 struct drm_plane *plane,
197 uint32_t link_index);
198 static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
199 struct amdgpu_dm_connector *amdgpu_dm_connector,
201 struct amdgpu_encoder *amdgpu_encoder);
202 static int amdgpu_dm_encoder_init(struct drm_device *dev,
203 struct amdgpu_encoder *aencoder,
204 uint32_t link_index);
206 static int amdgpu_dm_connector_get_modes(struct drm_connector *connector);
208 static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state);
210 static int amdgpu_dm_atomic_check(struct drm_device *dev,
211 struct drm_atomic_state *state);
213 static void handle_cursor_update(struct drm_plane *plane,
214 struct drm_plane_state *old_plane_state);
216 static const struct drm_format_info *
217 amd_get_format_info(const struct drm_mode_fb_cmd2 *cmd);
219 static void handle_hpd_irq_helper(struct amdgpu_dm_connector *aconnector);
222 is_timing_unchanged_for_freesync(struct drm_crtc_state *old_crtc_state,
223 struct drm_crtc_state *new_crtc_state);
225 * dm_vblank_get_counter
228 * Get counter for number of vertical blanks
231 * struct amdgpu_device *adev - [in] desired amdgpu device
232 * int crtc - [in] which CRTC to get the counter from
235 * Counter for vertical blanks
237 static u32 dm_vblank_get_counter(struct amdgpu_device *adev, int crtc)
239 if (crtc >= adev->mode_info.num_crtc)
242 struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];
244 if (acrtc->dm_irq_params.stream == NULL) {
245 DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
250 return dc_stream_get_vblank_counter(acrtc->dm_irq_params.stream);
254 static int dm_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
255 u32 *vbl, u32 *position)
257 uint32_t v_blank_start, v_blank_end, h_position, v_position;
259 if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc))
262 struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];
264 if (acrtc->dm_irq_params.stream == NULL) {
265 DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
271 * TODO rework base driver to use values directly.
272 * for now parse it back into reg-format
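 * (the packing below mirrors the legacy layout: *position carries the
 * vertical position in its low 16 bits and the horizontal position in
 * its high 16 bits, while *vbl packs v_blank_start low / v_blank_end
 * high)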
274 dc_stream_get_scanoutpos(acrtc->dm_irq_params.stream,
280 *position = v_position | (h_position << 16);
281 *vbl = v_blank_start | (v_blank_end << 16);
287 static bool dm_is_idle(void *handle)
293 static int dm_wait_for_idle(void *handle)
299 static bool dm_check_soft_reset(void *handle)
304 static int dm_soft_reset(void *handle)
310 static struct amdgpu_crtc *
311 get_crtc_by_otg_inst(struct amdgpu_device *adev,
314 struct drm_device *dev = adev_to_drm(adev);
315 struct drm_crtc *crtc;
316 struct amdgpu_crtc *amdgpu_crtc;
318 if (WARN_ON(otg_inst == -1))
319 return adev->mode_info.crtcs[0];
321 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
322 amdgpu_crtc = to_amdgpu_crtc(crtc);
324 if (amdgpu_crtc->otg_inst == otg_inst)
331 static inline bool amdgpu_dm_vrr_active_irq(struct amdgpu_crtc *acrtc)
333 return acrtc->dm_irq_params.freesync_config.state ==
334 VRR_STATE_ACTIVE_VARIABLE ||
335 acrtc->dm_irq_params.freesync_config.state ==
336 VRR_STATE_ACTIVE_FIXED;
339 static inline bool amdgpu_dm_vrr_active(struct dm_crtc_state *dm_state)
341 return dm_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE ||
342 dm_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED;
345 static inline bool is_dc_timing_adjust_needed(struct dm_crtc_state *old_state,
346 struct dm_crtc_state *new_state)
348 if (new_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED)
350 else if (amdgpu_dm_vrr_active(old_state) != amdgpu_dm_vrr_active(new_state))
357 * dm_pflip_high_irq() - Handle pageflip interrupt
358 * @interrupt_params: ignored
360 * Handles the pageflip interrupt by notifying all interested parties
361 * that the pageflip has been completed.
363 static void dm_pflip_high_irq(void *interrupt_params)
365 struct amdgpu_crtc *amdgpu_crtc;
366 struct common_irq_params *irq_params = interrupt_params;
367 struct amdgpu_device *adev = irq_params->adev;
369 struct drm_pending_vblank_event *e;
370 uint32_t vpos, hpos, v_blank_start, v_blank_end;
373 amdgpu_crtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_PFLIP);
375 /* IRQ could occur when in initial stage */
376 /* TODO work and BO cleanup */
377 if (amdgpu_crtc == NULL) {
378 DC_LOG_PFLIP("CRTC is null, returning.\n");
382 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
384 if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED) {
385 DC_LOG_PFLIP("amdgpu_crtc->pflip_status = %d != AMDGPU_FLIP_SUBMITTED(%d) on crtc:%d[%p]\n",
386 amdgpu_crtc->pflip_status,
387 AMDGPU_FLIP_SUBMITTED,
388 amdgpu_crtc->crtc_id,
390 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
394 /* page flip completed. */
395 e = amdgpu_crtc->event;
396 amdgpu_crtc->event = NULL;
400 vrr_active = amdgpu_dm_vrr_active_irq(amdgpu_crtc);
402 /* Fixed refresh rate, or VRR scanout position outside front-porch? */
404 !dc_stream_get_scanoutpos(amdgpu_crtc->dm_irq_params.stream, &v_blank_start,
405 &v_blank_end, &hpos, &vpos) ||
406 (vpos < v_blank_start)) {
407 /* Update to correct count and vblank timestamp if racing with
408 * vblank irq. This also updates to the correct vblank timestamp
409 * even in VRR mode, as scanout is past the front-porch atm.
411 drm_crtc_accurate_vblank_count(&amdgpu_crtc->base);
413 /* Wake up userspace by sending the pageflip event with proper
414 * count and timestamp of vblank of flip completion.
417 drm_crtc_send_vblank_event(&amdgpu_crtc->base, e);
419 /* Event sent, so done with vblank for this flip */
420 drm_crtc_vblank_put(&amdgpu_crtc->base);
423 /* VRR active and inside front-porch: vblank count and
424 * timestamp for pageflip event will only be up to date after
425 * drm_crtc_handle_vblank() has been executed from late vblank
426 * irq handler after start of back-porch (vline 0). We queue the
427 * pageflip event for send-out by drm_crtc_handle_vblank() with
428 * updated timestamp and count, once it runs after us.
430 * We need to open-code this instead of using the helper
431 * drm_crtc_arm_vblank_event(), as that helper would
432 * call drm_crtc_accurate_vblank_count(), which we must
433 * not call in VRR mode while we are in front-porch!
436 /* sequence will be replaced by real count during send-out. */
437 e->sequence = drm_crtc_vblank_count(&amdgpu_crtc->base);
438 e->pipe = amdgpu_crtc->crtc_id;
440 list_add_tail(&e->base.link, &adev_to_drm(adev)->vblank_event_list);
444 /* Keep track of vblank of this flip for flip throttling. We use the
445 * cooked hw counter, as that one is incremented at the start of this vblank
446 * of pageflip completion, so last_flip_vblank is the forbidden count
447 * for queueing new pageflips if vsync + VRR is enabled.
449 amdgpu_crtc->dm_irq_params.last_flip_vblank =
450 amdgpu_get_vblank_counter_kms(&amdgpu_crtc->base);
452 amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE;
453 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
455 DC_LOG_PFLIP("crtc:%d[%p], pflip_stat:AMDGPU_FLIP_NONE, vrr[%d]-fp %d\n",
456 amdgpu_crtc->crtc_id, amdgpu_crtc,
457 vrr_active, (int) !e);
460 static void dm_vupdate_high_irq(void *interrupt_params)
462 struct common_irq_params *irq_params = interrupt_params;
463 struct amdgpu_device *adev = irq_params->adev;
464 struct amdgpu_crtc *acrtc;
465 struct drm_device *drm_dev;
466 struct drm_vblank_crtc *vblank;
467 ktime_t frame_duration_ns, previous_timestamp;
471 acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VUPDATE);
474 vrr_active = amdgpu_dm_vrr_active_irq(acrtc);
475 drm_dev = acrtc->base.dev;
476 vblank = &drm_dev->vblank[acrtc->base.index];
477 previous_timestamp = atomic64_read(&irq_params->previous_timestamp);
478 frame_duration_ns = vblank->time - previous_timestamp;
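/*
 * The instantaneous refresh rate traced below is NSEC_PER_SEC divided
 * by the measured frame duration in nanoseconds.
 */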
480 if (frame_duration_ns > 0) {
481 trace_amdgpu_refresh_rate_track(acrtc->base.index,
483 ktime_divns(NSEC_PER_SEC, frame_duration_ns));
484 atomic64_set(&irq_params->previous_timestamp, vblank->time);
487 DC_LOG_VBLANK("crtc:%d, vupdate-vrr:%d\n",
491 /* Core vblank handling is done here after the end of front-porch in
492 * vrr mode, as vblank timestamping only gives valid results once
493 * scanout is past the front-porch. This will also deliver
494 * page-flip completion events that have been queued to us
495 * if a pageflip happened inside front-porch.
498 drm_crtc_handle_vblank(&acrtc->base);
500 /* BTR processing for pre-DCE12 ASICs */
501 if (acrtc->dm_irq_params.stream &&
502 adev->family < AMDGPU_FAMILY_AI) {
503 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
504 mod_freesync_handle_v_update(
505 adev->dm.freesync_module,
506 acrtc->dm_irq_params.stream,
507 &acrtc->dm_irq_params.vrr_params);
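/*
 * Apply the vmin/vmax adjustment the freesync module just computed
 * (e.g. below-the-range BTR handling) to the stream.
 */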
509 dc_stream_adjust_vmin_vmax(
511 acrtc->dm_irq_params.stream,
512 &acrtc->dm_irq_params.vrr_params.adjust);
513 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
520 * dm_crtc_high_irq() - Handles CRTC interrupt
521 * @interrupt_params: used for determining the CRTC instance
523 * Handles the CRTC/VSYNC interrupt by notifying DRM's VBLANK
526 static void dm_crtc_high_irq(void *interrupt_params)
528 struct common_irq_params *irq_params = interrupt_params;
529 struct amdgpu_device *adev = irq_params->adev;
530 struct amdgpu_crtc *acrtc;
534 acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VBLANK);
538 vrr_active = amdgpu_dm_vrr_active_irq(acrtc);
540 DC_LOG_VBLANK("crtc:%d, vupdate-vrr:%d, planes:%d\n", acrtc->crtc_id,
541 vrr_active, acrtc->dm_irq_params.active_planes);
544 * Core vblank handling at the start of front-porch is only possible
545 * in non-vrr mode, since only then does vblank timestamping give
546 * valid results inside the front-porch. Otherwise defer it
547 * to dm_vupdate_high_irq after the end of front-porch.
550 drm_crtc_handle_vblank(&acrtc->base);
553 * The following must happen at the start of vblank, for crc
554 * computation and below-the-range btr support in vrr mode.
556 amdgpu_dm_crtc_handle_crc_irq(&acrtc->base);
558 /* BTR updates need to happen before VUPDATE on Vega and above. */
559 if (adev->family < AMDGPU_FAMILY_AI)
562 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
564 if (acrtc->dm_irq_params.stream &&
565 acrtc->dm_irq_params.vrr_params.supported &&
566 acrtc->dm_irq_params.freesync_config.state ==
567 VRR_STATE_ACTIVE_VARIABLE) {
568 mod_freesync_handle_v_update(adev->dm.freesync_module,
569 acrtc->dm_irq_params.stream,
570 &acrtc->dm_irq_params.vrr_params);
572 dc_stream_adjust_vmin_vmax(adev->dm.dc, acrtc->dm_irq_params.stream,
573 &acrtc->dm_irq_params.vrr_params.adjust);
577 * If there aren't any active_planes then DCH HUBP may be clock-gated.
578 * In that case, pageflip completion interrupts won't fire and pageflip
579 * completion events won't get delivered. Prevent this by sending
580 * pending pageflip events from here if a flip is still pending.
582 * If any planes are enabled, use dm_pflip_high_irq() instead, to
583 * avoid race conditions between flip programming and completion,
584 * which could cause too early flip completion events.
586 if (adev->family >= AMDGPU_FAMILY_RV &&
587 acrtc->pflip_status == AMDGPU_FLIP_SUBMITTED &&
588 acrtc->dm_irq_params.active_planes == 0) {
590 drm_crtc_send_vblank_event(&acrtc->base, acrtc->event);
592 drm_crtc_vblank_put(&acrtc->base);
594 acrtc->pflip_status = AMDGPU_FLIP_NONE;
597 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
600 #if defined(CONFIG_DRM_AMD_DC_DCN)
601 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
603 * dm_dcn_vertical_interrupt0_high_irq() - Handles OTG Vertical interrupt0 for
604 * DCN generation ASICs
605 * @interrupt_params: interrupt parameters
607 * Used to set crc window/read out crc value at vertical line 0 position
609 static void dm_dcn_vertical_interrupt0_high_irq(void *interrupt_params)
611 struct common_irq_params *irq_params = interrupt_params;
612 struct amdgpu_device *adev = irq_params->adev;
613 struct amdgpu_crtc *acrtc;
615 acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VLINE0);
620 amdgpu_dm_crtc_handle_crc_window_irq(&acrtc->base);
625 * dmub_aux_setconfig_callback - Callback for AUX or SET_CONFIG command.
626 * @adev: amdgpu_device pointer
627 * @notify: dmub notification structure
629 * Dmub AUX or SET_CONFIG command completion processing callback
630 * Copies the dmub notification to DM, to be read by the AUX command
631 * issuing thread, and signals the event to wake up that thread.
633 void dmub_aux_setconfig_callback(struct amdgpu_device *adev, struct dmub_notification *notify)
635 if (adev->dm.dmub_notify)
636 memcpy(adev->dm.dmub_notify, notify, sizeof(struct dmub_notification));
637 if (notify->type == DMUB_NOTIFICATION_AUX_REPLY)
638 complete(&adev->dm.dmub_aux_transfer_done);
642 * dmub_hpd_callback - DMUB HPD interrupt processing callback.
643 * @adev: amdgpu_device pointer
644 * @notify: dmub notification structure
646 * Dmub Hpd interrupt processing callback. Gets the display index through the
647 * link index and calls the helper to do the processing.
649 void dmub_hpd_callback(struct amdgpu_device *adev, struct dmub_notification *notify)
651 struct amdgpu_dm_connector *aconnector;
652 struct amdgpu_dm_connector *hpd_aconnector = NULL;
653 struct drm_connector *connector;
654 struct drm_connector_list_iter iter;
655 struct dc_link *link;
656 uint8_t link_index = 0;
657 struct drm_device *dev = adev->dm.ddev;
662 if (notify == NULL) {
663 DRM_ERROR("DMUB HPD callback notification was NULL");
667 if (notify->link_index >= adev->dm.dc->link_count) {
668 DRM_ERROR("DMUB HPD index (%u) is abnormal", notify->link_index);
672 drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
674 link_index = notify->link_index;
676 link = adev->dm.dc->links[link_index];
678 drm_connector_list_iter_begin(dev, &iter);
679 drm_for_each_connector_iter(connector, &iter) {
680 aconnector = to_amdgpu_dm_connector(connector);
681 if (link && aconnector->dc_link == link) {
682 DRM_INFO("DMUB HPD callback: link_index=%u\n", link_index);
683 hpd_aconnector = aconnector;
687 drm_connector_list_iter_end(&iter);
688 drm_modeset_unlock(&dev->mode_config.connection_mutex);
691 handle_hpd_irq_helper(hpd_aconnector);
695 * register_dmub_notify_callback - Sets callback for DMUB notify
696 * @adev: amdgpu_device pointer
697 * @type: Type of dmub notification
698 * @callback: Dmub interrupt callback function
699 * @dmub_int_thread_offload: offload indicator
701 * API to register a dmub callback handler for a dmub notification.
702 * Also sets an indicator of whether callback processing is to be offloaded
703 * to the dmub interrupt handling thread.
704 * Return: true if successfully registered, false if there is existing registration
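 *
 * A typical use, as done later in amdgpu_dm_init(), is:
 *   register_dmub_notify_callback(adev, DMUB_NOTIFICATION_HPD,
 *                                 dmub_hpd_callback, true);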
706 bool register_dmub_notify_callback(struct amdgpu_device *adev, enum dmub_notification_type type,
707 dmub_notify_interrupt_callback_t callback, bool dmub_int_thread_offload)
709 if (callback != NULL && type < ARRAY_SIZE(adev->dm.dmub_thread_offload)) {
710 adev->dm.dmub_callback[type] = callback;
711 adev->dm.dmub_thread_offload[type] = dmub_int_thread_offload;
718 static void dm_handle_hpd_work(struct work_struct *work)
720 struct dmub_hpd_work *dmub_hpd_wrk;
722 dmub_hpd_wrk = container_of(work, struct dmub_hpd_work, handle_hpd_work);
724 if (!dmub_hpd_wrk->dmub_notify) {
725 DRM_ERROR("dmub_hpd_wrk dmub_notify is NULL");
729 if (dmub_hpd_wrk->dmub_notify->type < ARRAY_SIZE(dmub_hpd_wrk->adev->dm.dmub_callback)) {
730 dmub_hpd_wrk->adev->dm.dmub_callback[dmub_hpd_wrk->dmub_notify->type](dmub_hpd_wrk->adev,
731 dmub_hpd_wrk->dmub_notify);
737 #define DMUB_TRACE_MAX_READ 64
739 * dm_dmub_outbox1_low_irq() - Handles Outbox interrupt
740 * @interrupt_params: used for determining the Outbox instance
742 * Handles the Outbox Interrupt
745 static void dm_dmub_outbox1_low_irq(void *interrupt_params)
747 struct dmub_notification notify;
748 struct common_irq_params *irq_params = interrupt_params;
749 struct amdgpu_device *adev = irq_params->adev;
750 struct amdgpu_display_manager *dm = &adev->dm;
751 struct dmcub_trace_buf_entry entry = { 0 };
753 struct dmub_hpd_work *dmub_hpd_wrk;
754 struct dc_link *plink = NULL;
756 if (dc_enable_dmub_notifications(adev->dm.dc) &&
757 irq_params->irq_src == DC_IRQ_SOURCE_DMCUB_OUTBOX) {
758 dmub_hpd_wrk = kzalloc(sizeof(*dmub_hpd_wrk), GFP_ATOMIC);
760 DRM_ERROR("Failed to allocate dmub_hpd_wrk");
763 INIT_WORK(&dmub_hpd_wrk->handle_hpd_work, dm_handle_hpd_work);
766 dc_stat_get_dmub_notification(adev->dm.dc, ¬ify);
767 if (notify.type >= ARRAY_SIZE(dm->dmub_thread_offload)) {
768 DRM_ERROR("DM: notify type %d invalid!", notify.type);
771 if (dm->dmub_thread_offload[notify.type] == true) {
772 dmub_hpd_wrk->dmub_notify = ¬ify;
773 dmub_hpd_wrk->adev = adev;
774 if (notify.type == DMUB_NOTIFICATION_HPD) {
775 plink = adev->dm.dc->links[notify.link_index];
779 DP_HPD_PLUG ? true : false;
782 queue_work(adev->dm.delayed_hpd_wq, &dmub_hpd_wrk->handle_hpd_work);
784 dm->dmub_callback[notify.type](adev, ¬ify);
786 } while (notify.pending_notification);
791 if (dc_dmub_srv_get_dmub_outbox0_msg(dm->dc, &entry)) {
792 trace_amdgpu_dmub_trace_high_irq(entry.trace_code, entry.tick_count,
793 entry.param0, entry.param1);
795 DRM_DEBUG_DRIVER("trace_code:%u, tick_count:%u, param0:%u, param1:%u\n",
796 entry.trace_code, entry.tick_count, entry.param0, entry.param1);
802 } while (count <= DMUB_TRACE_MAX_READ);
804 if (count > DMUB_TRACE_MAX_READ)
805 DRM_DEBUG_DRIVER("Warning: count > DMUB_TRACE_MAX_READ");
809 static int dm_set_clockgating_state(void *handle,
810 enum amd_clockgating_state state)
815 static int dm_set_powergating_state(void *handle,
816 enum amd_powergating_state state)
821 /* Prototypes of private functions */
822 static int dm_early_init(void *handle);
824 /* Allocate memory for FBC compressed data */
825 static void amdgpu_dm_fbc_init(struct drm_connector *connector)
827 struct drm_device *dev = connector->dev;
828 struct amdgpu_device *adev = drm_to_adev(dev);
829 struct dm_compressor_info *compressor = &adev->dm.compressor;
830 struct amdgpu_dm_connector *aconn = to_amdgpu_dm_connector(connector);
831 struct drm_display_mode *mode;
832 unsigned long max_size = 0;
834 if (adev->dm.dc->fbc_compressor == NULL)
837 if (aconn->dc_link->connector_signal != SIGNAL_TYPE_EDP)
840 if (compressor->bo_ptr)
844 list_for_each_entry(mode, &connector->modes, head) {
845 if (max_size < mode->htotal * mode->vtotal)
846 max_size = mode->htotal * mode->vtotal;
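/*
 * The compressor buffer below is sized for the largest listed mode,
 * presumably at 4 bytes per pixel (hence max_size * 4).
 */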
850 int r = amdgpu_bo_create_kernel(adev, max_size * 4, PAGE_SIZE,
851 AMDGPU_GEM_DOMAIN_GTT, &compressor->bo_ptr,
852 &compressor->gpu_addr, &compressor->cpu_addr);
855 DRM_ERROR("DM: Failed to initialize FBC\n");
857 adev->dm.dc->ctx->fbc_gpu_addr = compressor->gpu_addr;
858 DRM_INFO("DM: FBC alloc %lu\n", max_size*4);
865 static int amdgpu_dm_audio_component_get_eld(struct device *kdev, int port,
866 int pipe, bool *enabled,
867 unsigned char *buf, int max_bytes)
869 struct drm_device *dev = dev_get_drvdata(kdev);
870 struct amdgpu_device *adev = drm_to_adev(dev);
871 struct drm_connector *connector;
872 struct drm_connector_list_iter conn_iter;
873 struct amdgpu_dm_connector *aconnector;
878 mutex_lock(&adev->dm.audio_lock);
880 drm_connector_list_iter_begin(dev, &conn_iter);
881 drm_for_each_connector_iter(connector, &conn_iter) {
882 aconnector = to_amdgpu_dm_connector(connector);
883 if (aconnector->audio_inst != port)
887 ret = drm_eld_size(connector->eld);
888 memcpy(buf, connector->eld, min(max_bytes, ret));
892 drm_connector_list_iter_end(&conn_iter);
894 mutex_unlock(&adev->dm.audio_lock);
896 DRM_DEBUG_KMS("Get ELD : idx=%d ret=%d en=%d\n", port, ret, *enabled);
901 static const struct drm_audio_component_ops amdgpu_dm_audio_component_ops = {
902 .get_eld = amdgpu_dm_audio_component_get_eld,
905 static int amdgpu_dm_audio_component_bind(struct device *kdev,
906 struct device *hda_kdev, void *data)
908 struct drm_device *dev = dev_get_drvdata(kdev);
909 struct amdgpu_device *adev = drm_to_adev(dev);
910 struct drm_audio_component *acomp = data;
912 acomp->ops = &amdgpu_dm_audio_component_ops;
914 adev->dm.audio_component = acomp;
919 static void amdgpu_dm_audio_component_unbind(struct device *kdev,
920 struct device *hda_kdev, void *data)
922 struct drm_device *dev = dev_get_drvdata(kdev);
923 struct amdgpu_device *adev = drm_to_adev(dev);
924 struct drm_audio_component *acomp = data;
928 adev->dm.audio_component = NULL;
931 static const struct component_ops amdgpu_dm_audio_component_bind_ops = {
932 .bind = amdgpu_dm_audio_component_bind,
933 .unbind = amdgpu_dm_audio_component_unbind,
936 static int amdgpu_dm_audio_init(struct amdgpu_device *adev)
943 adev->mode_info.audio.enabled = true;
945 adev->mode_info.audio.num_pins = adev->dm.dc->res_pool->audio_count;
947 for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
948 adev->mode_info.audio.pin[i].channels = -1;
949 adev->mode_info.audio.pin[i].rate = -1;
950 adev->mode_info.audio.pin[i].bits_per_sample = -1;
951 adev->mode_info.audio.pin[i].status_bits = 0;
952 adev->mode_info.audio.pin[i].category_code = 0;
953 adev->mode_info.audio.pin[i].connected = false;
954 adev->mode_info.audio.pin[i].id =
955 adev->dm.dc->res_pool->audios[i]->inst;
956 adev->mode_info.audio.pin[i].offset = 0;
959 ret = component_add(adev->dev, &amdgpu_dm_audio_component_bind_ops);
963 adev->dm.audio_registered = true;
968 static void amdgpu_dm_audio_fini(struct amdgpu_device *adev)
973 if (!adev->mode_info.audio.enabled)
976 if (adev->dm.audio_registered) {
977 component_del(adev->dev, &amdgpu_dm_audio_component_bind_ops);
978 adev->dm.audio_registered = false;
981 /* TODO: Disable audio? */
983 adev->mode_info.audio.enabled = false;
986 static void amdgpu_dm_audio_eld_notify(struct amdgpu_device *adev, int pin)
988 struct drm_audio_component *acomp = adev->dm.audio_component;
990 if (acomp && acomp->audio_ops && acomp->audio_ops->pin_eld_notify) {
991 DRM_DEBUG_KMS("Notify ELD: %d\n", pin);
993 acomp->audio_ops->pin_eld_notify(acomp->audio_ops->audio_ptr,
998 static int dm_dmub_hw_init(struct amdgpu_device *adev)
1000 const struct dmcub_firmware_header_v1_0 *hdr;
1001 struct dmub_srv *dmub_srv = adev->dm.dmub_srv;
1002 struct dmub_srv_fb_info *fb_info = adev->dm.dmub_fb_info;
1003 const struct firmware *dmub_fw = adev->dm.dmub_fw;
1004 struct dmcu *dmcu = adev->dm.dc->res_pool->dmcu;
1005 struct abm *abm = adev->dm.dc->res_pool->abm;
1006 struct dmub_srv_hw_params hw_params;
1007 enum dmub_status status;
1008 const unsigned char *fw_inst_const, *fw_bss_data;
1009 uint32_t i, fw_inst_const_size, fw_bss_data_size;
1010 bool has_hw_support;
1013 /* DMUB isn't supported on the ASIC. */
1017 DRM_ERROR("No framebuffer info for DMUB service.\n");
1022 /* Firmware required for DMUB support. */
1023 DRM_ERROR("No firmware provided for DMUB.\n");
1027 status = dmub_srv_has_hw_support(dmub_srv, &has_hw_support);
1028 if (status != DMUB_STATUS_OK) {
1029 DRM_ERROR("Error checking HW support for DMUB: %d\n", status);
1033 if (!has_hw_support) {
1034 DRM_INFO("DMUB unsupported on ASIC\n");
1038 hdr = (const struct dmcub_firmware_header_v1_0 *)dmub_fw->data;
1040 fw_inst_const = dmub_fw->data +
1041 le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
1044 fw_bss_data = dmub_fw->data +
1045 le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
1046 le32_to_cpu(hdr->inst_const_bytes);
1048 /* Copy firmware and bios info into FB memory. */
1049 fw_inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
1050 PSP_HEADER_BYTES - PSP_FOOTER_BYTES;
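/*
 * The firmware's inst_const region is wrapped by a PSP header and footer
 * which are not copied into the DMUB window, hence the subtraction above.
 */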
1052 fw_bss_data_size = le32_to_cpu(hdr->bss_data_bytes);
1054 /* if adev->firmware.load_type == AMDGPU_FW_LOAD_PSP,
1055 * amdgpu_ucode_init_single_fw will load dmub firmware
1056 * fw_inst_const part to cw0; otherwise, the firmware back door load
1057 * will be done by dm_dmub_hw_init
1059 if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
1060 memcpy(fb_info->fb[DMUB_WINDOW_0_INST_CONST].cpu_addr, fw_inst_const,
1061 fw_inst_const_size);
1064 if (fw_bss_data_size)
1065 memcpy(fb_info->fb[DMUB_WINDOW_2_BSS_DATA].cpu_addr,
1066 fw_bss_data, fw_bss_data_size);
1068 /* Copy firmware bios info into FB memory. */
1069 memcpy(fb_info->fb[DMUB_WINDOW_3_VBIOS].cpu_addr, adev->bios,
1072 /* Reset regions that need to be reset. */
1073 memset(fb_info->fb[DMUB_WINDOW_4_MAILBOX].cpu_addr, 0,
1074 fb_info->fb[DMUB_WINDOW_4_MAILBOX].size);
1076 memset(fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].cpu_addr, 0,
1077 fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].size);
1079 memset(fb_info->fb[DMUB_WINDOW_6_FW_STATE].cpu_addr, 0,
1080 fb_info->fb[DMUB_WINDOW_6_FW_STATE].size);
1082 /* Initialize hardware. */
1083 memset(&hw_params, 0, sizeof(hw_params));
1084 hw_params.fb_base = adev->gmc.fb_start;
1085 hw_params.fb_offset = adev->gmc.aper_base;
1087 /* backdoor load firmware and trigger dmub running */
1088 if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
1089 hw_params.load_inst_const = true;
1092 hw_params.psp_version = dmcu->psp_version;
1094 for (i = 0; i < fb_info->num_fb; ++i)
1095 hw_params.fb[i] = &fb_info->fb[i];
1097 status = dmub_srv_hw_init(dmub_srv, &hw_params);
1098 if (status != DMUB_STATUS_OK) {
1099 DRM_ERROR("Error initializing DMUB HW: %d\n", status);
1103 /* Wait for firmware load to finish. */
1104 status = dmub_srv_wait_for_auto_load(dmub_srv, 100000);
1105 if (status != DMUB_STATUS_OK)
1106 DRM_WARN("Wait for DMUB auto-load failed: %d\n", status);
1108 /* Init DMCU and ABM if available. */
1110 dmcu->funcs->dmcu_init(dmcu);
1111 abm->dmcu_is_running = dmcu->funcs->is_dmcu_initialized(dmcu);
1114 if (!adev->dm.dc->ctx->dmub_srv)
1115 adev->dm.dc->ctx->dmub_srv = dc_dmub_srv_create(adev->dm.dc, dmub_srv);
1116 if (!adev->dm.dc->ctx->dmub_srv) {
1117 DRM_ERROR("Couldn't allocate DC DMUB server!\n");
1121 DRM_INFO("DMUB hardware initialized: version=0x%08X\n",
1122 adev->dm.dmcub_fw_version);
1127 #if defined(CONFIG_DRM_AMD_DC_DCN)
1128 static void mmhub_read_system_context(struct amdgpu_device *adev, struct dc_phy_addr_space_config *pa_config)
1131 uint32_t logical_addr_low;
1132 uint32_t logical_addr_high;
1133 uint32_t agp_base, agp_bot, agp_top;
1134 PHYSICAL_ADDRESS_LOC page_table_start, page_table_end, page_table_base;
1136 memset(pa_config, 0, sizeof(*pa_config));
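/*
 * Note on the shifts below: system aperture addresses are handled in
 * 256 KiB units (>> 18), the AGP aperture in 16 MiB units (>> 24) and
 * GART page table addresses in 4 KiB pages (>> 12); the shifts are
 * undone again when pa_config is filled in further down.
 */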
1138 logical_addr_low = min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18;
1139 pt_base = amdgpu_gmc_pd_addr(adev->gart.bo);
1141 if (adev->apu_flags & AMD_APU_IS_RAVEN2)
1143 * Raven2 has a HW issue that prevents it from using the vram which
1144 * is out of MC_VM_SYSTEM_APERTURE_HIGH_ADDR. The workaround is to
1145 * increase the system aperture high address (add 1) to get rid of
1146 * the VM fault and hardware hang.
1148 logical_addr_high = max((adev->gmc.fb_end >> 18) + 0x1, adev->gmc.agp_end >> 18);
1150 logical_addr_high = max(adev->gmc.fb_end, adev->gmc.agp_end) >> 18;
1153 agp_bot = adev->gmc.agp_start >> 24;
1154 agp_top = adev->gmc.agp_end >> 24;
1157 page_table_start.high_part = (u32)(adev->gmc.gart_start >> 44) & 0xF;
1158 page_table_start.low_part = (u32)(adev->gmc.gart_start >> 12);
1159 page_table_end.high_part = (u32)(adev->gmc.gart_end >> 44) & 0xF;
1160 page_table_end.low_part = (u32)(adev->gmc.gart_end >> 12);
1161 page_table_base.high_part = upper_32_bits(pt_base) & 0xF;
1162 page_table_base.low_part = lower_32_bits(pt_base);
1164 pa_config->system_aperture.start_addr = (uint64_t)logical_addr_low << 18;
1165 pa_config->system_aperture.end_addr = (uint64_t)logical_addr_high << 18;
1167 pa_config->system_aperture.agp_base = (uint64_t)agp_base << 24 ;
1168 pa_config->system_aperture.agp_bot = (uint64_t)agp_bot << 24;
1169 pa_config->system_aperture.agp_top = (uint64_t)agp_top << 24;
1171 pa_config->system_aperture.fb_base = adev->gmc.fb_start;
1172 pa_config->system_aperture.fb_offset = adev->gmc.aper_base;
1173 pa_config->system_aperture.fb_top = adev->gmc.fb_end;
1175 pa_config->gart_config.page_table_start_addr = page_table_start.quad_part << 12;
1176 pa_config->gart_config.page_table_end_addr = page_table_end.quad_part << 12;
1177 pa_config->gart_config.page_table_base_addr = page_table_base.quad_part;
1179 pa_config->is_hvm_enabled = 0;
1183 #if defined(CONFIG_DRM_AMD_DC_DCN)
1184 static void vblank_control_worker(struct work_struct *work)
1186 struct vblank_control_work *vblank_work =
1187 container_of(work, struct vblank_control_work, work);
1188 struct amdgpu_display_manager *dm = vblank_work->dm;
1190 mutex_lock(&dm->dc_lock);
1192 if (vblank_work->enable)
1193 dm->active_vblank_irq_count++;
1194 else if (dm->active_vblank_irq_count)
1195 dm->active_vblank_irq_count--;
1197 dc_allow_idle_optimizations(dm->dc, dm->active_vblank_irq_count == 0);
1199 DRM_DEBUG_KMS("Allow idle optimizations (MALL): %d\n", dm->active_vblank_irq_count == 0);
1201 /* Control PSR based on vblank requirements from OS */
1202 if (vblank_work->stream && vblank_work->stream->link) {
1203 if (vblank_work->enable) {
1204 if (vblank_work->stream->link->psr_settings.psr_allow_active)
1205 amdgpu_dm_psr_disable(vblank_work->stream);
1206 } else if (vblank_work->stream->link->psr_settings.psr_feature_enabled &&
1207 !vblank_work->stream->link->psr_settings.psr_allow_active &&
1208 vblank_work->acrtc->dm_irq_params.allow_psr_entry) {
1209 amdgpu_dm_psr_enable(vblank_work->stream);
1213 mutex_unlock(&dm->dc_lock);
1215 dc_stream_release(vblank_work->stream);
1222 static void dm_handle_hpd_rx_offload_work(struct work_struct *work)
1224 struct hpd_rx_irq_offload_work *offload_work;
1225 struct amdgpu_dm_connector *aconnector;
1226 struct dc_link *dc_link;
1227 struct amdgpu_device *adev;
1228 enum dc_connection_type new_connection_type = dc_connection_none;
1229 unsigned long flags;
1231 offload_work = container_of(work, struct hpd_rx_irq_offload_work, work);
1232 aconnector = offload_work->offload_wq->aconnector;
1235 DRM_ERROR("Can't retrieve aconnector in hpd_rx_irq_offload_work");
1239 adev = drm_to_adev(aconnector->base.dev);
1240 dc_link = aconnector->dc_link;
1242 mutex_lock(&aconnector->hpd_lock);
1243 if (!dc_link_detect_sink(dc_link, &new_connection_type))
1244 DRM_ERROR("KMS: Failed to detect connector\n");
1245 mutex_unlock(&aconnector->hpd_lock);
1247 if (new_connection_type == dc_connection_none)
1250 if (amdgpu_in_reset(adev))
1253 mutex_lock(&adev->dm.dc_lock);
1254 if (offload_work->data.bytes.device_service_irq.bits.AUTOMATED_TEST)
1255 dc_link_dp_handle_automated_test(dc_link);
1256 else if ((dc_link->connector_signal != SIGNAL_TYPE_EDP) &&
1257 hpd_rx_irq_check_link_loss_status(dc_link, &offload_work->data) &&
1258 dc_link_dp_allow_hpd_rx_irq(dc_link)) {
1259 dc_link_dp_handle_link_loss(dc_link);
1260 spin_lock_irqsave(&offload_work->offload_wq->offload_lock, flags);
1261 offload_work->offload_wq->is_handling_link_loss = false;
1262 spin_unlock_irqrestore(&offload_work->offload_wq->offload_lock, flags);
1264 mutex_unlock(&adev->dm.dc_lock);
1267 kfree(offload_work);
1271 static struct hpd_rx_irq_offload_work_queue *hpd_rx_irq_create_workqueue(struct dc *dc)
1273 int max_caps = dc->caps.max_links;
1275 struct hpd_rx_irq_offload_work_queue *hpd_rx_offload_wq = NULL;
1277 hpd_rx_offload_wq = kcalloc(max_caps, sizeof(*hpd_rx_offload_wq), GFP_KERNEL);
1279 if (!hpd_rx_offload_wq)
1283 for (i = 0; i < max_caps; i++) {
1284 hpd_rx_offload_wq[i].wq =
1285 create_singlethread_workqueue("amdgpu_dm_hpd_rx_offload_wq");
1287 if (hpd_rx_offload_wq[i].wq == NULL) {
1288 DRM_ERROR("create amdgpu_dm_hpd_rx_offload_wq fail!");
1292 spin_lock_init(&hpd_rx_offload_wq[i].offload_lock);
1295 return hpd_rx_offload_wq;
1298 static int amdgpu_dm_init(struct amdgpu_device *adev)
1300 struct dc_init_data init_data;
1301 #ifdef CONFIG_DRM_AMD_DC_HDCP
1302 struct dc_callback_init init_params;
1306 adev->dm.ddev = adev_to_drm(adev);
1307 adev->dm.adev = adev;
1309 /* Zero all the fields */
1310 memset(&init_data, 0, sizeof(init_data));
1311 #ifdef CONFIG_DRM_AMD_DC_HDCP
1312 memset(&init_params, 0, sizeof(init_params));
1315 mutex_init(&adev->dm.dc_lock);
1316 mutex_init(&adev->dm.audio_lock);
1317 #if defined(CONFIG_DRM_AMD_DC_DCN)
1318 spin_lock_init(&adev->dm.vblank_lock);
1321 if (amdgpu_dm_irq_init(adev)) {
1322 DRM_ERROR("amdgpu: failed to initialize DM IRQ support.\n");
1326 init_data.asic_id.chip_family = adev->family;
1328 init_data.asic_id.pci_revision_id = adev->pdev->revision;
1329 init_data.asic_id.hw_internal_rev = adev->external_rev_id;
1330 init_data.asic_id.chip_id = adev->pdev->device;
1332 init_data.asic_id.vram_width = adev->gmc.vram_width;
1333 /* TODO: initialize init_data.asic_id.vram_type here!!!! */
1334 init_data.asic_id.atombios_base_address =
1335 adev->mode_info.atom_context->bios;
1337 init_data.driver = adev;
1339 adev->dm.cgs_device = amdgpu_cgs_create_device(adev);
1341 if (!adev->dm.cgs_device) {
1342 DRM_ERROR("amdgpu: failed to create cgs device.\n");
1346 init_data.cgs_device = adev->dm.cgs_device;
1348 init_data.dce_environment = DCE_ENV_PRODUCTION_DRV;
1350 switch (adev->asic_type) {
1353 init_data.flags.gpu_vm_support = true;
1356 switch (adev->ip_versions[DCE_HWIP][0]) {
1357 case IP_VERSION(2, 1, 0):
1358 init_data.flags.gpu_vm_support = true;
1359 if (ASICREV_IS_GREEN_SARDINE(adev->external_rev_id))
1360 init_data.flags.disable_dmcu = true;
1362 case IP_VERSION(1, 0, 0):
1363 case IP_VERSION(1, 0, 1):
1364 case IP_VERSION(3, 0, 1):
1365 case IP_VERSION(3, 1, 2):
1366 case IP_VERSION(3, 1, 3):
1367 init_data.flags.gpu_vm_support = true;
1369 case IP_VERSION(2, 0, 3):
1370 init_data.flags.disable_dmcu = true;
1378 if (amdgpu_dc_feature_mask & DC_FBC_MASK)
1379 init_data.flags.fbc_support = true;
1381 if (amdgpu_dc_feature_mask & DC_MULTI_MON_PP_MCLK_SWITCH_MASK)
1382 init_data.flags.multi_mon_pp_mclk_switch = true;
1384 if (amdgpu_dc_feature_mask & DC_DISABLE_FRACTIONAL_PWM_MASK)
1385 init_data.flags.disable_fractional_pwm = true;
1387 if (amdgpu_dc_feature_mask & DC_EDP_NO_POWER_SEQUENCING)
1388 init_data.flags.edp_no_power_sequencing = true;
1390 init_data.flags.power_down_display_on_boot = true;
1392 INIT_LIST_HEAD(&adev->dm.da_list);
1393 /* Display Core create. */
1394 adev->dm.dc = dc_create(&init_data);
1397 DRM_INFO("Display Core initialized with v%s!\n", DC_VER);
1399 DRM_INFO("Display Core failed to initialize with v%s!\n", DC_VER);
1403 if (amdgpu_dc_debug_mask & DC_DISABLE_PIPE_SPLIT) {
1404 adev->dm.dc->debug.force_single_disp_pipe_split = false;
1405 adev->dm.dc->debug.pipe_split_policy = MPC_SPLIT_AVOID;
1408 if (adev->asic_type != CHIP_CARRIZO && adev->asic_type != CHIP_STONEY)
1409 adev->dm.dc->debug.disable_stutter = amdgpu_pp_feature_mask & PP_STUTTER_MODE ? false : true;
1411 if (amdgpu_dc_debug_mask & DC_DISABLE_STUTTER)
1412 adev->dm.dc->debug.disable_stutter = true;
1414 if (amdgpu_dc_debug_mask & DC_DISABLE_DSC)
1415 adev->dm.dc->debug.disable_dsc = true;
1417 if (amdgpu_dc_debug_mask & DC_DISABLE_CLOCK_GATING)
1418 adev->dm.dc->debug.disable_clock_gate = true;
1420 r = dm_dmub_hw_init(adev);
1422 DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
1426 dc_hardware_init(adev->dm.dc);
1428 adev->dm.hpd_rx_offload_wq = hpd_rx_irq_create_workqueue(adev->dm.dc);
1429 if (!adev->dm.hpd_rx_offload_wq) {
1430 DRM_ERROR("amdgpu: failed to create hpd rx offload workqueue.\n");
1434 #if defined(CONFIG_DRM_AMD_DC_DCN)
1435 if ((adev->flags & AMD_IS_APU) && (adev->asic_type >= CHIP_CARRIZO)) {
1436 struct dc_phy_addr_space_config pa_config;
1438 mmhub_read_system_context(adev, &pa_config);
1440 // Call the DC init_memory func
1441 dc_setup_system_context(adev->dm.dc, &pa_config);
1445 adev->dm.freesync_module = mod_freesync_create(adev->dm.dc);
1446 if (!adev->dm.freesync_module) {
1448 "amdgpu: failed to initialize freesync_module.\n");
1450 DRM_DEBUG_DRIVER("amdgpu: freesync_module init done %p.\n",
1451 adev->dm.freesync_module);
1453 amdgpu_dm_init_color_mod();
1455 #if defined(CONFIG_DRM_AMD_DC_DCN)
1456 if (adev->dm.dc->caps.max_links > 0) {
1457 adev->dm.vblank_control_workqueue =
1458 create_singlethread_workqueue("dm_vblank_control_workqueue");
1459 if (!adev->dm.vblank_control_workqueue)
1460 DRM_ERROR("amdgpu: failed to initialize vblank_workqueue.\n");
1464 #ifdef CONFIG_DRM_AMD_DC_HDCP
1465 if (adev->dm.dc->caps.max_links > 0 && adev->family >= AMDGPU_FAMILY_RV) {
1466 adev->dm.hdcp_workqueue = hdcp_create_workqueue(adev, &init_params.cp_psp, adev->dm.dc);
1468 if (!adev->dm.hdcp_workqueue)
1469 DRM_ERROR("amdgpu: failed to initialize hdcp_workqueue.\n");
1471 DRM_DEBUG_DRIVER("amdgpu: hdcp_workqueue init done %p.\n", adev->dm.hdcp_workqueue);
1473 dc_init_callbacks(adev->dm.dc, &init_params);
1476 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
1477 adev->dm.crc_rd_wrk = amdgpu_dm_crtc_secure_display_create_work();
1479 if (dc_enable_dmub_notifications(adev->dm.dc)) {
1480 init_completion(&adev->dm.dmub_aux_transfer_done);
1481 adev->dm.dmub_notify = kzalloc(sizeof(struct dmub_notification), GFP_KERNEL);
1482 if (!adev->dm.dmub_notify) {
1483 DRM_INFO("amdgpu: fail to allocate adev->dm.dmub_notify");
1487 adev->dm.delayed_hpd_wq = create_singlethread_workqueue("amdgpu_dm_hpd_wq");
1488 if (!adev->dm.delayed_hpd_wq) {
1489 DRM_ERROR("amdgpu: failed to create hpd offload workqueue.\n");
1493 amdgpu_dm_outbox_init(adev);
1494 #if defined(CONFIG_DRM_AMD_DC_DCN)
1495 if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_AUX_REPLY,
1496 dmub_aux_setconfig_callback, false)) {
1497 DRM_ERROR("amdgpu: fail to register dmub aux callback");
1500 if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_HPD, dmub_hpd_callback, true)) {
1501 DRM_ERROR("amdgpu: fail to register dmub hpd callback");
1507 if (amdgpu_dm_initialize_drm_device(adev)) {
1509 "amdgpu: failed to initialize sw for display support.\n");
1513 /* create fake encoders for MST */
1514 dm_dp_create_fake_mst_encoders(adev);
1516 /* TODO: Add_display_info? */
1518 /* TODO use dynamic cursor width */
1519 adev_to_drm(adev)->mode_config.cursor_width = adev->dm.dc->caps.max_cursor_size;
1520 adev_to_drm(adev)->mode_config.cursor_height = adev->dm.dc->caps.max_cursor_size;
1522 if (drm_vblank_init(adev_to_drm(adev), adev->dm.display_indexes_num)) {
1524 "amdgpu: failed to initialize sw for display support.\n");
1529 DRM_DEBUG_DRIVER("KMS initialized.\n");
1533 amdgpu_dm_fini(adev);
1538 static int amdgpu_dm_early_fini(void *handle)
1540 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1542 amdgpu_dm_audio_fini(adev);
1547 static void amdgpu_dm_fini(struct amdgpu_device *adev)
1551 #if defined(CONFIG_DRM_AMD_DC_DCN)
1552 if (adev->dm.vblank_control_workqueue) {
1553 destroy_workqueue(adev->dm.vblank_control_workqueue);
1554 adev->dm.vblank_control_workqueue = NULL;
1558 for (i = 0; i < adev->dm.display_indexes_num; i++) {
1559 drm_encoder_cleanup(&adev->dm.mst_encoders[i].base);
1562 amdgpu_dm_destroy_drm_device(&adev->dm);
1564 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
1565 if (adev->dm.crc_rd_wrk) {
1566 flush_work(&adev->dm.crc_rd_wrk->notify_ta_work);
1567 kfree(adev->dm.crc_rd_wrk);
1568 adev->dm.crc_rd_wrk = NULL;
1571 #ifdef CONFIG_DRM_AMD_DC_HDCP
1572 if (adev->dm.hdcp_workqueue) {
1573 hdcp_destroy(&adev->dev->kobj, adev->dm.hdcp_workqueue);
1574 adev->dm.hdcp_workqueue = NULL;
1578 dc_deinit_callbacks(adev->dm.dc);
1581 dc_dmub_srv_destroy(&adev->dm.dc->ctx->dmub_srv);
1583 if (dc_enable_dmub_notifications(adev->dm.dc)) {
1584 kfree(adev->dm.dmub_notify);
1585 adev->dm.dmub_notify = NULL;
1586 destroy_workqueue(adev->dm.delayed_hpd_wq);
1587 adev->dm.delayed_hpd_wq = NULL;
1590 if (adev->dm.dmub_bo)
1591 amdgpu_bo_free_kernel(&adev->dm.dmub_bo,
1592 &adev->dm.dmub_bo_gpu_addr,
1593 &adev->dm.dmub_bo_cpu_addr);
1595 if (adev->dm.hpd_rx_offload_wq) {
1596 for (i = 0; i < adev->dm.dc->caps.max_links; i++) {
1597 if (adev->dm.hpd_rx_offload_wq[i].wq) {
1598 destroy_workqueue(adev->dm.hpd_rx_offload_wq[i].wq);
1599 adev->dm.hpd_rx_offload_wq[i].wq = NULL;
1603 kfree(adev->dm.hpd_rx_offload_wq);
1604 adev->dm.hpd_rx_offload_wq = NULL;
1607 /* DC Destroy TODO: Replace destroy DAL */
1609 dc_destroy(&adev->dm.dc);
1611 * TODO: pageflip, vblank interrupt
1613 * amdgpu_dm_irq_fini(adev);
1616 if (adev->dm.cgs_device) {
1617 amdgpu_cgs_destroy_device(adev->dm.cgs_device);
1618 adev->dm.cgs_device = NULL;
1620 if (adev->dm.freesync_module) {
1621 mod_freesync_destroy(adev->dm.freesync_module);
1622 adev->dm.freesync_module = NULL;
1625 mutex_destroy(&adev->dm.audio_lock);
1626 mutex_destroy(&adev->dm.dc_lock);
1631 static int load_dmcu_fw(struct amdgpu_device *adev)
1633 const char *fw_name_dmcu = NULL;
1635 const struct dmcu_firmware_header_v1_0 *hdr;
1637 switch (adev->asic_type) {
1638 #if defined(CONFIG_DRM_AMD_DC_SI)
1653 case CHIP_POLARIS11:
1654 case CHIP_POLARIS10:
1655 case CHIP_POLARIS12:
1662 fw_name_dmcu = FIRMWARE_NAVI12_DMCU;
1665 if (ASICREV_IS_PICASSO(adev->external_rev_id))
1666 fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
1667 else if (ASICREV_IS_RAVEN2(adev->external_rev_id))
1668 fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
1673 switch (adev->ip_versions[DCE_HWIP][0]) {
1674 case IP_VERSION(2, 0, 2):
1675 case IP_VERSION(2, 0, 3):
1676 case IP_VERSION(2, 0, 0):
1677 case IP_VERSION(2, 1, 0):
1678 case IP_VERSION(3, 0, 0):
1679 case IP_VERSION(3, 0, 2):
1680 case IP_VERSION(3, 0, 3):
1681 case IP_VERSION(3, 0, 1):
1682 case IP_VERSION(3, 1, 2):
1683 case IP_VERSION(3, 1, 3):
1688 DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
1692 if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
1693 DRM_DEBUG_KMS("dm: DMCU firmware not supported on direct or SMU loading\n");
1697 r = request_firmware_direct(&adev->dm.fw_dmcu, fw_name_dmcu, adev->dev);
1699 /* DMCU firmware is not necessary, so don't raise a fuss if it's missing */
1700 DRM_DEBUG_KMS("dm: DMCU firmware not found\n");
1701 adev->dm.fw_dmcu = NULL;
1705 dev_err(adev->dev, "amdgpu_dm: Can't load firmware \"%s\"\n",
1710 r = amdgpu_ucode_validate(adev->dm.fw_dmcu);
1712 dev_err(adev->dev, "amdgpu_dm: Can't validate firmware \"%s\"\n",
1714 release_firmware(adev->dm.fw_dmcu);
1715 adev->dm.fw_dmcu = NULL;
1719 hdr = (const struct dmcu_firmware_header_v1_0 *)adev->dm.fw_dmcu->data;
1720 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].ucode_id = AMDGPU_UCODE_ID_DMCU_ERAM;
1721 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].fw = adev->dm.fw_dmcu;
1722 adev->firmware.fw_size +=
1723 ALIGN(le32_to_cpu(hdr->header.ucode_size_bytes) - le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);
1725 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].ucode_id = AMDGPU_UCODE_ID_DMCU_INTV;
1726 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].fw = adev->dm.fw_dmcu;
1727 adev->firmware.fw_size +=
1728 ALIGN(le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);
1730 adev->dm.dmcu_fw_version = le32_to_cpu(hdr->header.ucode_version);
1732 DRM_DEBUG_KMS("PSP loading DMCU firmware\n");
1737 static uint32_t amdgpu_dm_dmub_reg_read(void *ctx, uint32_t address)
1739 struct amdgpu_device *adev = ctx;
1741 return dm_read_reg(adev->dm.dc->ctx, address);
1744 static void amdgpu_dm_dmub_reg_write(void *ctx, uint32_t address,
1747 struct amdgpu_device *adev = ctx;
1749 return dm_write_reg(adev->dm.dc->ctx, address, value);
1752 static int dm_dmub_sw_init(struct amdgpu_device *adev)
1754 struct dmub_srv_create_params create_params;
1755 struct dmub_srv_region_params region_params;
1756 struct dmub_srv_region_info region_info;
1757 struct dmub_srv_fb_params fb_params;
1758 struct dmub_srv_fb_info *fb_info;
1759 struct dmub_srv *dmub_srv;
1760 const struct dmcub_firmware_header_v1_0 *hdr;
1761 const char *fw_name_dmub;
1762 enum dmub_asic dmub_asic;
1763 enum dmub_status status;
1766 switch (adev->ip_versions[DCE_HWIP][0]) {
1767 case IP_VERSION(2, 1, 0):
1768 dmub_asic = DMUB_ASIC_DCN21;
1769 fw_name_dmub = FIRMWARE_RENOIR_DMUB;
1770 if (ASICREV_IS_GREEN_SARDINE(adev->external_rev_id))
1771 fw_name_dmub = FIRMWARE_GREEN_SARDINE_DMUB;
1773 case IP_VERSION(3, 0, 0):
1774 if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(10, 3, 0)) {
1775 dmub_asic = DMUB_ASIC_DCN30;
1776 fw_name_dmub = FIRMWARE_SIENNA_CICHLID_DMUB;
1778 dmub_asic = DMUB_ASIC_DCN30;
1779 fw_name_dmub = FIRMWARE_NAVY_FLOUNDER_DMUB;
1782 case IP_VERSION(3, 0, 1):
1783 dmub_asic = DMUB_ASIC_DCN301;
1784 fw_name_dmub = FIRMWARE_VANGOGH_DMUB;
1786 case IP_VERSION(3, 0, 2):
1787 dmub_asic = DMUB_ASIC_DCN302;
1788 fw_name_dmub = FIRMWARE_DIMGREY_CAVEFISH_DMUB;
1790 case IP_VERSION(3, 0, 3):
1791 dmub_asic = DMUB_ASIC_DCN303;
1792 fw_name_dmub = FIRMWARE_BEIGE_GOBY_DMUB;
1794 case IP_VERSION(3, 1, 2):
1795 case IP_VERSION(3, 1, 3):
1796 dmub_asic = DMUB_ASIC_DCN31;
1797 fw_name_dmub = FIRMWARE_YELLOW_CARP_DMUB;
1801 /* ASIC doesn't support DMUB. */
1805 r = request_firmware_direct(&adev->dm.dmub_fw, fw_name_dmub, adev->dev);
1807 DRM_ERROR("DMUB firmware loading failed: %d\n", r);
1811 r = amdgpu_ucode_validate(adev->dm.dmub_fw);
1813 DRM_ERROR("Couldn't validate DMUB firmware: %d\n", r);
1817 hdr = (const struct dmcub_firmware_header_v1_0 *)adev->dm.dmub_fw->data;
1818 adev->dm.dmcub_fw_version = le32_to_cpu(hdr->header.ucode_version);
1820 if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
1821 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].ucode_id =
1822 AMDGPU_UCODE_ID_DMCUB;
1823 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].fw =
1825 adev->firmware.fw_size +=
1826 ALIGN(le32_to_cpu(hdr->inst_const_bytes), PAGE_SIZE);
1828 DRM_INFO("Loading DMUB firmware via PSP: version=0x%08X\n",
1829 adev->dm.dmcub_fw_version);
1833 adev->dm.dmub_srv = kzalloc(sizeof(*adev->dm.dmub_srv), GFP_KERNEL);
1834 dmub_srv = adev->dm.dmub_srv;
1837 DRM_ERROR("Failed to allocate DMUB service!\n");
1841 memset(&create_params, 0, sizeof(create_params));
1842 create_params.user_ctx = adev;
1843 create_params.funcs.reg_read = amdgpu_dm_dmub_reg_read;
1844 create_params.funcs.reg_write = amdgpu_dm_dmub_reg_write;
1845 create_params.asic = dmub_asic;
1847 /* Create the DMUB service. */
1848 status = dmub_srv_create(dmub_srv, &create_params);
1849 if (status != DMUB_STATUS_OK) {
1850 DRM_ERROR("Error creating DMUB service: %d\n", status);
1854 /* Calculate the size of all the regions for the DMUB service. */
1855 memset(®ion_params, 0, sizeof(region_params));
1857 region_params.inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
1858 PSP_HEADER_BYTES - PSP_FOOTER_BYTES;
1859 region_params.bss_data_size = le32_to_cpu(hdr->bss_data_bytes);
1860 region_params.vbios_size = adev->bios_size;
1861 region_params.fw_bss_data = region_params.bss_data_size ?
1862 adev->dm.dmub_fw->data +
1863 le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
1864 le32_to_cpu(hdr->inst_const_bytes) : NULL;
1865 region_params.fw_inst_const =
1866 adev->dm.dmub_fw->data +
1867 le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
1870 status = dmub_srv_calc_region_info(dmub_srv, ®ion_params,
1873 if (status != DMUB_STATUS_OK) {
1874 DRM_ERROR("Error calculating DMUB region info: %d\n", status);
1879 * Allocate a framebuffer based on the total size of all the regions.
1880 * TODO: Move this into GART.
1882 r = amdgpu_bo_create_kernel(adev, region_info.fb_size, PAGE_SIZE,
1883 AMDGPU_GEM_DOMAIN_VRAM, &adev->dm.dmub_bo,
1884 &adev->dm.dmub_bo_gpu_addr,
1885 &adev->dm.dmub_bo_cpu_addr);
1889 /* Rebase the regions on the framebuffer address. */
1890 memset(&fb_params, 0, sizeof(fb_params));
1891 fb_params.cpu_addr = adev->dm.dmub_bo_cpu_addr;
1892 fb_params.gpu_addr = adev->dm.dmub_bo_gpu_addr;
1893 fb_params.region_info = ®ion_info;
1895 adev->dm.dmub_fb_info =
1896 kzalloc(sizeof(*adev->dm.dmub_fb_info), GFP_KERNEL);
1897 fb_info = adev->dm.dmub_fb_info;
1901 "Failed to allocate framebuffer info for DMUB service!\n");
1905 status = dmub_srv_calc_fb_info(dmub_srv, &fb_params, fb_info);
1906 if (status != DMUB_STATUS_OK) {
1907 DRM_ERROR("Error calculating DMUB FB info: %d\n", status);
1914 static int dm_sw_init(void *handle)
1916 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1919 r = dm_dmub_sw_init(adev);
1923 return load_dmcu_fw(adev);
1926 static int dm_sw_fini(void *handle)
1928 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1930 kfree(adev->dm.dmub_fb_info);
1931 adev->dm.dmub_fb_info = NULL;
1933 if (adev->dm.dmub_srv) {
1934 dmub_srv_destroy(adev->dm.dmub_srv);
1935 adev->dm.dmub_srv = NULL;
1938 release_firmware(adev->dm.dmub_fw);
1939 adev->dm.dmub_fw = NULL;
1941 release_firmware(adev->dm.fw_dmcu);
1942 adev->dm.fw_dmcu = NULL;
1947 static int detect_mst_link_for_all_connectors(struct drm_device *dev)
1949 struct amdgpu_dm_connector *aconnector;
1950 struct drm_connector *connector;
1951 struct drm_connector_list_iter iter;
1954 drm_connector_list_iter_begin(dev, &iter);
1955 drm_for_each_connector_iter(connector, &iter) {
1956 aconnector = to_amdgpu_dm_connector(connector);
1957 if (aconnector->dc_link->type == dc_connection_mst_branch &&
1958 aconnector->mst_mgr.aux) {
1959 DRM_DEBUG_DRIVER("DM_MST: starting TM on aconnector: %p [id: %d]\n",
1961 aconnector->base.base.id);
1963 ret = drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, true);
1965 DRM_ERROR("DM_MST: Failed to start MST\n");
1966 aconnector->dc_link->type =
1967 dc_connection_single;
1972 drm_connector_list_iter_end(&iter);
1977 static int dm_late_init(void *handle)
1979 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1981 struct dmcu_iram_parameters params;
1982 unsigned int linear_lut[16];
1984 struct dmcu *dmcu = NULL;
1986 dmcu = adev->dm.dc->res_pool->dmcu;
1988 for (i = 0; i < 16; i++)
1989 linear_lut[i] = 0xFFFF * i / 15;
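/* 16 evenly spaced points from 0 to 0xFFFF, i.e. an identity ramp. */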
1992 params.backlight_ramping_override = false;
1993 params.backlight_ramping_start = 0xCCCC;
1994 params.backlight_ramping_reduction = 0xCCCCCCCC;
1995 params.backlight_lut_array_size = 16;
1996 params.backlight_lut_array = linear_lut;
1998 /* Min backlight level after ABM reduction. Don't allow below 1%:
1999 * 0xFFFF * 0.01 = 0x28F
2001 params.min_abm_backlight = 0x28F;
2002 /* In the case where abm is implemented on dmcub,
2003 * the dmcu object will be NULL.
2004 * ABM 2.4 and up are implemented on dmcub.
2007 if (!dmcu_load_iram(dmcu, params))
2009 } else if (adev->dm.dc->ctx->dmub_srv) {
2010 struct dc_link *edp_links[MAX_NUM_EDP];
2013 get_edp_links(adev->dm.dc, edp_links, &edp_num);
2014 for (i = 0; i < edp_num; i++) {
2015 if (!dmub_init_abm_config(adev->dm.dc->res_pool, params, i))
2020 return detect_mst_link_for_all_connectors(adev_to_drm(adev));
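/*
 * Suspend or resume the MST topology managers of all root MST connectors.
 * If a manager fails to resume, MST is torn down on that connector and a
 * hotplug event is sent so userspace can re-probe.
 */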
2023 static void s3_handle_mst(struct drm_device *dev, bool suspend)
2025 struct amdgpu_dm_connector *aconnector;
2026 struct drm_connector *connector;
2027 struct drm_connector_list_iter iter;
2028 struct drm_dp_mst_topology_mgr *mgr;
2030 bool need_hotplug = false;
2032 drm_connector_list_iter_begin(dev, &iter);
2033 drm_for_each_connector_iter(connector, &iter) {
2034 aconnector = to_amdgpu_dm_connector(connector);
2035 if (aconnector->dc_link->type != dc_connection_mst_branch ||
2036 aconnector->mst_port)
2039 mgr = &aconnector->mst_mgr;
2042 drm_dp_mst_topology_mgr_suspend(mgr);
2044 ret = drm_dp_mst_topology_mgr_resume(mgr, true);
2046 drm_dp_mst_topology_mgr_set_mst(mgr, false);
2047 need_hotplug = true;
2051 drm_connector_list_iter_end(&iter);
2054 drm_kms_helper_hotplug_event(dev);
2057 static int amdgpu_dm_smu_write_watermarks_table(struct amdgpu_device *adev)
2059 struct smu_context *smu = &adev->smu;
2062 if (!is_support_sw_smu(adev))
2065 /* This interface is for dGPU Navi1x. The Linux dc-pplib interface depends
2066 * on the Windows driver dc implementation.
2067 * For Navi1x, clock settings of dcn watermarks are fixed. the settings
2068 * should be passed to smu during boot up and resume from s3.
2069 * boot up: dc calculate dcn watermark clock settings within dc_create,
2070 * dcn20_resource_construct
2071 * then call pplib functions below to pass the settings to smu:
2072 * smu_set_watermarks_for_clock_ranges
2073 * smu_set_watermarks_table
2074 * navi10_set_watermarks_table
2075 * smu_write_watermarks_table
2077 * For Renoir, clock settings of dcn watermarks are also fixed values.
2078 * dc has implemented a different flow for the Windows driver:
2079 * dc_hardware_init / dc_set_power_state
2084 * smu_set_watermarks_for_clock_ranges
2085 * renoir_set_watermarks_table
2086 * smu_write_watermarks_table
2089 * dc_hardware_init -> amdgpu_dm_init
2090 * dc_set_power_state --> dm_resume
2092 * therefore, this function applies to Navi10/12/14 but not Renoir.
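 *
 * Note: the IP version switch below only falls through to the SMU table
 * write for DCE_HWIP 2.0.x (Navi1x); other ASICs are expected to return
 * early without touching the watermarks table.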
2095 switch (adev->ip_versions[DCE_HWIP][0]) {
2096 case IP_VERSION(2, 0, 2):
2097 case IP_VERSION(2, 0, 0):
2103 ret = smu_write_watermarks_table(smu);
2105 DRM_ERROR("Failed to update WMTABLE!\n");
2113 * dm_hw_init() - Initialize DC device
2114 * @handle: The base driver device containing the amdgpu_dm device.
2116 * Initialize the &struct amdgpu_display_manager device. This involves calling
2117 * the initializers of each DM component, then populating the struct with them.
2119 * Although the function implies hardware initialization, both hardware and
2120 * software are initialized here. Splitting them out to their relevant init
2121 * hooks is a future TODO item.
2123 * Some notable things that are initialized here:
2125 * - Display Core, both software and hardware
2126 * - DC modules that we need (freesync and color management)
2127 * - DRM software states
2128 * - Interrupt sources and handlers
2130 * - Debug FS entries, if enabled
2132 static int dm_hw_init(void *handle)
2134 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2135 /* Create DAL display manager */
2136 amdgpu_dm_init(adev);
2137 amdgpu_dm_hpd_init(adev);
2143 * dm_hw_fini() - Teardown DC device
2144 * @handle: The base driver device containing the amdgpu_dm device.
2146 * Teardown components within &struct amdgpu_display_manager that require
2147 * cleanup. This involves cleaning up the DRM device, DC, and any modules that
2148 * were loaded. Also flush IRQ workqueues and disable them.
2150 static int dm_hw_fini(void *handle)
2152 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2154 amdgpu_dm_hpd_fini(adev);
2156 amdgpu_dm_irq_fini(adev);
2157 amdgpu_dm_fini(adev);
2162 static int dm_enable_vblank(struct drm_crtc *crtc);
2163 static void dm_disable_vblank(struct drm_crtc *crtc);
2165 static void dm_gpureset_toggle_interrupts(struct amdgpu_device *adev,
2166 struct dc_state *state, bool enable)
2168 enum dc_irq_source irq_source;
2169 struct amdgpu_crtc *acrtc;
2173 for (i = 0; i < state->stream_count; i++) {
2174 acrtc = get_crtc_by_otg_inst(
2175 adev, state->stream_status[i].primary_otg_inst);
2177 if (acrtc && state->stream_status[i].plane_count != 0) {
2178 irq_source = IRQ_TYPE_PFLIP + acrtc->otg_inst;
2179 rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
2180 DRM_DEBUG_VBL("crtc %d - pflip irq %sabling: r=%d\n",
2181 acrtc->crtc_id, enable ? "en" : "dis", rc);
2183 DRM_WARN("Failed to %s pflip interrupts\n",
2184 enable ? "enable" : "disable");
2187 rc = dm_enable_vblank(&acrtc->base);
2189 DRM_WARN("Failed to enable vblank interrupts\n");
2191 dm_disable_vblank(&acrtc->base);
2199 static enum dc_status amdgpu_dm_commit_zero_streams(struct dc *dc)
2201 struct dc_state *context = NULL;
2202 enum dc_status res = DC_ERROR_UNEXPECTED;
2204 struct dc_stream_state *del_streams[MAX_PIPES];
2205 int del_streams_count = 0;
2207 memset(del_streams, 0, sizeof(del_streams));
2209 context = dc_create_state(dc);
2210 if (context == NULL)
2211 goto context_alloc_fail;
2213 dc_resource_state_copy_construct_current(dc, context);
2215 /* First remove from context all streams */
2216 for (i = 0; i < context->stream_count; i++) {
2217 struct dc_stream_state *stream = context->streams[i];
2219 del_streams[del_streams_count++] = stream;
2222 /* Remove all planes for removed streams and then remove the streams */
2223 for (i = 0; i < del_streams_count; i++) {
2224 if (!dc_rem_all_planes_for_stream(dc, del_streams[i], context)) {
2225 res = DC_FAIL_DETACH_SURFACES;
2229 res = dc_remove_stream_from_ctx(dc, context, del_streams[i]);
2235 res = dc_validate_global_state(dc, context, false);
2238 DRM_ERROR("%s:resource validation failed, dc_status:%d\n", __func__, res);
2242 res = dc_commit_state(dc, context);
2245 dc_release_state(context);
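/*
 * Flush the per-link HPD RX offload workqueues so any pending offload work
 * finishes before DC is suspended or reset.
 */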
2251 static void hpd_rx_irq_work_suspend(struct amdgpu_display_manager *dm)
2255 if (dm->hpd_rx_offload_wq) {
2256 for (i = 0; i < dm->dc->caps.max_links; i++)
2257 flush_workqueue(dm->hpd_rx_offload_wq[i].wq);
2261 static int dm_suspend(void *handle)
2263 struct amdgpu_device *adev = handle;
2264 struct amdgpu_display_manager *dm = &adev->dm;
2267 if (amdgpu_in_reset(adev)) {
2268 mutex_lock(&dm->dc_lock);
2270 #if defined(CONFIG_DRM_AMD_DC_DCN)
2271 dc_allow_idle_optimizations(adev->dm.dc, false);
2274 dm->cached_dc_state = dc_copy_state(dm->dc->current_state);
2276 dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, false);
2278 amdgpu_dm_commit_zero_streams(dm->dc);
2280 amdgpu_dm_irq_suspend(adev);
2282 hpd_rx_irq_work_suspend(dm);
2287 WARN_ON(adev->dm.cached_state);
2288 adev->dm.cached_state = drm_atomic_helper_suspend(adev_to_drm(adev));
2290 s3_handle_mst(adev_to_drm(adev), true);
2292 amdgpu_dm_irq_suspend(adev);
2294 hpd_rx_irq_work_suspend(dm);
2296 dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D3);
2301 static struct amdgpu_dm_connector *
2302 amdgpu_dm_find_first_crtc_matching_connector(struct drm_atomic_state *state,
2303 struct drm_crtc *crtc)
2306 struct drm_connector_state *new_con_state;
2307 struct drm_connector *connector;
2308 struct drm_crtc *crtc_from_state;
2310 for_each_new_connector_in_state(state, connector, new_con_state, i) {
2311 crtc_from_state = new_con_state->crtc;
2313 if (crtc_from_state == crtc)
2314 return to_amdgpu_dm_connector(connector);
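/*
 * Fabricate a local sink for a link whose connector is forced on while no
 * physical sink was detected, so downstream code still has a sink (and,
 * where possible, an EDID) to work with.
 */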
2320 static void emulated_link_detect(struct dc_link *link)
2322 struct dc_sink_init_data sink_init_data = { 0 };
2323 struct display_sink_capability sink_caps = { 0 };
2324 enum dc_edid_status edid_status;
2325 struct dc_context *dc_ctx = link->ctx;
2326 struct dc_sink *sink = NULL;
2327 struct dc_sink *prev_sink = NULL;
2329 link->type = dc_connection_none;
2330 prev_sink = link->local_sink;
2333 dc_sink_release(prev_sink);
2335 switch (link->connector_signal) {
2336 case SIGNAL_TYPE_HDMI_TYPE_A: {
2337 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2338 sink_caps.signal = SIGNAL_TYPE_HDMI_TYPE_A;
2342 case SIGNAL_TYPE_DVI_SINGLE_LINK: {
2343 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2344 sink_caps.signal = SIGNAL_TYPE_DVI_SINGLE_LINK;
2348 case SIGNAL_TYPE_DVI_DUAL_LINK: {
2349 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2350 sink_caps.signal = SIGNAL_TYPE_DVI_DUAL_LINK;
2354 case SIGNAL_TYPE_LVDS: {
2355 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2356 sink_caps.signal = SIGNAL_TYPE_LVDS;
2360 case SIGNAL_TYPE_EDP: {
2361 sink_caps.transaction_type =
2362 DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
2363 sink_caps.signal = SIGNAL_TYPE_EDP;
2367 case SIGNAL_TYPE_DISPLAY_PORT: {
2368 sink_caps.transaction_type =
2369 DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
2370 sink_caps.signal = SIGNAL_TYPE_VIRTUAL;
2375 DC_ERROR("Invalid connector type! signal:%d\n",
2376 link->connector_signal);
2380 sink_init_data.link = link;
2381 sink_init_data.sink_signal = sink_caps.signal;
2383 sink = dc_sink_create(&sink_init_data);
2385 DC_ERROR("Failed to create sink!\n");
2389 /* dc_sink_create returns a new reference */
2390 link->local_sink = sink;
2392 edid_status = dm_helpers_read_local_edid(
2397 if (edid_status != EDID_OK)
2398 DC_ERROR("Failed to read EDID");
2402 static void dm_gpureset_commit_state(struct dc_state *dc_state,
2403 struct amdgpu_display_manager *dm)
2406 struct dc_surface_update surface_updates[MAX_SURFACES];
2407 struct dc_plane_info plane_infos[MAX_SURFACES];
2408 struct dc_scaling_info scaling_infos[MAX_SURFACES];
2409 struct dc_flip_addrs flip_addrs[MAX_SURFACES];
2410 struct dc_stream_update stream_update;
2414 bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
2417 dm_error("Failed to allocate update bundle\n");
2421 for (k = 0; k < dc_state->stream_count; k++) {
2422 bundle->stream_update.stream = dc_state->streams[k];
2424 for (m = 0; m < dc_state->stream_status->plane_count; m++) {
2425 bundle->surface_updates[m].surface =
2426 dc_state->stream_status->plane_states[m];
2427 bundle->surface_updates[m].surface->force_full_update =
2430 dc_commit_updates_for_stream(
2431 dm->dc, bundle->surface_updates,
2432 dc_state->stream_status->plane_count,
2433 dc_state->streams[k], &bundle->stream_update, dc_state);
2442 static void dm_set_dpms_off(struct dc_link *link, struct dm_crtc_state *acrtc_state)
2444 struct dc_stream_state *stream_state;
2445 struct amdgpu_dm_connector *aconnector = link->priv;
2446 struct amdgpu_device *adev = drm_to_adev(aconnector->base.dev);
2447 struct dc_stream_update stream_update;
2448 bool dpms_off = true;
2450 memset(&stream_update, 0, sizeof(stream_update));
2451 stream_update.dpms_off = &dpms_off;
2453 mutex_lock(&adev->dm.dc_lock);
2454 stream_state = dc_stream_find_from_link(link);
2456 if (stream_state == NULL) {
2457 DRM_DEBUG_DRIVER("Error finding stream state associated with link!\n");
2458 mutex_unlock(&adev->dm.dc_lock);
2462 stream_update.stream = stream_state;
2463 acrtc_state->force_dpms_off = true;
2464 dc_commit_updates_for_stream(stream_state->ctx->dc, NULL, 0,
2465 stream_state, &stream_update,
2466 stream_state->ctx->dc->current_state);
2467 mutex_unlock(&adev->dm.dc_lock);
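/*
 * dm_resume handles two paths: when called as part of a GPU reset it replays
 * the DC state cached by dm_suspend; otherwise (normal S3 resume) it
 * re-initializes DMUB, re-detects every link and replays the cached DRM
 * atomic state.
 */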
2470 static int dm_resume(void *handle)
2472 struct amdgpu_device *adev = handle;
2473 struct drm_device *ddev = adev_to_drm(adev);
2474 struct amdgpu_display_manager *dm = &adev->dm;
2475 struct amdgpu_dm_connector *aconnector;
2476 struct drm_connector *connector;
2477 struct drm_connector_list_iter iter;
2478 struct drm_crtc *crtc;
2479 struct drm_crtc_state *new_crtc_state;
2480 struct dm_crtc_state *dm_new_crtc_state;
2481 struct drm_plane *plane;
2482 struct drm_plane_state *new_plane_state;
2483 struct dm_plane_state *dm_new_plane_state;
2484 struct dm_atomic_state *dm_state = to_dm_atomic_state(dm->atomic_obj.state);
2485 enum dc_connection_type new_connection_type = dc_connection_none;
2486 struct dc_state *dc_state;
2489 if (amdgpu_in_reset(adev)) {
2490 dc_state = dm->cached_dc_state;
2492 r = dm_dmub_hw_init(adev);
2494 DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
2496 dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
2499 amdgpu_dm_irq_resume_early(adev);
2501 for (i = 0; i < dc_state->stream_count; i++) {
2502 dc_state->streams[i]->mode_changed = true;
2503 for (j = 0; j < dc_state->stream_status->plane_count; j++) {
2504 dc_state->stream_status->plane_states[j]->update_flags.raw
2508 #if defined(CONFIG_DRM_AMD_DC_DCN)
2510 * Resource allocation happens for link encoders for newer ASICs in
2511 * dc_validate_global_state, so we need to revalidate it.
2513 * This shouldn't fail (it passed once before), so warn if it does.
2515 WARN_ON(dc_validate_global_state(dm->dc, dc_state, false) != DC_OK);
2518 WARN_ON(!dc_commit_state(dm->dc, dc_state));
2520 dm_gpureset_commit_state(dm->cached_dc_state, dm);
2522 dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, true);
2524 dc_release_state(dm->cached_dc_state);
2525 dm->cached_dc_state = NULL;
2527 amdgpu_dm_irq_resume_late(adev);
2529 mutex_unlock(&dm->dc_lock);
2533 /* Recreate dc_state - DC invalidates it when setting power state to S3. */
2534 dc_release_state(dm_state->context);
2535 dm_state->context = dc_create_state(dm->dc);
2536 /* TODO: Remove dc_state->dccg, use dc->dccg directly. */
2537 dc_resource_state_construct(dm->dc, dm_state->context);
2539 /* Before powering on DC we need to re-initialize DMUB. */
2540 r = dm_dmub_hw_init(adev);
2542 DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
2544 /* power on hardware */
2545 dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
2547 /* program HPD filter */
2551 * early enable HPD Rx IRQ; this should be done before setting the mode, as short
2552 * pulse interrupts are used for MST
2554 amdgpu_dm_irq_resume_early(adev);
2556 /* On resume we need to rewrite the MSTM control bits to enable MST */
2557 s3_handle_mst(ddev, false);
2560 drm_connector_list_iter_begin(ddev, &iter);
2561 drm_for_each_connector_iter(connector, &iter) {
2562 aconnector = to_amdgpu_dm_connector(connector);
2565 * This is the case when traversing through already created
2566 * MST connectors; they should be skipped.
2568 if (aconnector->mst_port)
2571 mutex_lock(&aconnector->hpd_lock);
2572 if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
2573 DRM_ERROR("KMS: Failed to detect connector\n");
2575 if (aconnector->base.force && new_connection_type == dc_connection_none)
2576 emulated_link_detect(aconnector->dc_link);
2578 dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD);
2580 if (aconnector->fake_enable && aconnector->dc_link->local_sink)
2581 aconnector->fake_enable = false;
2583 if (aconnector->dc_sink)
2584 dc_sink_release(aconnector->dc_sink);
2585 aconnector->dc_sink = NULL;
2586 amdgpu_dm_update_connector_after_detect(aconnector);
2587 mutex_unlock(&aconnector->hpd_lock);
2589 drm_connector_list_iter_end(&iter);
2591 /* Force mode set in atomic commit */
2592 for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i)
2593 new_crtc_state->active_changed = true;
2596 * atomic_check is expected to create the dc states. We need to release
2597 * them here, since they were duplicated as part of the suspend
2600 for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i) {
2601 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
2602 if (dm_new_crtc_state->stream) {
2603 WARN_ON(kref_read(&dm_new_crtc_state->stream->refcount) > 1);
2604 dc_stream_release(dm_new_crtc_state->stream);
2605 dm_new_crtc_state->stream = NULL;
2609 for_each_new_plane_in_state(dm->cached_state, plane, new_plane_state, i) {
2610 dm_new_plane_state = to_dm_plane_state(new_plane_state);
2611 if (dm_new_plane_state->dc_state) {
2612 WARN_ON(kref_read(&dm_new_plane_state->dc_state->refcount) > 1);
2613 dc_plane_state_release(dm_new_plane_state->dc_state);
2614 dm_new_plane_state->dc_state = NULL;
2618 drm_atomic_helper_resume(ddev, dm->cached_state);
2620 dm->cached_state = NULL;
2622 amdgpu_dm_irq_resume_late(adev);
2624 amdgpu_dm_smu_write_watermarks_table(adev);
2632 * DM (and consequently DC) is registered in the amdgpu base driver as an IP
2633 * block. When CONFIG_DRM_AMD_DC is enabled, the DM device IP block is added to
2634 * the base driver's device list to be initialized and torn down accordingly.
2636 * The functions to do so are provided as hooks in &struct amd_ip_funcs.
2639 static const struct amd_ip_funcs amdgpu_dm_funcs = {
2641 .early_init = dm_early_init,
2642 .late_init = dm_late_init,
2643 .sw_init = dm_sw_init,
2644 .sw_fini = dm_sw_fini,
2645 .early_fini = amdgpu_dm_early_fini,
2646 .hw_init = dm_hw_init,
2647 .hw_fini = dm_hw_fini,
2648 .suspend = dm_suspend,
2649 .resume = dm_resume,
2650 .is_idle = dm_is_idle,
2651 .wait_for_idle = dm_wait_for_idle,
2652 .check_soft_reset = dm_check_soft_reset,
2653 .soft_reset = dm_soft_reset,
2654 .set_clockgating_state = dm_set_clockgating_state,
2655 .set_powergating_state = dm_set_powergating_state,
2658 const struct amdgpu_ip_block_version dm_ip_block =
2660 .type = AMD_IP_BLOCK_TYPE_DCE,
2664 .funcs = &amdgpu_dm_funcs,
2674 static const struct drm_mode_config_funcs amdgpu_dm_mode_funcs = {
2675 .fb_create = amdgpu_display_user_framebuffer_create,
2676 .get_format_info = amd_get_format_info,
2677 .output_poll_changed = drm_fb_helper_output_poll_changed,
2678 .atomic_check = amdgpu_dm_atomic_check,
2679 .atomic_commit = drm_atomic_helper_commit,
2682 static struct drm_mode_config_helper_funcs amdgpu_dm_mode_config_helperfuncs = {
2683 .atomic_commit_tail = amdgpu_dm_atomic_commit_tail
2686 static void update_connector_ext_caps(struct amdgpu_dm_connector *aconnector)
2688 u32 max_cll, min_cll, max, min, q, r;
2689 struct amdgpu_dm_backlight_caps *caps;
2690 struct amdgpu_display_manager *dm;
2691 struct drm_connector *conn_base;
2692 struct amdgpu_device *adev;
2693 struct dc_link *link = NULL;
2694 static const u8 pre_computed_values[] = {
2695 50, 51, 52, 53, 55, 56, 57, 58, 59, 61, 62, 63, 65, 66, 68, 69,
2696 71, 72, 74, 75, 77, 79, 81, 82, 84, 86, 88, 90, 92, 94, 96, 98};
2699 if (!aconnector || !aconnector->dc_link)
2702 link = aconnector->dc_link;
2703 if (link->connector_signal != SIGNAL_TYPE_EDP)
2706 conn_base = &aconnector->base;
2707 adev = drm_to_adev(conn_base->dev);
2709 for (i = 0; i < dm->num_of_edps; i++) {
2710 if (link == dm->backlight_link[i])
2713 if (i >= dm->num_of_edps)
2715 caps = &dm->backlight_caps[i];
2716 caps->ext_caps = &aconnector->dc_link->dpcd_sink_ext_caps;
2717 caps->aux_support = false;
2718 max_cll = conn_base->hdr_sink_metadata.hdmi_type1.max_cll;
2719 min_cll = conn_base->hdr_sink_metadata.hdmi_type1.min_cll;
2721 if (caps->ext_caps->bits.oled == 1 /*||
2722 caps->ext_caps->bits.sdr_aux_backlight_control == 1 ||
2723 caps->ext_caps->bits.hdr_aux_backlight_control == 1*/)
2724 caps->aux_support = true;
2726 if (amdgpu_backlight == 0)
2727 caps->aux_support = false;
2728 else if (amdgpu_backlight == 1)
2729 caps->aux_support = true;
2731 /* From the specification (CTA-861-G), for calculating the maximum
2732 * luminance we need to use:
2733 * Luminance = 50*2**(CV/32)
2734 * Where CV is a one-byte value.
2735 * For calculating this expression we may need floating-point precision;
2736 * to avoid this complexity level, we take advantage that CV is divided
2737 * by a constant. From Euclid's division algorithm, we know that CV
2738 * can be written as: CV = 32*q + r. Next, we replace CV in the
2739 * Luminance expression and get 50*(2**q)*(2**(r/32)), hence we just
2740 * need to pre-compute the value of r/32. For pre-computing the values
2741 * we just used the following Ruby line:
2742 * (0...32).each {|cv| puts (50*2**(cv/32.0)).round}
2743 * The results of the above expressions can be verified at
2744 * pre_computed_values.
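 *
 * Worked example: for CV = 70 the decomposition gives q = 2 and r = 6, so
 * max = (1 << 2) * pre_computed_values[6] = 4 * 57 = 228, which matches the
 * exact value 50 * 2**(70/32) ~= 227.8.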
2748 max = (1 << q) * pre_computed_values[r];
2750 // min luminance: maxLum * (CV/255)^2 / 100
2751 q = DIV_ROUND_CLOSEST(min_cll, 255);
2752 min = max * DIV_ROUND_CLOSEST((q * q), 100);
2754 caps->aux_max_input_signal = max;
2755 caps->aux_min_input_signal = min;
2758 void amdgpu_dm_update_connector_after_detect(
2759 struct amdgpu_dm_connector *aconnector)
2761 struct drm_connector *connector = &aconnector->base;
2762 struct drm_device *dev = connector->dev;
2763 struct dc_sink *sink;
2765 /* MST handled by drm_mst framework */
2766 if (aconnector->mst_mgr.mst_state == true)
2769 sink = aconnector->dc_link->local_sink;
2771 dc_sink_retain(sink);
2774 * Edid mgmt connector gets first update only in mode_valid hook and then
2775 * the connector sink is set to either fake or physical sink depending on link status.
2776 * Skip if already done during boot.
2778 if (aconnector->base.force != DRM_FORCE_UNSPECIFIED
2779 && aconnector->dc_em_sink) {
2782 * For S3 resume with headless, use the emulated sink (dc_em_sink) to fake a stream
2783 * because on resume connector->sink is set to NULL
2785 mutex_lock(&dev->mode_config.mutex);
2788 if (aconnector->dc_sink) {
2789 amdgpu_dm_update_freesync_caps(connector, NULL);
2791 * retain and release below are used to
2792 * bump up the refcount for the sink because the link doesn't point
2793 * to it anymore after disconnect, so on the next crtc-to-connector
2794 * reshuffle by UMD we would otherwise get an unwanted dc_sink release
2796 dc_sink_release(aconnector->dc_sink);
2798 aconnector->dc_sink = sink;
2799 dc_sink_retain(aconnector->dc_sink);
2800 amdgpu_dm_update_freesync_caps(connector,
2803 amdgpu_dm_update_freesync_caps(connector, NULL);
2804 if (!aconnector->dc_sink) {
2805 aconnector->dc_sink = aconnector->dc_em_sink;
2806 dc_sink_retain(aconnector->dc_sink);
2810 mutex_unlock(&dev->mode_config.mutex);
2813 dc_sink_release(sink);
2818 * TODO: temporary guard while looking for a proper fix.
2819 * If this sink is an MST sink, we should not do anything.
2821 if (sink && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
2822 dc_sink_release(sink);
2826 if (aconnector->dc_sink == sink) {
2828 * We got a DP short pulse (Link Loss, DP CTS, etc...).
2831 DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: dc_sink didn't change.\n",
2832 aconnector->connector_id);
2834 dc_sink_release(sink);
2838 DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: Old sink=%p New sink=%p\n",
2839 aconnector->connector_id, aconnector->dc_sink, sink);
2841 mutex_lock(&dev->mode_config.mutex);
2844 * 1. Update status of the drm connector
2845 * 2. Send an event and let userspace tell us what to do
2849 * TODO: check if we still need the S3 mode update workaround.
2850 * If yes, put it here.
2852 if (aconnector->dc_sink) {
2853 amdgpu_dm_update_freesync_caps(connector, NULL);
2854 dc_sink_release(aconnector->dc_sink);
2857 aconnector->dc_sink = sink;
2858 dc_sink_retain(aconnector->dc_sink);
2859 if (sink->dc_edid.length == 0) {
2860 aconnector->edid = NULL;
2861 if (aconnector->dc_link->aux_mode) {
2862 drm_dp_cec_unset_edid(
2863 &aconnector->dm_dp_aux.aux);
2867 (struct edid *)sink->dc_edid.raw_edid;
2869 drm_connector_update_edid_property(connector,
2871 if (aconnector->dc_link->aux_mode)
2872 drm_dp_cec_set_edid(&aconnector->dm_dp_aux.aux,
2876 amdgpu_dm_update_freesync_caps(connector, aconnector->edid);
2877 update_connector_ext_caps(aconnector);
2879 drm_dp_cec_unset_edid(&aconnector->dm_dp_aux.aux);
2880 amdgpu_dm_update_freesync_caps(connector, NULL);
2881 drm_connector_update_edid_property(connector, NULL);
2882 aconnector->num_modes = 0;
2883 dc_sink_release(aconnector->dc_sink);
2884 aconnector->dc_sink = NULL;
2885 aconnector->edid = NULL;
2886 #ifdef CONFIG_DRM_AMD_DC_HDCP
2887 /* Set CP to DESIRED if it was ENABLED, so we can re-enable it again on hotplug */
2888 if (connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
2889 connector->state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
2893 mutex_unlock(&dev->mode_config.mutex);
2895 update_subconnector_property(aconnector);
2898 dc_sink_release(sink);
2901 static void handle_hpd_irq_helper(struct amdgpu_dm_connector *aconnector)
2903 struct drm_connector *connector = &aconnector->base;
2904 struct drm_device *dev = connector->dev;
2905 enum dc_connection_type new_connection_type = dc_connection_none;
2906 struct amdgpu_device *adev = drm_to_adev(dev);
2907 struct dm_connector_state *dm_con_state = to_dm_connector_state(connector->state);
2908 struct dm_crtc_state *dm_crtc_state = NULL;
2910 if (adev->dm.disable_hpd_irq)
2913 if (dm_con_state->base.state && dm_con_state->base.crtc)
2914 dm_crtc_state = to_dm_crtc_state(drm_atomic_get_crtc_state(
2915 dm_con_state->base.state,
2916 dm_con_state->base.crtc));
2918 * In case of failure or MST, there is no need to update the connector status or notify
2919 * the OS, since (for the MST case) MST does this in its own context.
2921 mutex_lock(&aconnector->hpd_lock);
2923 #ifdef CONFIG_DRM_AMD_DC_HDCP
2924 if (adev->dm.hdcp_workqueue) {
2925 hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
2926 dm_con_state->update_hdcp = true;
2929 if (aconnector->fake_enable)
2930 aconnector->fake_enable = false;
2932 if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
2933 DRM_ERROR("KMS: Failed to detect connector\n");
2935 if (aconnector->base.force && new_connection_type == dc_connection_none) {
2936 emulated_link_detect(aconnector->dc_link);
2938 drm_modeset_lock_all(dev);
2939 dm_restore_drm_connector_state(dev, connector);
2940 drm_modeset_unlock_all(dev);
2942 if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
2943 drm_kms_helper_hotplug_event(dev);
2945 } else if (dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD)) {
2946 if (new_connection_type == dc_connection_none &&
2947 aconnector->dc_link->type == dc_connection_none &&
2949 dm_set_dpms_off(aconnector->dc_link, dm_crtc_state);
2951 amdgpu_dm_update_connector_after_detect(aconnector);
2953 drm_modeset_lock_all(dev);
2954 dm_restore_drm_connector_state(dev, connector);
2955 drm_modeset_unlock_all(dev);
2957 if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
2958 drm_kms_helper_hotplug_event(dev);
2960 mutex_unlock(&aconnector->hpd_lock);
2964 static void handle_hpd_irq(void *param)
2966 struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
2968 handle_hpd_irq_helper(aconnector);
2972 static void dm_handle_mst_sideband_msg(struct amdgpu_dm_connector *aconnector)
2974 uint8_t esi[DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI] = { 0 };
2976 bool new_irq_handled = false;
2978 int dpcd_bytes_to_read;
2980 const int max_process_count = 30;
2981 int process_count = 0;
2983 const struct dc_link_status *link_status = dc_link_get_status(aconnector->dc_link);
2985 if (link_status->dpcd_caps->dpcd_rev.raw < 0x12) {
2986 dpcd_bytes_to_read = DP_LANE0_1_STATUS - DP_SINK_COUNT;
2987 /* DPCD 0x200 - 0x201 for downstream IRQ */
2988 dpcd_addr = DP_SINK_COUNT;
2990 dpcd_bytes_to_read = DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI;
2991 /* DPCD 0x2002 - 0x2005 for downstream IRQ */
2992 dpcd_addr = DP_SINK_COUNT_ESI;
2995 dret = drm_dp_dpcd_read(
2996 &aconnector->dm_dp_aux.aux,
2999 dpcd_bytes_to_read);
3001 while (dret == dpcd_bytes_to_read &&
3002 process_count < max_process_count) {
3008 DRM_DEBUG_DRIVER("ESI %02x %02x %02x\n", esi[0], esi[1], esi[2]);
3009 /* handle HPD short pulse irq */
3010 if (aconnector->mst_mgr.mst_state)
3012 &aconnector->mst_mgr,
3016 if (new_irq_handled) {
3017 /* ACK at DPCD to notify downstream */
3018 const int ack_dpcd_bytes_to_write =
3019 dpcd_bytes_to_read - 1;
3021 for (retry = 0; retry < 3; retry++) {
3024 wret = drm_dp_dpcd_write(
3025 &aconnector->dm_dp_aux.aux,
3028 ack_dpcd_bytes_to_write);
3029 if (wret == ack_dpcd_bytes_to_write)
3033 /* check if there is new irq to be handled */
3034 dret = drm_dp_dpcd_read(
3035 &aconnector->dm_dp_aux.aux,
3038 dpcd_bytes_to_read);
3040 new_irq_handled = false;
3046 if (process_count == max_process_count)
3047 DRM_DEBUG_DRIVER("Loop exceeded max iterations\n");
3050 static void schedule_hpd_rx_offload_work(struct hpd_rx_irq_offload_work_queue *offload_wq,
3051 union hpd_irq_data hpd_irq_data)
3053 struct hpd_rx_irq_offload_work *offload_work =
3054 kzalloc(sizeof(*offload_work), GFP_KERNEL);
3056 if (!offload_work) {
3057 DRM_ERROR("Failed to allocate hpd_rx_irq_offload_work.\n");
3061 INIT_WORK(&offload_work->work, dm_handle_hpd_rx_offload_work);
3062 offload_work->data = hpd_irq_data;
3063 offload_work->offload_wq = offload_wq;
3065 queue_work(offload_wq->wq, &offload_work->work);
3066 DRM_DEBUG_KMS("queue work to handle hpd_rx offload work\n");
3069 static void handle_hpd_rx_irq(void *param)
3071 struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
3072 struct drm_connector *connector = &aconnector->base;
3073 struct drm_device *dev = connector->dev;
3074 struct dc_link *dc_link = aconnector->dc_link;
3075 bool is_mst_root_connector = aconnector->mst_mgr.mst_state;
3076 bool result = false;
3077 enum dc_connection_type new_connection_type = dc_connection_none;
3078 struct amdgpu_device *adev = drm_to_adev(dev);
3079 union hpd_irq_data hpd_irq_data;
3080 bool link_loss = false;
3081 bool has_left_work = false;
3082 int idx = aconnector->base.index;
3083 struct hpd_rx_irq_offload_work_queue *offload_wq = &adev->dm.hpd_rx_offload_wq[idx];
3085 memset(&hpd_irq_data, 0, sizeof(hpd_irq_data));
3087 if (adev->dm.disable_hpd_irq)
3091 * TODO: Temporarily add a mutex to protect the hpd interrupt from a gpio
3092 * conflict; after the i2c helper is implemented, this mutex should be
3095 mutex_lock(&aconnector->hpd_lock);
3097 result = dc_link_handle_hpd_rx_irq(dc_link, &hpd_irq_data,
3098 &link_loss, true, &has_left_work);
3103 if (hpd_irq_data.bytes.device_service_irq.bits.AUTOMATED_TEST) {
3104 schedule_hpd_rx_offload_work(offload_wq, hpd_irq_data);
3108 if (dc_link_dp_allow_hpd_rx_irq(dc_link)) {
3109 if (hpd_irq_data.bytes.device_service_irq.bits.UP_REQ_MSG_RDY ||
3110 hpd_irq_data.bytes.device_service_irq.bits.DOWN_REP_MSG_RDY) {
3111 dm_handle_mst_sideband_msg(aconnector);
3118 spin_lock(&offload_wq->offload_lock);
3119 skip = offload_wq->is_handling_link_loss;
3122 offload_wq->is_handling_link_loss = true;
3124 spin_unlock(&offload_wq->offload_lock);
3127 schedule_hpd_rx_offload_work(offload_wq, hpd_irq_data);
3134 if (result && !is_mst_root_connector) {
3135 /* Downstream Port status changed. */
3136 if (!dc_link_detect_sink(dc_link, &new_connection_type))
3137 DRM_ERROR("KMS: Failed to detect connector\n");
3139 if (aconnector->base.force && new_connection_type == dc_connection_none) {
3140 emulated_link_detect(dc_link);
3142 if (aconnector->fake_enable)
3143 aconnector->fake_enable = false;
3145 amdgpu_dm_update_connector_after_detect(aconnector);
3148 drm_modeset_lock_all(dev);
3149 dm_restore_drm_connector_state(dev, connector);
3150 drm_modeset_unlock_all(dev);
3152 drm_kms_helper_hotplug_event(dev);
3153 } else if (dc_link_detect(dc_link, DETECT_REASON_HPDRX)) {
3155 if (aconnector->fake_enable)
3156 aconnector->fake_enable = false;
3158 amdgpu_dm_update_connector_after_detect(aconnector);
3161 drm_modeset_lock_all(dev);
3162 dm_restore_drm_connector_state(dev, connector);
3163 drm_modeset_unlock_all(dev);
3165 drm_kms_helper_hotplug_event(dev);
3168 #ifdef CONFIG_DRM_AMD_DC_HDCP
3169 if (hpd_irq_data.bytes.device_service_irq.bits.CP_IRQ) {
3170 if (adev->dm.hdcp_workqueue)
3171 hdcp_handle_cpirq(adev->dm.hdcp_workqueue, aconnector->base.index);
3175 if (dc_link->type != dc_connection_mst_branch)
3176 drm_dp_cec_irq(&aconnector->dm_dp_aux.aux);
3178 mutex_unlock(&aconnector->hpd_lock);
3181 static void register_hpd_handlers(struct amdgpu_device *adev)
3183 struct drm_device *dev = adev_to_drm(adev);
3184 struct drm_connector *connector;
3185 struct amdgpu_dm_connector *aconnector;
3186 const struct dc_link *dc_link;
3187 struct dc_interrupt_params int_params = {0};
3189 int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3190 int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3192 list_for_each_entry(connector,
3193 &dev->mode_config.connector_list, head) {
3195 aconnector = to_amdgpu_dm_connector(connector);
3196 dc_link = aconnector->dc_link;
3198 if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd) {
3199 int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
3200 int_params.irq_source = dc_link->irq_source_hpd;
3202 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3204 (void *) aconnector);
3207 if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd_rx) {
3209 /* Also register for DP short pulse (hpd_rx). */
3210 int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
3211 int_params.irq_source = dc_link->irq_source_hpd_rx;
3213 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3215 (void *) aconnector);
3217 if (adev->dm.hpd_rx_offload_wq)
3218 adev->dm.hpd_rx_offload_wq[connector->index].aconnector =
3224 #if defined(CONFIG_DRM_AMD_DC_SI)
3225 /* Register IRQ sources and initialize IRQ callbacks */
3226 static int dce60_register_irq_handlers(struct amdgpu_device *adev)
3228 struct dc *dc = adev->dm.dc;
3229 struct common_irq_params *c_irq_params;
3230 struct dc_interrupt_params int_params = {0};
3233 unsigned client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
3235 int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3236 int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3239 * Actions of amdgpu_irq_add_id():
3240 * 1. Register a set() function with base driver.
3241 * Base driver will call set() function to enable/disable an
3242 * interrupt in DC hardware.
3243 * 2. Register amdgpu_dm_irq_handler().
3244 * Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
3245 * coming from DC hardware.
3246 * amdgpu_dm_irq_handler() will re-direct the interrupt to DC
3247 * for acknowledging and handling. */
3249 /* Use VBLANK interrupt */
3250 for (i = 0; i < adev->mode_info.num_crtc; i++) {
3251 r = amdgpu_irq_add_id(adev, client_id, i + 1, &adev->crtc_irq);
3253 DRM_ERROR("Failed to add crtc irq id!\n");
3257 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3258 int_params.irq_source =
3259 dc_interrupt_to_irq_source(dc, i + 1, 0);
3261 c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
3263 c_irq_params->adev = adev;
3264 c_irq_params->irq_src = int_params.irq_source;
3266 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3267 dm_crtc_high_irq, c_irq_params);
3270 /* Use GRPH_PFLIP interrupt */
3271 for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
3272 i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
3273 r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
3275 DRM_ERROR("Failed to add page flip irq id!\n");
3279 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3280 int_params.irq_source =
3281 dc_interrupt_to_irq_source(dc, i, 0);
3283 c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
3285 c_irq_params->adev = adev;
3286 c_irq_params->irq_src = int_params.irq_source;
3288 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3289 dm_pflip_high_irq, c_irq_params);
3294 r = amdgpu_irq_add_id(adev, client_id,
3295 VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
3297 DRM_ERROR("Failed to add hpd irq id!\n");
3301 register_hpd_handlers(adev);
3307 /* Register IRQ sources and initialize IRQ callbacks */
3308 static int dce110_register_irq_handlers(struct amdgpu_device *adev)
3310 struct dc *dc = adev->dm.dc;
3311 struct common_irq_params *c_irq_params;
3312 struct dc_interrupt_params int_params = {0};
3315 unsigned client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
3317 if (adev->family >= AMDGPU_FAMILY_AI)
3318 client_id = SOC15_IH_CLIENTID_DCE;
3320 int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3321 int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3324 * Actions of amdgpu_irq_add_id():
3325 * 1. Register a set() function with base driver.
3326 * Base driver will call set() function to enable/disable an
3327 * interrupt in DC hardware.
3328 * 2. Register amdgpu_dm_irq_handler().
3329 * Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
3330 * coming from DC hardware.
3331 * amdgpu_dm_irq_handler() will re-direct the interrupt to DC
3332 * for acknowledging and handling. */
3334 /* Use VBLANK interrupt */
3335 for (i = VISLANDS30_IV_SRCID_D1_VERTICAL_INTERRUPT0; i <= VISLANDS30_IV_SRCID_D6_VERTICAL_INTERRUPT0; i++) {
3336 r = amdgpu_irq_add_id(adev, client_id, i, &adev->crtc_irq);
3338 DRM_ERROR("Failed to add crtc irq id!\n");
3342 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3343 int_params.irq_source =
3344 dc_interrupt_to_irq_source(dc, i, 0);
3346 c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
3348 c_irq_params->adev = adev;
3349 c_irq_params->irq_src = int_params.irq_source;
3351 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3352 dm_crtc_high_irq, c_irq_params);
3355 /* Use VUPDATE interrupt */
3356 for (i = VISLANDS30_IV_SRCID_D1_V_UPDATE_INT; i <= VISLANDS30_IV_SRCID_D6_V_UPDATE_INT; i += 2) {
3357 r = amdgpu_irq_add_id(adev, client_id, i, &adev->vupdate_irq);
3359 DRM_ERROR("Failed to add vupdate irq id!\n");
3363 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3364 int_params.irq_source =
3365 dc_interrupt_to_irq_source(dc, i, 0);
3367 c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
3369 c_irq_params->adev = adev;
3370 c_irq_params->irq_src = int_params.irq_source;
3372 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3373 dm_vupdate_high_irq, c_irq_params);
3376 /* Use GRPH_PFLIP interrupt */
3377 for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
3378 i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
3379 r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
3381 DRM_ERROR("Failed to add page flip irq id!\n");
3385 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3386 int_params.irq_source =
3387 dc_interrupt_to_irq_source(dc, i, 0);
3389 c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
3391 c_irq_params->adev = adev;
3392 c_irq_params->irq_src = int_params.irq_source;
3394 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3395 dm_pflip_high_irq, c_irq_params);
3400 r = amdgpu_irq_add_id(adev, client_id,
3401 VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
3403 DRM_ERROR("Failed to add hpd irq id!\n");
3407 register_hpd_handlers(adev);
3412 #if defined(CONFIG_DRM_AMD_DC_DCN)
3413 /* Register IRQ sources and initialize IRQ callbacks */
3414 static int dcn10_register_irq_handlers(struct amdgpu_device *adev)
3416 struct dc *dc = adev->dm.dc;
3417 struct common_irq_params *c_irq_params;
3418 struct dc_interrupt_params int_params = {0};
3421 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
3422 static const unsigned int vrtl_int_srcid[] = {
3423 DCN_1_0__SRCID__OTG1_VERTICAL_INTERRUPT0_CONTROL,
3424 DCN_1_0__SRCID__OTG2_VERTICAL_INTERRUPT0_CONTROL,
3425 DCN_1_0__SRCID__OTG3_VERTICAL_INTERRUPT0_CONTROL,
3426 DCN_1_0__SRCID__OTG4_VERTICAL_INTERRUPT0_CONTROL,
3427 DCN_1_0__SRCID__OTG5_VERTICAL_INTERRUPT0_CONTROL,
3428 DCN_1_0__SRCID__OTG6_VERTICAL_INTERRUPT0_CONTROL
3432 int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3433 int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3436 * Actions of amdgpu_irq_add_id():
3437 * 1. Register a set() function with base driver.
3438 * Base driver will call set() function to enable/disable an
3439 * interrupt in DC hardware.
3440 * 2. Register amdgpu_dm_irq_handler().
3441 * Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
3442 * coming from DC hardware.
3443 * amdgpu_dm_irq_handler() will re-direct the interrupt to DC
3444 * for acknowledging and handling.
3447 /* Use VSTARTUP interrupt */
3448 for (i = DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP;
3449 i <= DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP + adev->mode_info.num_crtc - 1;
3451 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->crtc_irq);
3454 DRM_ERROR("Failed to add crtc irq id!\n");
3458 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3459 int_params.irq_source =
3460 dc_interrupt_to_irq_source(dc, i, 0);
3462 c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
3464 c_irq_params->adev = adev;
3465 c_irq_params->irq_src = int_params.irq_source;
3467 amdgpu_dm_irq_register_interrupt(
3468 adev, &int_params, dm_crtc_high_irq, c_irq_params);
3471 /* Use otg vertical line interrupt */
3472 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
3473 for (i = 0; i <= adev->mode_info.num_crtc - 1; i++) {
3474 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE,
3475 vrtl_int_srcid[i], &adev->vline0_irq);
3478 DRM_ERROR("Failed to add vline0 irq id!\n");
3482 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3483 int_params.irq_source =
3484 dc_interrupt_to_irq_source(dc, vrtl_int_srcid[i], 0);
3486 if (int_params.irq_source == DC_IRQ_SOURCE_INVALID) {
3487 DRM_ERROR("Failed to register vline0 irq %d!\n", vrtl_int_srcid[i]);
3491 c_irq_params = &adev->dm.vline0_params[int_params.irq_source
3492 - DC_IRQ_SOURCE_DC1_VLINE0];
3494 c_irq_params->adev = adev;
3495 c_irq_params->irq_src = int_params.irq_source;
3497 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3498 dm_dcn_vertical_interrupt0_high_irq, c_irq_params);
3502 /* Use VUPDATE_NO_LOCK interrupt on DCN, which seems to correspond to
3503 * the regular VUPDATE interrupt on DCE. We want DC_IRQ_SOURCE_VUPDATEx
3504 * to trigger at end of each vblank, regardless of state of the lock,
3505 * matching DCE behaviour.
3507 for (i = DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT;
3508 i <= DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT + adev->mode_info.num_crtc - 1;
3510 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->vupdate_irq);
3513 DRM_ERROR("Failed to add vupdate irq id!\n");
3517 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3518 int_params.irq_source =
3519 dc_interrupt_to_irq_source(dc, i, 0);
3521 c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
3523 c_irq_params->adev = adev;
3524 c_irq_params->irq_src = int_params.irq_source;
3526 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3527 dm_vupdate_high_irq, c_irq_params);
3530 /* Use GRPH_PFLIP interrupt */
3531 for (i = DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT;
3532 i <= DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT + adev->mode_info.num_crtc - 1;
3534 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->pageflip_irq);
3536 DRM_ERROR("Failed to add page flip irq id!\n");
3540 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3541 int_params.irq_source =
3542 dc_interrupt_to_irq_source(dc, i, 0);
3544 c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
3546 c_irq_params->adev = adev;
3547 c_irq_params->irq_src = int_params.irq_source;
3549 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3550 dm_pflip_high_irq, c_irq_params);
3555 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DC_HPD1_INT,
3558 DRM_ERROR("Failed to add hpd irq id!\n");
3562 register_hpd_handlers(adev);
3566 /* Register Outbox IRQ sources and initialize IRQ callbacks */
3567 static int register_outbox_irq_handlers(struct amdgpu_device *adev)
3569 struct dc *dc = adev->dm.dc;
3570 struct common_irq_params *c_irq_params;
3571 struct dc_interrupt_params int_params = {0};
3574 int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3575 int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3577 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DMCUB_OUTBOX_LOW_PRIORITY_READY_INT,
3578 &adev->dmub_outbox_irq);
3580 DRM_ERROR("Failed to add outbox irq id!\n");
3584 if (dc->ctx->dmub_srv) {
3585 i = DCN_1_0__SRCID__DMCUB_OUTBOX_LOW_PRIORITY_READY_INT;
3586 int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
3587 int_params.irq_source =
3588 dc_interrupt_to_irq_source(dc, i, 0);
3590 c_irq_params = &adev->dm.dmub_outbox_params[0];
3592 c_irq_params->adev = adev;
3593 c_irq_params->irq_src = int_params.irq_source;
3595 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3596 dm_dmub_outbox1_low_irq, c_irq_params);
3604 * Acquires the lock for the atomic state object and returns
3605 * the new atomic state.
3607 * This should only be called during atomic check.
3609 static int dm_atomic_get_state(struct drm_atomic_state *state,
3610 struct dm_atomic_state **dm_state)
3612 struct drm_device *dev = state->dev;
3613 struct amdgpu_device *adev = drm_to_adev(dev);
3614 struct amdgpu_display_manager *dm = &adev->dm;
3615 struct drm_private_state *priv_state;
3620 priv_state = drm_atomic_get_private_obj_state(state, &dm->atomic_obj);
3621 if (IS_ERR(priv_state))
3622 return PTR_ERR(priv_state);
3624 *dm_state = to_dm_atomic_state(priv_state);
3629 static struct dm_atomic_state *
3630 dm_atomic_get_new_state(struct drm_atomic_state *state)
3632 struct drm_device *dev = state->dev;
3633 struct amdgpu_device *adev = drm_to_adev(dev);
3634 struct amdgpu_display_manager *dm = &adev->dm;
3635 struct drm_private_obj *obj;
3636 struct drm_private_state *new_obj_state;
3639 for_each_new_private_obj_in_state(state, obj, new_obj_state, i) {
3640 if (obj->funcs == dm->atomic_obj.funcs)
3641 return to_dm_atomic_state(new_obj_state);
3647 static struct drm_private_state *
3648 dm_atomic_duplicate_state(struct drm_private_obj *obj)
3650 struct dm_atomic_state *old_state, *new_state;
3652 new_state = kzalloc(sizeof(*new_state), GFP_KERNEL);
3656 __drm_atomic_helper_private_obj_duplicate_state(obj, &new_state->base);
3658 old_state = to_dm_atomic_state(obj->state);
3660 if (old_state && old_state->context)
3661 new_state->context = dc_copy_state(old_state->context);
3663 if (!new_state->context) {
3668 return &new_state->base;
3671 static void dm_atomic_destroy_state(struct drm_private_obj *obj,
3672 struct drm_private_state *state)
3674 struct dm_atomic_state *dm_state = to_dm_atomic_state(state);
3676 if (dm_state && dm_state->context)
3677 dc_release_state(dm_state->context);
3682 static struct drm_private_state_funcs dm_atomic_state_funcs = {
3683 .atomic_duplicate_state = dm_atomic_duplicate_state,
3684 .atomic_destroy_state = dm_atomic_destroy_state,
3687 static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev)
3689 struct dm_atomic_state *state;
3692 adev->mode_info.mode_config_initialized = true;
3694 adev_to_drm(adev)->mode_config.funcs = (void *)&amdgpu_dm_mode_funcs;
3695 adev_to_drm(adev)->mode_config.helper_private = &amdgpu_dm_mode_config_helperfuncs;
3697 adev_to_drm(adev)->mode_config.max_width = 16384;
3698 adev_to_drm(adev)->mode_config.max_height = 16384;
3700 adev_to_drm(adev)->mode_config.preferred_depth = 24;
3701 adev_to_drm(adev)->mode_config.prefer_shadow = 1;
3702 /* indicates support for immediate flip */
3703 adev_to_drm(adev)->mode_config.async_page_flip = true;
3705 adev_to_drm(adev)->mode_config.fb_base = adev->gmc.aper_base;
3707 state = kzalloc(sizeof(*state), GFP_KERNEL);
3711 state->context = dc_create_state(adev->dm.dc);
3712 if (!state->context) {
3717 dc_resource_state_copy_construct_current(adev->dm.dc, state->context);
3719 drm_atomic_private_obj_init(adev_to_drm(adev),
3720 &adev->dm.atomic_obj,
3722 &dm_atomic_state_funcs);
3724 r = amdgpu_display_modeset_create_props(adev);
3726 dc_release_state(state->context);
3731 r = amdgpu_dm_audio_init(adev);
3733 dc_release_state(state->context);
3741 #define AMDGPU_DM_DEFAULT_MIN_BACKLIGHT 12
3742 #define AMDGPU_DM_DEFAULT_MAX_BACKLIGHT 255
3743 #define AUX_BL_DEFAULT_TRANSITION_TIME_MS 50
3745 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
3746 defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
3748 static void amdgpu_dm_update_backlight_caps(struct amdgpu_display_manager *dm,
3751 #if defined(CONFIG_ACPI)
3752 struct amdgpu_dm_backlight_caps caps;
3754 memset(&caps, 0, sizeof(caps));
3756 if (dm->backlight_caps[bl_idx].caps_valid)
3759 amdgpu_acpi_get_backlight_caps(&caps);
3760 if (caps.caps_valid) {
3761 dm->backlight_caps[bl_idx].caps_valid = true;
3762 if (caps.aux_support)
3764 dm->backlight_caps[bl_idx].min_input_signal = caps.min_input_signal;
3765 dm->backlight_caps[bl_idx].max_input_signal = caps.max_input_signal;
3767 dm->backlight_caps[bl_idx].min_input_signal =
3768 AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
3769 dm->backlight_caps[bl_idx].max_input_signal =
3770 AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
3773 if (dm->backlight_caps[bl_idx].aux_support)
3776 dm->backlight_caps[bl_idx].min_input_signal = AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
3777 dm->backlight_caps[bl_idx].max_input_signal = AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
3781 static int get_brightness_range(const struct amdgpu_dm_backlight_caps *caps,
3782 unsigned *min, unsigned *max)
3787 if (caps->aux_support) {
3788 // Firmware limits are in nits, DC API wants millinits.
3789 *max = 1000 * caps->aux_max_input_signal;
3790 *min = 1000 * caps->aux_min_input_signal;
3792 // Firmware limits are 8-bit, PWM control is 16-bit.
3793 *max = 0x101 * caps->max_input_signal;
3794 *min = 0x101 * caps->min_input_signal;
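/* 0x101 = 257 maps the 8-bit firmware range onto the 16-bit PWM range
 * exactly: 255 * 0x101 = 0xFFFF. */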
3799 static u32 convert_brightness_from_user(const struct amdgpu_dm_backlight_caps *caps,
3800 uint32_t brightness)
3804 if (!get_brightness_range(caps, &min, &max))
3807 // Rescale 0..255 to min..max
3808 return min + DIV_ROUND_CLOSEST((max - min) * brightness,
3809 AMDGPU_MAX_BL_LEVEL);
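/*
 * Rough example (assuming the default PWM caps above, min_input_signal = 12
 * and max_input_signal = 255, i.e. min = 3084 and max = 65535): a user
 * brightness of 128 maps to 3084 + (65535 - 3084) * 128 / 255 ~= 34432.
 */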
3812 static u32 convert_brightness_to_user(const struct amdgpu_dm_backlight_caps *caps,
3813 uint32_t brightness)
3817 if (!get_brightness_range(caps, &min, &max))
3820 if (brightness < min)
3822 // Rescale min..max to 0..255
3823 return DIV_ROUND_CLOSEST(AMDGPU_MAX_BL_LEVEL * (brightness - min),
3827 static int amdgpu_dm_backlight_set_level(struct amdgpu_display_manager *dm,
3829 u32 user_brightness)
3831 struct amdgpu_dm_backlight_caps caps;
3832 struct dc_link *link;
3836 amdgpu_dm_update_backlight_caps(dm, bl_idx);
3837 caps = dm->backlight_caps[bl_idx];
3839 dm->brightness[bl_idx] = user_brightness;
3840 brightness = convert_brightness_from_user(&caps, dm->brightness[bl_idx]);
3841 link = (struct dc_link *)dm->backlight_link[bl_idx];
3843 /* Change brightness based on AUX property */
3844 if (caps.aux_support) {
3845 rc = dc_link_set_backlight_level_nits(link, true, brightness,
3846 AUX_BL_DEFAULT_TRANSITION_TIME_MS);
3848 DRM_DEBUG("DM: Failed to update backlight via AUX on eDP[%d]\n", bl_idx);
3850 rc = dc_link_set_backlight_level(link, brightness, 0);
3852 DRM_DEBUG("DM: Failed to update backlight on eDP[%d]\n", bl_idx);
3858 static int amdgpu_dm_backlight_update_status(struct backlight_device *bd)
3860 struct amdgpu_display_manager *dm = bl_get_data(bd);
3863 for (i = 0; i < dm->num_of_edps; i++) {
3864 if (bd == dm->backlight_dev[i])
3867 if (i >= AMDGPU_DM_MAX_NUM_EDP)
3869 amdgpu_dm_backlight_set_level(dm, i, bd->props.brightness);
3874 static u32 amdgpu_dm_backlight_get_level(struct amdgpu_display_manager *dm,
3877 struct amdgpu_dm_backlight_caps caps;
3878 struct dc_link *link = (struct dc_link *)dm->backlight_link[bl_idx];
3880 amdgpu_dm_update_backlight_caps(dm, bl_idx);
3881 caps = dm->backlight_caps[bl_idx];
3883 if (caps.aux_support) {
3887 rc = dc_link_get_backlight_level_nits(link, &avg, &peak);
3889 return dm->brightness[bl_idx];
3890 return convert_brightness_to_user(&caps, avg);
3892 int ret = dc_link_get_backlight_level(link);
3894 if (ret == DC_ERROR_UNEXPECTED)
3895 return dm->brightness[bl_idx];
3896 return convert_brightness_to_user(&caps, ret);
3900 static int amdgpu_dm_backlight_get_brightness(struct backlight_device *bd)
3902 struct amdgpu_display_manager *dm = bl_get_data(bd);
3905 for (i = 0; i < dm->num_of_edps; i++) {
3906 if (bd == dm->backlight_dev[i])
3909 if (i >= AMDGPU_DM_MAX_NUM_EDP)
3911 return amdgpu_dm_backlight_get_level(dm, i);
3914 static const struct backlight_ops amdgpu_dm_backlight_ops = {
3915 .options = BL_CORE_SUSPENDRESUME,
3916 .get_brightness = amdgpu_dm_backlight_get_brightness,
3917 .update_status = amdgpu_dm_backlight_update_status,
3921 amdgpu_dm_register_backlight_device(struct amdgpu_display_manager *dm)
3924 struct backlight_properties props = { 0 };
3926 amdgpu_dm_update_backlight_caps(dm, dm->num_of_edps);
3927 dm->brightness[dm->num_of_edps] = AMDGPU_MAX_BL_LEVEL;
3929 props.max_brightness = AMDGPU_MAX_BL_LEVEL;
3930 props.brightness = AMDGPU_MAX_BL_LEVEL;
3931 props.type = BACKLIGHT_RAW;
3933 snprintf(bl_name, sizeof(bl_name), "amdgpu_bl%d",
3934 adev_to_drm(dm->adev)->primary->index + dm->num_of_edps);
3936 dm->backlight_dev[dm->num_of_edps] = backlight_device_register(bl_name,
3937 adev_to_drm(dm->adev)->dev,
3939 &amdgpu_dm_backlight_ops,
3942 if (IS_ERR(dm->backlight_dev[dm->num_of_edps]))
3943 DRM_ERROR("DM: Backlight registration failed!\n");
3945 DRM_DEBUG_DRIVER("DM: Registered Backlight device: %s\n", bl_name);
3949 static int initialize_plane(struct amdgpu_display_manager *dm,
3950 struct amdgpu_mode_info *mode_info, int plane_id,
3951 enum drm_plane_type plane_type,
3952 const struct dc_plane_cap *plane_cap)
3954 struct drm_plane *plane;
3955 unsigned long possible_crtcs;
3958 plane = kzalloc(sizeof(struct drm_plane), GFP_KERNEL);
3960 DRM_ERROR("KMS: Failed to allocate plane\n");
3963 plane->type = plane_type;
3966 * HACK: IGT tests expect that the primary plane for a CRTC
3967 * can only have one possible CRTC. Only expose support for
3968 * any CRTC if they're not going to be used as a primary plane
3969 * for a CRTC - like overlay or underlay planes.
3971 possible_crtcs = 1 << plane_id;
3972 if (plane_id >= dm->dc->caps.max_streams)
3973 possible_crtcs = 0xff;
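/* i.e. primary plane N may only be bound to CRTC N, while planes past
 * max_streams (overlays/underlays) may attach to any of the first eight
 * CRTCs. */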
3975 ret = amdgpu_dm_plane_init(dm, plane, possible_crtcs, plane_cap);
3978 DRM_ERROR("KMS: Failed to initialize plane\n");
3984 mode_info->planes[plane_id] = plane;
3990 static void register_backlight_device(struct amdgpu_display_manager *dm,
3991 struct dc_link *link)
3993 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
3994 defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
3996 if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
3997 link->type != dc_connection_none) {
3999 * Even if registration fails, we should continue with
4000 * DM initialization because not having a backlight control
4001 * is better than a black screen.
4003 if (!dm->backlight_dev[dm->num_of_edps])
4004 amdgpu_dm_register_backlight_device(dm);
4006 if (dm->backlight_dev[dm->num_of_edps]) {
4007 dm->backlight_link[dm->num_of_edps] = link;
4016 * In this architecture, the association
4017 * connector -> encoder -> crtc
4018 * is not really required. The crtc and connector will hold the
4019 * display_index as an abstraction to use with the DAL component.
4021 * Returns 0 on success
4023 static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
4025 struct amdgpu_display_manager *dm = &adev->dm;
4027 struct amdgpu_dm_connector *aconnector = NULL;
4028 struct amdgpu_encoder *aencoder = NULL;
4029 struct amdgpu_mode_info *mode_info = &adev->mode_info;
4031 int32_t primary_planes;
4032 enum dc_connection_type new_connection_type = dc_connection_none;
4033 const struct dc_plane_cap *plane;
4035 dm->display_indexes_num = dm->dc->caps.max_streams;
4036 /* Update the actual number of crtcs used */
4037 adev->mode_info.num_crtc = adev->dm.display_indexes_num;
4039 link_cnt = dm->dc->caps.max_links;
4040 if (amdgpu_dm_mode_config_init(dm->adev)) {
4041 DRM_ERROR("DM: Failed to initialize mode config\n");
4045 /* There is one primary plane per CRTC */
4046 primary_planes = dm->dc->caps.max_streams;
4047 ASSERT(primary_planes <= AMDGPU_MAX_PLANES);
4050 * Initialize primary planes, implicit planes for legacy IOCTLS.
4051 * Order is reversed to match iteration order in atomic check.
4053 for (i = (primary_planes - 1); i >= 0; i--) {
4054 plane = &dm->dc->caps.planes[i];
4056 if (initialize_plane(dm, mode_info, i,
4057 DRM_PLANE_TYPE_PRIMARY, plane)) {
4058 DRM_ERROR("KMS: Failed to initialize primary plane\n");
4064 * Initialize overlay planes, index starting after primary planes.
4065 * These planes have a higher DRM index than the primary planes since
4066 * they should be considered as having a higher z-order.
4067 * Order is reversed to match iteration order in atomic check.
4069 * Only support DCN for now, and only expose one so we don't encourage
4070 * userspace to use up all the pipes.
4072 for (i = 0; i < dm->dc->caps.max_planes; ++i) {
4073 struct dc_plane_cap *plane = &dm->dc->caps.planes[i];
4075 if (plane->type != DC_PLANE_TYPE_DCN_UNIVERSAL)
4078 if (!plane->blends_with_above || !plane->blends_with_below)
4081 if (!plane->pixel_format_support.argb8888)
4084 if (initialize_plane(dm, NULL, primary_planes + i,
4085 DRM_PLANE_TYPE_OVERLAY, plane)) {
4086 DRM_ERROR("KMS: Failed to initialize overlay plane\n");
4090 /* Only create one overlay plane. */
4094 for (i = 0; i < dm->dc->caps.max_streams; i++)
4095 if (amdgpu_dm_crtc_init(dm, mode_info->planes[i], i)) {
4096 DRM_ERROR("KMS: Failed to initialize crtc\n");
4100 #if defined(CONFIG_DRM_AMD_DC_DCN)
4101 /* Use Outbox interrupt */
4102 switch (adev->ip_versions[DCE_HWIP][0]) {
4103 case IP_VERSION(3, 0, 0):
4104 case IP_VERSION(3, 1, 2):
4105 case IP_VERSION(3, 1, 3):
4106 case IP_VERSION(2, 1, 0):
4107 if (register_outbox_irq_handlers(dm->adev)) {
4108 DRM_ERROR("DM: Failed to initialize IRQ\n");
4113 DRM_DEBUG_KMS("Unsupported DCN IP version for outbox: 0x%X\n",
4114 adev->ip_versions[DCE_HWIP][0]);
4118 /* loops over all connectors on the board */
4119 for (i = 0; i < link_cnt; i++) {
4120 struct dc_link *link = NULL;
4122 if (i > AMDGPU_DM_MAX_DISPLAY_INDEX) {
4124 "KMS: Cannot support more than %d display indexes\n",
4125 AMDGPU_DM_MAX_DISPLAY_INDEX);
4129 aconnector = kzalloc(sizeof(*aconnector), GFP_KERNEL);
4133 aencoder = kzalloc(sizeof(*aencoder), GFP_KERNEL);
4137 if (amdgpu_dm_encoder_init(dm->ddev, aencoder, i)) {
4138 DRM_ERROR("KMS: Failed to initialize encoder\n");
4142 if (amdgpu_dm_connector_init(dm, aconnector, i, aencoder)) {
4143 DRM_ERROR("KMS: Failed to initialize connector\n");
4147 link = dc_get_link_at_index(dm->dc, i);
4149 if (!dc_link_detect_sink(link, &new_connection_type))
4150 DRM_ERROR("KMS: Failed to detect connector\n");
4152 if (aconnector->base.force && new_connection_type == dc_connection_none) {
4153 emulated_link_detect(link);
4154 amdgpu_dm_update_connector_after_detect(aconnector);
4156 } else if (dc_link_detect(link, DETECT_REASON_BOOT)) {
4157 amdgpu_dm_update_connector_after_detect(aconnector);
4158 register_backlight_device(dm, link);
4159 if (amdgpu_dc_feature_mask & DC_PSR_MASK)
4160 amdgpu_dm_set_psr_caps(link);
4166 /* Software is initialized. Now we can register interrupt handlers. */
4167 switch (adev->asic_type) {
4168 #if defined(CONFIG_DRM_AMD_DC_SI)
4173 if (dce60_register_irq_handlers(dm->adev)) {
4174 DRM_ERROR("DM: Failed to initialize IRQ\n");
4188 case CHIP_POLARIS11:
4189 case CHIP_POLARIS10:
4190 case CHIP_POLARIS12:
4195 if (dce110_register_irq_handlers(dm->adev)) {
4196 DRM_ERROR("DM: Failed to initialize IRQ\n");
4201 #if defined(CONFIG_DRM_AMD_DC_DCN)
4202 switch (adev->ip_versions[DCE_HWIP][0]) {
4203 case IP_VERSION(1, 0, 0):
4204 case IP_VERSION(1, 0, 1):
4205 case IP_VERSION(2, 0, 2):
4206 case IP_VERSION(2, 0, 3):
4207 case IP_VERSION(2, 0, 0):
4208 case IP_VERSION(2, 1, 0):
4209 case IP_VERSION(3, 0, 0):
4210 case IP_VERSION(3, 0, 2):
4211 case IP_VERSION(3, 0, 3):
4212 case IP_VERSION(3, 0, 1):
4213 case IP_VERSION(3, 1, 2):
4214 case IP_VERSION(3, 1, 3):
4215 if (dcn10_register_irq_handlers(dm->adev)) {
4216 DRM_ERROR("DM: Failed to initialize IRQ\n");
4221 DRM_ERROR("Unsupported DCE IP version: 0x%X\n",
4222 adev->ip_versions[DCE_HWIP][0]);
4237 static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm)
4239 drm_atomic_private_obj_fini(&dm->atomic_obj);
4243 /******************************************************************************
4244 * amdgpu_display_funcs functions
4245 *****************************************************************************/
4248 * dm_bandwidth_update - program display watermarks
4250 * @adev: amdgpu_device pointer
4252 * Calculate and program the display watermarks and line buffer allocation.
4254 static void dm_bandwidth_update(struct amdgpu_device *adev)
4256 /* TODO: implement later */
4259 static const struct amdgpu_display_funcs dm_display_funcs = {
4260 .bandwidth_update = dm_bandwidth_update, /* called unconditionally */
4261 .vblank_get_counter = dm_vblank_get_counter,/* called unconditionally */
4262 .backlight_set_level = NULL, /* never called for DC */
4263 .backlight_get_level = NULL, /* never called for DC */
4264 .hpd_sense = NULL,/* called unconditionally */
4265 .hpd_set_polarity = NULL, /* called unconditionally */
4266 .hpd_get_gpio_reg = NULL, /* VBIOS parsing. DAL does it. */
4267 .page_flip_get_scanoutpos =
4268 dm_crtc_get_scanoutpos,/* called unconditionally */
4269 .add_encoder = NULL, /* VBIOS parsing. DAL does it. */
4270 .add_connector = NULL, /* VBIOS parsing. DAL does it. */
4273 #if defined(CONFIG_DEBUG_KERNEL_DC)
4275 static ssize_t s3_debug_store(struct device *device,
4276 struct device_attribute *attr,
4282 struct drm_device *drm_dev = dev_get_drvdata(device);
4283 struct amdgpu_device *adev = drm_to_adev(drm_dev);
4285 ret = kstrtoint(buf, 0, &s3_state);
4290 drm_kms_helper_hotplug_event(adev_to_drm(adev));
4295 return ret == 0 ? count : 0;
4298 DEVICE_ATTR_WO(s3_debug);
4302 static int dm_early_init(void *handle)
4304 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
4306 switch (adev->asic_type) {
4307 #if defined(CONFIG_DRM_AMD_DC_SI)
4311 adev->mode_info.num_crtc = 6;
4312 adev->mode_info.num_hpd = 6;
4313 adev->mode_info.num_dig = 6;
4316 adev->mode_info.num_crtc = 2;
4317 adev->mode_info.num_hpd = 2;
4318 adev->mode_info.num_dig = 2;
4323 adev->mode_info.num_crtc = 6;
4324 adev->mode_info.num_hpd = 6;
4325 adev->mode_info.num_dig = 6;
4328 adev->mode_info.num_crtc = 4;
4329 adev->mode_info.num_hpd = 6;
4330 adev->mode_info.num_dig = 7;
4334 adev->mode_info.num_crtc = 2;
4335 adev->mode_info.num_hpd = 6;
4336 adev->mode_info.num_dig = 6;
4340 adev->mode_info.num_crtc = 6;
4341 adev->mode_info.num_hpd = 6;
4342 adev->mode_info.num_dig = 7;
4345 adev->mode_info.num_crtc = 3;
4346 adev->mode_info.num_hpd = 6;
4347 adev->mode_info.num_dig = 9;
4350 adev->mode_info.num_crtc = 2;
4351 adev->mode_info.num_hpd = 6;
4352 adev->mode_info.num_dig = 9;
4354 case CHIP_POLARIS11:
4355 case CHIP_POLARIS12:
4356 adev->mode_info.num_crtc = 5;
4357 adev->mode_info.num_hpd = 5;
4358 adev->mode_info.num_dig = 5;
4360 case CHIP_POLARIS10:
4362 adev->mode_info.num_crtc = 6;
4363 adev->mode_info.num_hpd = 6;
4364 adev->mode_info.num_dig = 6;
4369 adev->mode_info.num_crtc = 6;
4370 adev->mode_info.num_hpd = 6;
4371 adev->mode_info.num_dig = 6;
4374 #if defined(CONFIG_DRM_AMD_DC_DCN)
4375 switch (adev->ip_versions[DCE_HWIP][0]) {
4376 case IP_VERSION(2, 0, 2):
4377 case IP_VERSION(3, 0, 0):
4378 adev->mode_info.num_crtc = 6;
4379 adev->mode_info.num_hpd = 6;
4380 adev->mode_info.num_dig = 6;
4382 case IP_VERSION(2, 0, 0):
4383 case IP_VERSION(3, 0, 2):
4384 adev->mode_info.num_crtc = 5;
4385 adev->mode_info.num_hpd = 5;
4386 adev->mode_info.num_dig = 5;
4388 case IP_VERSION(2, 0, 3):
4389 case IP_VERSION(3, 0, 3):
4390 adev->mode_info.num_crtc = 2;
4391 adev->mode_info.num_hpd = 2;
4392 adev->mode_info.num_dig = 2;
4394 case IP_VERSION(1, 0, 0):
4395 case IP_VERSION(1, 0, 1):
4396 case IP_VERSION(3, 0, 1):
4397 case IP_VERSION(2, 1, 0):
4398 case IP_VERSION(3, 1, 2):
4399 case IP_VERSION(3, 1, 3):
4400 adev->mode_info.num_crtc = 4;
4401 adev->mode_info.num_hpd = 4;
4402 adev->mode_info.num_dig = 4;
4405 DRM_ERROR("Unsupported DCE IP version: 0x%x\n",
4406 adev->ip_versions[DCE_HWIP][0]);
4413 amdgpu_dm_set_irq_funcs(adev);
4415 if (adev->mode_info.funcs == NULL)
4416 adev->mode_info.funcs = &dm_display_funcs;
4419 * Note: Do NOT change adev->audio_endpt_rreg and
4420 * adev->audio_endpt_wreg because they are initialised in
4421 * amdgpu_device_init()
4423 #if defined(CONFIG_DEBUG_KERNEL_DC)
4425 adev_to_drm(adev)->dev,
4426 &dev_attr_s3_debug);
4432 static bool modeset_required(struct drm_crtc_state *crtc_state,
4433 struct dc_stream_state *new_stream,
4434 struct dc_stream_state *old_stream)
4436 return crtc_state->active && drm_atomic_crtc_needs_modeset(crtc_state);
4439 static bool modereset_required(struct drm_crtc_state *crtc_state)
4441 return !crtc_state->active && drm_atomic_crtc_needs_modeset(crtc_state);
4444 static void amdgpu_dm_encoder_destroy(struct drm_encoder *encoder)
4446 drm_encoder_cleanup(encoder);
4450 static const struct drm_encoder_funcs amdgpu_dm_encoder_funcs = {
4451 .destroy = amdgpu_dm_encoder_destroy,
4455 static void get_min_max_dc_plane_scaling(struct drm_device *dev,
4456 struct drm_framebuffer *fb,
4457 int *min_downscale, int *max_upscale)
4459 struct amdgpu_device *adev = drm_to_adev(dev);
4460 struct dc *dc = adev->dm.dc;
4461 /* Caps for all supported planes are the same on DCE and DCN 1 - 3 */
4462 struct dc_plane_cap *plane_cap = &dc->caps.planes[0];
4464 switch (fb->format->format) {
4465 case DRM_FORMAT_P010:
4466 case DRM_FORMAT_NV12:
4467 case DRM_FORMAT_NV21:
4468 *max_upscale = plane_cap->max_upscale_factor.nv12;
4469 *min_downscale = plane_cap->max_downscale_factor.nv12;
4472 case DRM_FORMAT_XRGB16161616F:
4473 case DRM_FORMAT_ARGB16161616F:
4474 case DRM_FORMAT_XBGR16161616F:
4475 case DRM_FORMAT_ABGR16161616F:
4476 *max_upscale = plane_cap->max_upscale_factor.fp16;
4477 *min_downscale = plane_cap->max_downscale_factor.fp16;
4481 *max_upscale = plane_cap->max_upscale_factor.argb8888;
4482 *min_downscale = plane_cap->max_downscale_factor.argb8888;
4487 * A factor of 1 in the plane_cap means scaling is not allowed, i.e. use a
4488 * scaling factor of 1.0 == 1000 units.
4490 if (*max_upscale == 1)
4491 *max_upscale = 1000;
4493 if (*min_downscale == 1)
4494 *min_downscale = 1000;
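/*
 * Worked example of the convention above: a plane cap reporting
 * max_upscale_factor = 16000 and max_downscale_factor = 250 would allow
 * the destination rect to be anywhere between 0.25x and 16x of the
 * source rect for that format.
 */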
4498 static int fill_dc_scaling_info(const struct drm_plane_state *state,
4499 struct dc_scaling_info *scaling_info)
4501 int scale_w, scale_h, min_downscale, max_upscale;
4503 memset(scaling_info, 0, sizeof(*scaling_info));
4505 /* Source is fixed-point 16.16, but we ignore the fractional part for now... */
4506 scaling_info->src_rect.x = state->src_x >> 16;
4507 scaling_info->src_rect.y = state->src_y >> 16;
4510 * For reasons we don't (yet) fully understand, a non-zero
4511 * src_y coordinate into an NV12 buffer can cause a
4512 * system hang. To avoid hangs (and maybe be overly cautious)
4513 * let's reject both non-zero src_x and src_y.
4515 * We currently know of only one use-case to reproduce a
4516 * scenario with non-zero src_x and src_y for NV12, which
4517 * is to gesture the YouTube Android app into full screen
4521 state->fb->format->format == DRM_FORMAT_NV12 &&
4522 (scaling_info->src_rect.x != 0 ||
4523 scaling_info->src_rect.y != 0))
4526 scaling_info->src_rect.width = state->src_w >> 16;
4527 if (scaling_info->src_rect.width == 0)
4530 scaling_info->src_rect.height = state->src_h >> 16;
4531 if (scaling_info->src_rect.height == 0)
4534 scaling_info->dst_rect.x = state->crtc_x;
4535 scaling_info->dst_rect.y = state->crtc_y;
4537 if (state->crtc_w == 0)
4540 scaling_info->dst_rect.width = state->crtc_w;
4542 if (state->crtc_h == 0)
4545 scaling_info->dst_rect.height = state->crtc_h;
4547 /* DRM doesn't specify clipping on destination output. */
4548 scaling_info->clip_rect = scaling_info->dst_rect;
4550 /* Validate scaling per-format with DC plane caps */
4551 if (state->plane && state->plane->dev && state->fb) {
4552 get_min_max_dc_plane_scaling(state->plane->dev, state->fb,
4553 &min_downscale, &max_upscale);
4555 min_downscale = 250;
4556 max_upscale = 16000;
4559 scale_w = scaling_info->dst_rect.width * 1000 /
4560 scaling_info->src_rect.width;
4562 if (scale_w < min_downscale || scale_w > max_upscale)
4565 scale_h = scaling_info->dst_rect.height * 1000 /
4566 scaling_info->src_rect.height;
4568 if (scale_h < min_downscale || scale_h > max_upscale)
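/*
 * Worked example of the checks above: a 3840-wide source scanned out at
 * 1920 wide gives scale_w = 1920 * 1000 / 3840 = 500, i.e. a 0.5x
 * downscale, which must fall within [min_downscale, max_upscale].
 */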
4572 * The "scaling_quality" can be ignored for now; quality = 0 makes DC
4573 * assume reasonable defaults based on the format.
4580 fill_gfx8_tiling_info_from_flags(union dc_tiling_info *tiling_info,
4581 uint64_t tiling_flags)
4583 /* Fill GFX8 params */
4584 if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == DC_ARRAY_2D_TILED_THIN1) {
4585 unsigned int bankw, bankh, mtaspect, tile_split, num_banks;
4587 bankw = AMDGPU_TILING_GET(tiling_flags, BANK_WIDTH);
4588 bankh = AMDGPU_TILING_GET(tiling_flags, BANK_HEIGHT);
4589 mtaspect = AMDGPU_TILING_GET(tiling_flags, MACRO_TILE_ASPECT);
4590 tile_split = AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT);
4591 num_banks = AMDGPU_TILING_GET(tiling_flags, NUM_BANKS);
4593 /* XXX fix me for VI */
4594 tiling_info->gfx8.num_banks = num_banks;
4595 tiling_info->gfx8.array_mode =
4596 DC_ARRAY_2D_TILED_THIN1;
4597 tiling_info->gfx8.tile_split = tile_split;
4598 tiling_info->gfx8.bank_width = bankw;
4599 tiling_info->gfx8.bank_height = bankh;
4600 tiling_info->gfx8.tile_aspect = mtaspect;
4601 tiling_info->gfx8.tile_mode =
4602 DC_ADDR_SURF_MICRO_TILING_DISPLAY;
4603 } else if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE)
4604 == DC_ARRAY_1D_TILED_THIN1) {
4605 tiling_info->gfx8.array_mode = DC_ARRAY_1D_TILED_THIN1;
4608 tiling_info->gfx8.pipe_config =
4609 AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG);
4613 fill_gfx9_tiling_info_from_device(const struct amdgpu_device *adev,
4614 union dc_tiling_info *tiling_info)
4616 tiling_info->gfx9.num_pipes =
4617 adev->gfx.config.gb_addr_config_fields.num_pipes;
4618 tiling_info->gfx9.num_banks =
4619 adev->gfx.config.gb_addr_config_fields.num_banks;
4620 tiling_info->gfx9.pipe_interleave =
4621 adev->gfx.config.gb_addr_config_fields.pipe_interleave_size;
4622 tiling_info->gfx9.num_shader_engines =
4623 adev->gfx.config.gb_addr_config_fields.num_se;
4624 tiling_info->gfx9.max_compressed_frags =
4625 adev->gfx.config.gb_addr_config_fields.max_compress_frags;
4626 tiling_info->gfx9.num_rb_per_se =
4627 adev->gfx.config.gb_addr_config_fields.num_rb_per_se;
4628 tiling_info->gfx9.shaderEnable = 1;
4629 if (adev->ip_versions[GC_HWIP][0] >= IP_VERSION(10, 3, 0))
4630 tiling_info->gfx9.num_pkrs = adev->gfx.config.gb_addr_config_fields.num_pkrs;
4634 validate_dcc(struct amdgpu_device *adev,
4635 const enum surface_pixel_format format,
4636 const enum dc_rotation_angle rotation,
4637 const union dc_tiling_info *tiling_info,
4638 const struct dc_plane_dcc_param *dcc,
4639 const struct dc_plane_address *address,
4640 const struct plane_size *plane_size)
4642 struct dc *dc = adev->dm.dc;
4643 struct dc_dcc_surface_param input;
4644 struct dc_surface_dcc_cap output;
4646 memset(&input, 0, sizeof(input));
4647 memset(&output, 0, sizeof(output));
4652 if (format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN ||
4653 !dc->cap_funcs.get_dcc_compression_cap)
4656 input.format = format;
4657 input.surface_size.width = plane_size->surface_size.width;
4658 input.surface_size.height = plane_size->surface_size.height;
4659 input.swizzle_mode = tiling_info->gfx9.swizzle;
4661 if (rotation == ROTATION_ANGLE_0 || rotation == ROTATION_ANGLE_180)
4662 input.scan = SCAN_DIRECTION_HORIZONTAL;
4663 else if (rotation == ROTATION_ANGLE_90 || rotation == ROTATION_ANGLE_270)
4664 input.scan = SCAN_DIRECTION_VERTICAL;
4666 if (!dc->cap_funcs.get_dcc_compression_cap(dc, &input, &output))
4669 if (!output.capable)
4672 if (dcc->independent_64b_blks == 0 &&
4673 output.grph.rgb.independent_64b_blks != 0)
4680 modifier_has_dcc(uint64_t modifier)
4682 return IS_AMD_FMT_MOD(modifier) && AMD_FMT_MOD_GET(DCC, modifier);
4686 modifier_gfx9_swizzle_mode(uint64_t modifier)
4688 if (modifier == DRM_FORMAT_MOD_LINEAR)
4691 return AMD_FMT_MOD_GET(TILE, modifier);
4694 static const struct drm_format_info *
4695 amd_get_format_info(const struct drm_mode_fb_cmd2 *cmd)
4697 return amdgpu_lookup_format_info(cmd->pixel_format, cmd->modifier[0]);
4701 fill_gfx9_tiling_info_from_modifier(const struct amdgpu_device *adev,
4702 union dc_tiling_info *tiling_info,
4705 unsigned int mod_bank_xor_bits = AMD_FMT_MOD_GET(BANK_XOR_BITS, modifier);
4706 unsigned int mod_pipe_xor_bits = AMD_FMT_MOD_GET(PIPE_XOR_BITS, modifier);
4707 unsigned int pkrs_log2 = AMD_FMT_MOD_GET(PACKERS, modifier);
4708 unsigned int pipes_log2 = min(4u, mod_pipe_xor_bits);
4710 fill_gfx9_tiling_info_from_device(adev, tiling_info);
4712 if (!IS_AMD_FMT_MOD(modifier))
4715 tiling_info->gfx9.num_pipes = 1u << pipes_log2;
4716 tiling_info->gfx9.num_shader_engines = 1u << (mod_pipe_xor_bits - pipes_log2);
4718 if (adev->family >= AMDGPU_FAMILY_NV) {
4719 tiling_info->gfx9.num_pkrs = 1u << pkrs_log2;
4721 tiling_info->gfx9.num_banks = 1u << mod_bank_xor_bits;
4723 /* for DCC we know it isn't rb aligned, so rb_per_se doesn't matter. */
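/*
 * Worked example of the decode above: a modifier with PIPE_XOR_BITS = 6
 * gives pipes_log2 = min(4, 6) = 4, so num_pipes = 16 and
 * num_shader_engines = 1 << (6 - 4) = 4; on families >= AMDGPU_FAMILY_NV,
 * PACKERS = 3 additionally gives num_pkrs = 8.
 */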
4727 enum dm_micro_swizzle {
4728 MICRO_SWIZZLE_Z = 0,
4729 MICRO_SWIZZLE_S = 1,
4730 MICRO_SWIZZLE_D = 2,
4734 static bool dm_plane_format_mod_supported(struct drm_plane *plane,
4738 struct amdgpu_device *adev = drm_to_adev(plane->dev);
4739 const struct drm_format_info *info = drm_format_info(format);
4742 enum dm_micro_swizzle microtile = modifier_gfx9_swizzle_mode(modifier) & 3;
4748 * We always have to allow these modifiers:
4749 * 1. Core DRM checks for LINEAR support if userspace does not provide modifiers.
4750 * 2. Not passing any modifiers is the same as explicitly passing INVALID.
4752 if (modifier == DRM_FORMAT_MOD_LINEAR ||
4753 modifier == DRM_FORMAT_MOD_INVALID) {
4757 /* Check that the modifier is on the list of the plane's supported modifiers. */
4758 for (i = 0; i < plane->modifier_count; i++) {
4759 if (modifier == plane->modifiers[i])
4762 if (i == plane->modifier_count)
4766 * For D swizzle the canonical modifier depends on the bpp, so check
4769 if (AMD_FMT_MOD_GET(TILE_VERSION, modifier) == AMD_FMT_MOD_TILE_VER_GFX9 &&
4770 adev->family >= AMDGPU_FAMILY_NV) {
4771 if (microtile == MICRO_SWIZZLE_D && info->cpp[0] == 4)
4775 if (adev->family >= AMDGPU_FAMILY_RV && microtile == MICRO_SWIZZLE_D &&
4779 if (modifier_has_dcc(modifier)) {
4780 /* Per radeonsi comments, 16/64 bpp are more complicated. */
4781 if (info->cpp[0] != 4)
4783 /* We support multi-planar formats, but not when combined with
4784 * additional DCC metadata planes. */
4785 if (info->num_planes > 1)
4793 add_modifier(uint64_t **mods, uint64_t *size, uint64_t *cap, uint64_t mod)
4798 if (*cap - *size < 1) {
4799 uint64_t new_cap = *cap * 2;
4800 uint64_t *new_mods = kmalloc(new_cap * sizeof(uint64_t), GFP_KERNEL);
4808 memcpy(new_mods, *mods, sizeof(uint64_t) * *size);
4814 (*mods)[*size] = mod;
4819 add_gfx9_modifiers(const struct amdgpu_device *adev,
4820 uint64_t **mods, uint64_t *size, uint64_t *capacity)
4822 int pipes = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
4823 int pipe_xor_bits = min(8, pipes +
4824 ilog2(adev->gfx.config.gb_addr_config_fields.num_se));
4825 int bank_xor_bits = min(8 - pipe_xor_bits,
4826 ilog2(adev->gfx.config.gb_addr_config_fields.num_banks));
4827 int rb = ilog2(adev->gfx.config.gb_addr_config_fields.num_se) +
4828 ilog2(adev->gfx.config.gb_addr_config_fields.num_rb_per_se);
4831 if (adev->family == AMDGPU_FAMILY_RV) {
4832 /* Raven2 and later */
4833 bool has_constant_encode = adev->asic_type > CHIP_RAVEN || adev->external_rev_id >= 0x81;
4836 * No _D DCC swizzles yet because we only allow 32bpp, which
4837 * doesn't support _D on DCN
4840 if (has_constant_encode) {
4841 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4842 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4843 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4844 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4845 AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
4846 AMD_FMT_MOD_SET(DCC, 1) |
4847 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4848 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
4849 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1));
4852 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4853 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4854 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4855 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4856 AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
4857 AMD_FMT_MOD_SET(DCC, 1) |
4858 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4859 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
4860 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 0));
4862 if (has_constant_encode) {
4863 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4864 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4865 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4866 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4867 AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
4868 AMD_FMT_MOD_SET(DCC, 1) |
4869 AMD_FMT_MOD_SET(DCC_RETILE, 1) |
4870 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4871 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
4873 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
4874 AMD_FMT_MOD_SET(RB, rb) |
4875 AMD_FMT_MOD_SET(PIPE, pipes));
4878 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4879 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4880 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4881 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4882 AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
4883 AMD_FMT_MOD_SET(DCC, 1) |
4884 AMD_FMT_MOD_SET(DCC_RETILE, 1) |
4885 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4886 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
4887 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 0) |
4888 AMD_FMT_MOD_SET(RB, rb) |
4889 AMD_FMT_MOD_SET(PIPE, pipes));
4893 * Only supported for 64bpp on Raven, will be filtered on format in
4894 * dm_plane_format_mod_supported.
4896 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4897 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D_X) |
4898 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4899 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4900 AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits));
4902 if (adev->family == AMDGPU_FAMILY_RV) {
4903 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4904 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4905 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4906 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4907 AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits));
4911 * Only supported for 64bpp on Raven, will be filtered on format in
4912 * dm_plane_format_mod_supported.
4914 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4915 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
4916 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4918 if (adev->family == AMDGPU_FAMILY_RV) {
4919 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4920 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
4921 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4926 add_gfx10_1_modifiers(const struct amdgpu_device *adev,
4927 uint64_t **mods, uint64_t *size, uint64_t *capacity)
4929 int pipe_xor_bits = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
4931 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4932 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4933 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
4934 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4935 AMD_FMT_MOD_SET(DCC, 1) |
4936 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
4937 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4938 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
4940 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4941 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4942 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
4943 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4944 AMD_FMT_MOD_SET(DCC, 1) |
4945 AMD_FMT_MOD_SET(DCC_RETILE, 1) |
4946 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
4947 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4948 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
4950 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4951 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4952 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
4953 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits));
4955 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4956 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4957 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
4958 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits));
4961 /* Only supported for 64bpp, will be filtered in dm_plane_format_mod_supported */
4962 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4963 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
4964 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4966 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4967 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
4968 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4972 add_gfx10_3_modifiers(const struct amdgpu_device *adev,
4973 uint64_t **mods, uint64_t *size, uint64_t *capacity)
4975 int pipe_xor_bits = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
4976 int pkrs = ilog2(adev->gfx.config.gb_addr_config_fields.num_pkrs);
4978 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4979 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4980 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
4981 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4982 AMD_FMT_MOD_SET(PACKERS, pkrs) |
4983 AMD_FMT_MOD_SET(DCC, 1) |
4984 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
4985 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4986 AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
4987 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
4989 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4990 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4991 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
4992 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4993 AMD_FMT_MOD_SET(PACKERS, pkrs) |
4994 AMD_FMT_MOD_SET(DCC, 1) |
4995 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
4996 AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
4997 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_128B));
4999 add_modifier(mods, size, capacity, AMD_FMT_MOD |
5000 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5001 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
5002 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5003 AMD_FMT_MOD_SET(PACKERS, pkrs) |
5004 AMD_FMT_MOD_SET(DCC, 1) |
5005 AMD_FMT_MOD_SET(DCC_RETILE, 1) |
5006 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
5007 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
5008 AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
5009 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
5011 add_modifier(mods, size, capacity, AMD_FMT_MOD |
5012 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5013 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
5014 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5015 AMD_FMT_MOD_SET(PACKERS, pkrs) |
5016 AMD_FMT_MOD_SET(DCC, 1) |
5017 AMD_FMT_MOD_SET(DCC_RETILE, 1) |
5018 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
5019 AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
5020 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_128B));
5022 add_modifier(mods, size, capacity, AMD_FMT_MOD |
5023 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5024 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
5025 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5026 AMD_FMT_MOD_SET(PACKERS, pkrs));
5028 add_modifier(mods, size, capacity, AMD_FMT_MOD |
5029 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
5030 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
5031 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5032 AMD_FMT_MOD_SET(PACKERS, pkrs));
5034 /* Only supported for 64bpp, will be filtered in dm_plane_format_mod_supported */
5035 add_modifier(mods, size, capacity, AMD_FMT_MOD |
5036 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
5037 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
5039 add_modifier(mods, size, capacity, AMD_FMT_MOD |
5040 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
5041 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
5045 get_plane_modifiers(const struct amdgpu_device *adev, unsigned int plane_type, uint64_t **mods)
5047 uint64_t size = 0, capacity = 128;
5050 /* We have not hooked up any pre-GFX9 modifiers. */
5051 if (adev->family < AMDGPU_FAMILY_AI)
5054 *mods = kmalloc(capacity * sizeof(uint64_t), GFP_KERNEL);
5056 if (plane_type == DRM_PLANE_TYPE_CURSOR) {
5057 add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_LINEAR);
5058 add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_INVALID);
5059 return *mods ? 0 : -ENOMEM;
5062 switch (adev->family) {
5063 case AMDGPU_FAMILY_AI:
5064 case AMDGPU_FAMILY_RV:
5065 add_gfx9_modifiers(adev, mods, &size, &capacity);
5067 case AMDGPU_FAMILY_NV:
5068 case AMDGPU_FAMILY_VGH:
5069 case AMDGPU_FAMILY_YC:
5070 if (adev->ip_versions[GC_HWIP][0] >= IP_VERSION(10, 3, 0))
5071 add_gfx10_3_modifiers(adev, mods, &size, &capacity);
5073 add_gfx10_1_modifiers(adev, mods, &size, &capacity);
5077 add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_LINEAR);
5079 /* INVALID marks the end of the list. */
5080 add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_INVALID);
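/*
 * For non-cursor planes the list now holds the per-ASIC tiled/DCC
 * modifiers added above, followed by LINEAR and the INVALID terminator;
 * cursor planes returned early with just { LINEAR, INVALID }.
 */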
5089 fill_gfx9_plane_attributes_from_modifiers(struct amdgpu_device *adev,
5090 const struct amdgpu_framebuffer *afb,
5091 const enum surface_pixel_format format,
5092 const enum dc_rotation_angle rotation,
5093 const struct plane_size *plane_size,
5094 union dc_tiling_info *tiling_info,
5095 struct dc_plane_dcc_param *dcc,
5096 struct dc_plane_address *address,
5097 const bool force_disable_dcc)
5099 const uint64_t modifier = afb->base.modifier;
5102 fill_gfx9_tiling_info_from_modifier(adev, tiling_info, modifier);
5103 tiling_info->gfx9.swizzle = modifier_gfx9_swizzle_mode(modifier);
5105 if (modifier_has_dcc(modifier) && !force_disable_dcc) {
5106 uint64_t dcc_address = afb->address + afb->base.offsets[1];
5107 bool independent_64b_blks = AMD_FMT_MOD_GET(DCC_INDEPENDENT_64B, modifier);
5108 bool independent_128b_blks = AMD_FMT_MOD_GET(DCC_INDEPENDENT_128B, modifier);
5111 dcc->meta_pitch = afb->base.pitches[1];
5112 dcc->independent_64b_blks = independent_64b_blks;
5113 if (AMD_FMT_MOD_GET(TILE_VERSION, modifier) == AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) {
5114 if (independent_64b_blks && independent_128b_blks)
5115 dcc->dcc_ind_blk = hubp_ind_block_64b_no_128bcl;
5116 else if (independent_128b_blks)
5117 dcc->dcc_ind_blk = hubp_ind_block_128b;
5118 else if (independent_64b_blks && !independent_128b_blks)
5119 dcc->dcc_ind_blk = hubp_ind_block_64b;
5121 dcc->dcc_ind_blk = hubp_ind_block_unconstrained;
5123 if (independent_64b_blks)
5124 dcc->dcc_ind_blk = hubp_ind_block_64b;
5126 dcc->dcc_ind_blk = hubp_ind_block_unconstrained;
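/*
 * Summary of the mapping above:
 *   GFX10 RB+: 64B && 128B -> 64b_no_128bcl, 128B only -> 128b,
 *              64B only -> 64b, neither -> unconstrained
 *   otherwise: 64B -> 64b, else -> unconstrained
 */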
5129 address->grph.meta_addr.low_part = lower_32_bits(dcc_address);
5130 address->grph.meta_addr.high_part = upper_32_bits(dcc_address);
5133 ret = validate_dcc(adev, format, rotation, tiling_info, dcc, address, plane_size);
5135 drm_dbg_kms(adev_to_drm(adev), "validate_dcc: returned error: %d\n", ret);
5141 fill_plane_buffer_attributes(struct amdgpu_device *adev,
5142 const struct amdgpu_framebuffer *afb,
5143 const enum surface_pixel_format format,
5144 const enum dc_rotation_angle rotation,
5145 const uint64_t tiling_flags,
5146 union dc_tiling_info *tiling_info,
5147 struct plane_size *plane_size,
5148 struct dc_plane_dcc_param *dcc,
5149 struct dc_plane_address *address,
5151 bool force_disable_dcc)
5153 const struct drm_framebuffer *fb = &afb->base;
5156 memset(tiling_info, 0, sizeof(*tiling_info));
5157 memset(plane_size, 0, sizeof(*plane_size));
5158 memset(dcc, 0, sizeof(*dcc));
5159 memset(address, 0, sizeof(*address));
5161 address->tmz_surface = tmz_surface;
5163 if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) {
5164 uint64_t addr = afb->address + fb->offsets[0];
5166 plane_size->surface_size.x = 0;
5167 plane_size->surface_size.y = 0;
5168 plane_size->surface_size.width = fb->width;
5169 plane_size->surface_size.height = fb->height;
5170 plane_size->surface_pitch =
5171 fb->pitches[0] / fb->format->cpp[0];
5173 address->type = PLN_ADDR_TYPE_GRAPHICS;
5174 address->grph.addr.low_part = lower_32_bits(addr);
5175 address->grph.addr.high_part = upper_32_bits(addr);
5176 } else if (format < SURFACE_PIXEL_FORMAT_INVALID) {
5177 uint64_t luma_addr = afb->address + fb->offsets[0];
5178 uint64_t chroma_addr = afb->address + fb->offsets[1];
5180 plane_size->surface_size.x = 0;
5181 plane_size->surface_size.y = 0;
5182 plane_size->surface_size.width = fb->width;
5183 plane_size->surface_size.height = fb->height;
5184 plane_size->surface_pitch =
5185 fb->pitches[0] / fb->format->cpp[0];
5187 plane_size->chroma_size.x = 0;
5188 plane_size->chroma_size.y = 0;
5189 /* TODO: set these based on surface format */
5190 plane_size->chroma_size.width = fb->width / 2;
5191 plane_size->chroma_size.height = fb->height / 2;
5193 plane_size->chroma_pitch =
5194 fb->pitches[1] / fb->format->cpp[1];
5196 address->type = PLN_ADDR_TYPE_VIDEO_PROGRESSIVE;
5197 address->video_progressive.luma_addr.low_part =
5198 lower_32_bits(luma_addr);
5199 address->video_progressive.luma_addr.high_part =
5200 upper_32_bits(luma_addr);
5201 address->video_progressive.chroma_addr.low_part =
5202 lower_32_bits(chroma_addr);
5203 address->video_progressive.chroma_addr.high_part =
5204 upper_32_bits(chroma_addr);
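/*
 * For the semi-planar YUV formats handled here (e.g. NV12/NV21/P010),
 * the luma plane comes from fb plane 0 and the interleaved chroma plane
 * from fb plane 1, at half the luma resolution in each dimension.
 */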
5207 if (adev->family >= AMDGPU_FAMILY_AI) {
5208 ret = fill_gfx9_plane_attributes_from_modifiers(adev, afb, format,
5209 rotation, plane_size,
5216 fill_gfx8_tiling_info_from_flags(tiling_info, tiling_flags);
5223 fill_blending_from_plane_state(const struct drm_plane_state *plane_state,
5224 bool *per_pixel_alpha, bool *global_alpha,
5225 int *global_alpha_value)
5227 *per_pixel_alpha = false;
5228 *global_alpha = false;
5229 *global_alpha_value = 0xff;
5231 if (plane_state->plane->type != DRM_PLANE_TYPE_OVERLAY)
5234 if (plane_state->pixel_blend_mode == DRM_MODE_BLEND_PREMULTI) {
5235 static const uint32_t alpha_formats[] = {
5236 DRM_FORMAT_ARGB8888,
5237 DRM_FORMAT_RGBA8888,
5238 DRM_FORMAT_ABGR8888,
5240 uint32_t format = plane_state->fb->format->format;
5243 for (i = 0; i < ARRAY_SIZE(alpha_formats); ++i) {
5244 if (format == alpha_formats[i]) {
5245 *per_pixel_alpha = true;
5251 if (plane_state->alpha < 0xffff) {
5252 *global_alpha = true;
5253 *global_alpha_value = plane_state->alpha >> 8;
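/*
 * drm_plane_state.alpha is 16 bit (0xffff == fully opaque) while DC
 * takes an 8-bit value, so 0xffff maps to 0xff via the shift above.
 */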
5258 fill_plane_color_attributes(const struct drm_plane_state *plane_state,
5259 const enum surface_pixel_format format,
5260 enum dc_color_space *color_space)
5264 *color_space = COLOR_SPACE_SRGB;
5266 /* DRM color properties only affect non-RGB formats. */
5267 if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN)
5270 full_range = (plane_state->color_range == DRM_COLOR_YCBCR_FULL_RANGE);
5272 switch (plane_state->color_encoding) {
5273 case DRM_COLOR_YCBCR_BT601:
5275 *color_space = COLOR_SPACE_YCBCR601;
5277 *color_space = COLOR_SPACE_YCBCR601_LIMITED;
5280 case DRM_COLOR_YCBCR_BT709:
5282 *color_space = COLOR_SPACE_YCBCR709;
5284 *color_space = COLOR_SPACE_YCBCR709_LIMITED;
5287 case DRM_COLOR_YCBCR_BT2020:
5289 *color_space = COLOR_SPACE_2020_YCBCR;
5302 fill_dc_plane_info_and_addr(struct amdgpu_device *adev,
5303 const struct drm_plane_state *plane_state,
5304 const uint64_t tiling_flags,
5305 struct dc_plane_info *plane_info,
5306 struct dc_plane_address *address,
5308 bool force_disable_dcc)
5310 const struct drm_framebuffer *fb = plane_state->fb;
5311 const struct amdgpu_framebuffer *afb =
5312 to_amdgpu_framebuffer(plane_state->fb);
5315 memset(plane_info, 0, sizeof(*plane_info));
5317 switch (fb->format->format) {
5319 plane_info->format =
5320 SURFACE_PIXEL_FORMAT_GRPH_PALETA_256_COLORS;
5322 case DRM_FORMAT_RGB565:
5323 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_RGB565;
5325 case DRM_FORMAT_XRGB8888:
5326 case DRM_FORMAT_ARGB8888:
5327 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB8888;
5329 case DRM_FORMAT_XRGB2101010:
5330 case DRM_FORMAT_ARGB2101010:
5331 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010;
5333 case DRM_FORMAT_XBGR2101010:
5334 case DRM_FORMAT_ABGR2101010:
5335 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010;
5337 case DRM_FORMAT_XBGR8888:
5338 case DRM_FORMAT_ABGR8888:
5339 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR8888;
5341 case DRM_FORMAT_NV21:
5342 plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr;
5344 case DRM_FORMAT_NV12:
5345 plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb;
5347 case DRM_FORMAT_P010:
5348 plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb;
5350 case DRM_FORMAT_XRGB16161616F:
5351 case DRM_FORMAT_ARGB16161616F:
5352 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F;
5354 case DRM_FORMAT_XBGR16161616F:
5355 case DRM_FORMAT_ABGR16161616F:
5356 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F;
5358 case DRM_FORMAT_XRGB16161616:
5359 case DRM_FORMAT_ARGB16161616:
5360 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616;
5362 case DRM_FORMAT_XBGR16161616:
5363 case DRM_FORMAT_ABGR16161616:
5364 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616;
5368 "Unsupported screen format %p4cc\n",
5369 &fb->format->format);
5373 switch (plane_state->rotation & DRM_MODE_ROTATE_MASK) {
5374 case DRM_MODE_ROTATE_0:
5375 plane_info->rotation = ROTATION_ANGLE_0;
5377 case DRM_MODE_ROTATE_90:
5378 plane_info->rotation = ROTATION_ANGLE_90;
5380 case DRM_MODE_ROTATE_180:
5381 plane_info->rotation = ROTATION_ANGLE_180;
5383 case DRM_MODE_ROTATE_270:
5384 plane_info->rotation = ROTATION_ANGLE_270;
5387 plane_info->rotation = ROTATION_ANGLE_0;
5391 plane_info->visible = true;
5392 plane_info->stereo_format = PLANE_STEREO_FORMAT_NONE;
5394 plane_info->layer_index = 0;
5396 ret = fill_plane_color_attributes(plane_state, plane_info->format,
5397 &plane_info->color_space);
5401 ret = fill_plane_buffer_attributes(adev, afb, plane_info->format,
5402 plane_info->rotation, tiling_flags,
5403 &plane_info->tiling_info,
5404 &plane_info->plane_size,
5405 &plane_info->dcc, address, tmz_surface,
5410 fill_blending_from_plane_state(
5411 plane_state, &plane_info->per_pixel_alpha,
5412 &plane_info->global_alpha, &plane_info->global_alpha_value);
5417 static int fill_dc_plane_attributes(struct amdgpu_device *adev,
5418 struct dc_plane_state *dc_plane_state,
5419 struct drm_plane_state *plane_state,
5420 struct drm_crtc_state *crtc_state)
5422 struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
5423 struct amdgpu_framebuffer *afb = (struct amdgpu_framebuffer *)plane_state->fb;
5424 struct dc_scaling_info scaling_info;
5425 struct dc_plane_info plane_info;
5427 bool force_disable_dcc = false;
5429 ret = fill_dc_scaling_info(plane_state, &scaling_info);
5433 dc_plane_state->src_rect = scaling_info.src_rect;
5434 dc_plane_state->dst_rect = scaling_info.dst_rect;
5435 dc_plane_state->clip_rect = scaling_info.clip_rect;
5436 dc_plane_state->scaling_quality = scaling_info.scaling_quality;
5438 force_disable_dcc = adev->asic_type == CHIP_RAVEN && adev->in_suspend;
5439 ret = fill_dc_plane_info_and_addr(adev, plane_state,
5442 &dc_plane_state->address,
5448 dc_plane_state->format = plane_info.format;
5449 dc_plane_state->color_space = plane_info.color_space;
5451 dc_plane_state->plane_size = plane_info.plane_size;
5452 dc_plane_state->rotation = plane_info.rotation;
5453 dc_plane_state->horizontal_mirror = plane_info.horizontal_mirror;
5454 dc_plane_state->stereo_format = plane_info.stereo_format;
5455 dc_plane_state->tiling_info = plane_info.tiling_info;
5456 dc_plane_state->visible = plane_info.visible;
5457 dc_plane_state->per_pixel_alpha = plane_info.per_pixel_alpha;
5458 dc_plane_state->global_alpha = plane_info.global_alpha;
5459 dc_plane_state->global_alpha_value = plane_info.global_alpha_value;
5460 dc_plane_state->dcc = plane_info.dcc;
5461 dc_plane_state->layer_index = plane_info.layer_index; // Always returns 0
5462 dc_plane_state->flip_int_enabled = true;
5465 * Always set input transfer function, since plane state is refreshed
5468 ret = amdgpu_dm_update_plane_color_mgmt(dm_crtc_state, dc_plane_state);
5475 static void update_stream_scaling_settings(const struct drm_display_mode *mode,
5476 const struct dm_connector_state *dm_state,
5477 struct dc_stream_state *stream)
5479 enum amdgpu_rmx_type rmx_type;
5481 struct rect src = { 0 }; /* viewport in composition space*/
5482 struct rect dst = { 0 }; /* stream addressable area */
5484 /* no mode. nothing to be done */
5488 /* Full screen scaling by default */
5489 src.width = mode->hdisplay;
5490 src.height = mode->vdisplay;
5491 dst.width = stream->timing.h_addressable;
5492 dst.height = stream->timing.v_addressable;
5495 rmx_type = dm_state->scaling;
5496 if (rmx_type == RMX_ASPECT || rmx_type == RMX_OFF) {
5497 if (src.width * dst.height <
5498 src.height * dst.width) {
5499 /* height needs less upscaling/more downscaling */
5500 dst.width = src.width *
5501 dst.height / src.height;
5503 /* width needs less upscaling/more downscaling */
5504 dst.height = src.height *
5505 dst.width / src.width;
5507 } else if (rmx_type == RMX_CENTER) {
5511 dst.x = (stream->timing.h_addressable - dst.width) / 2;
5512 dst.y = (stream->timing.v_addressable - dst.height) / 2;
5514 if (dm_state->underscan_enable) {
5515 dst.x += dm_state->underscan_hborder / 2;
5516 dst.y += dm_state->underscan_vborder / 2;
5517 dst.width -= dm_state->underscan_hborder;
5518 dst.height -= dm_state->underscan_vborder;
5525 DRM_DEBUG_KMS("Destination Rectangle x:%d y:%d width:%d height:%d\n",
5526 dst.x, dst.y, dst.width, dst.height);
5530 static enum dc_color_depth
5531 convert_color_depth_from_display_info(const struct drm_connector *connector,
5532 bool is_y420, int requested_bpc)
5539 /* Cap display bpc based on HDMI 2.0 HF-VSDB */
5540 if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_48)
5542 else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_36)
5544 else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_30)
5547 bpc = (uint8_t)connector->display_info.bpc;
5548 /* Assume 8 bpc by default if no bpc is specified. */
5549 bpc = bpc ? bpc : 8;
5552 if (requested_bpc > 0) {
5554 * Cap display bpc based on the user requested value.
5556 * The value for state->max_bpc may not correctly updated
5557 * depending on when the connector gets added to the state
5558 * or if this was called outside of atomic check, so it
5559 * can't be used directly.
5561 bpc = min_t(u8, bpc, requested_bpc);
5563 /* Round down to the nearest even number. */
5564 bpc = bpc - (bpc & 1);
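/* e.g. an odd value such as 7 bpc is rounded down to 6 bpc here. */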
5570 * Temporary workaround: DRM doesn't parse color depth for
5571 * EDID revisions before 1.4.
5572 * TODO: Fix edid parsing
5574 return COLOR_DEPTH_888;
5576 return COLOR_DEPTH_666;
5578 return COLOR_DEPTH_888;
5580 return COLOR_DEPTH_101010;
5582 return COLOR_DEPTH_121212;
5584 return COLOR_DEPTH_141414;
5586 return COLOR_DEPTH_161616;
5588 return COLOR_DEPTH_UNDEFINED;
5592 static enum dc_aspect_ratio
5593 get_aspect_ratio(const struct drm_display_mode *mode_in)
5595 /* 1-1 mapping, since both enums follow the HDMI spec. */
5596 return (enum dc_aspect_ratio) mode_in->picture_aspect_ratio;
5599 static enum dc_color_space
5600 get_output_color_space(const struct dc_crtc_timing *dc_crtc_timing)
5602 enum dc_color_space color_space = COLOR_SPACE_SRGB;
5604 switch (dc_crtc_timing->pixel_encoding) {
5605 case PIXEL_ENCODING_YCBCR422:
5606 case PIXEL_ENCODING_YCBCR444:
5607 case PIXEL_ENCODING_YCBCR420:
5610 * 27030 kHz is the separation point between HDTV and SDTV
5611 * according to the HDMI spec, so we use YCbCr709 and YCbCr601
5614 if (dc_crtc_timing->pix_clk_100hz > 270300) {
5615 if (dc_crtc_timing->flags.Y_ONLY)
5617 COLOR_SPACE_YCBCR709_LIMITED;
5619 color_space = COLOR_SPACE_YCBCR709;
5621 if (dc_crtc_timing->flags.Y_ONLY)
5623 COLOR_SPACE_YCBCR601_LIMITED;
5625 color_space = COLOR_SPACE_YCBCR601;
5630 case PIXEL_ENCODING_RGB:
5631 color_space = COLOR_SPACE_SRGB;
5642 static bool adjust_colour_depth_from_display_info(
5643 struct dc_crtc_timing *timing_out,
5644 const struct drm_display_info *info)
5646 enum dc_color_depth depth = timing_out->display_color_depth;
5649 normalized_clk = timing_out->pix_clk_100hz / 10;
5650 /* YCbCr 4:2:0 requires additional adjustment of 1/2 */
5651 if (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420)
5652 normalized_clk /= 2;
5653 /* Adjust the pixel clock per the HDMI spec based on colour depth */
5655 case COLOR_DEPTH_888:
5657 case COLOR_DEPTH_101010:
5658 normalized_clk = (normalized_clk * 30) / 24;
5660 case COLOR_DEPTH_121212:
5661 normalized_clk = (normalized_clk * 36) / 24;
5663 case COLOR_DEPTH_161616:
5664 normalized_clk = (normalized_clk * 48) / 24;
5667 /* The above depths are the only ones valid for HDMI. */
5670 if (normalized_clk <= info->max_tmds_clock) {
5671 timing_out->display_color_depth = depth;
5674 } while (--depth > COLOR_DEPTH_666);
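/*
 * Worked example: a 300 MHz 4:4:4 mode (normalized_clk = 300000 kHz) on
 * a sink with max_tmds_clock = 340000 kHz would need 450000 kHz at
 * 12 bpc and 375000 kHz at 10 bpc, so the loop settles on 8 bpc.
 */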
5678 static void fill_stream_properties_from_drm_display_mode(
5679 struct dc_stream_state *stream,
5680 const struct drm_display_mode *mode_in,
5681 const struct drm_connector *connector,
5682 const struct drm_connector_state *connector_state,
5683 const struct dc_stream_state *old_stream,
5686 struct dc_crtc_timing *timing_out = &stream->timing;
5687 const struct drm_display_info *info = &connector->display_info;
5688 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
5689 struct hdmi_vendor_infoframe hv_frame;
5690 struct hdmi_avi_infoframe avi_frame;
5692 memset(&hv_frame, 0, sizeof(hv_frame));
5693 memset(&avi_frame, 0, sizeof(avi_frame));
5695 timing_out->h_border_left = 0;
5696 timing_out->h_border_right = 0;
5697 timing_out->v_border_top = 0;
5698 timing_out->v_border_bottom = 0;
5699 /* TODO: un-hardcode */
5700 if (drm_mode_is_420_only(info, mode_in)
5701 && stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
5702 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
5703 else if (drm_mode_is_420_also(info, mode_in)
5704 && aconnector->force_yuv420_output)
5705 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
5706 else if ((connector->display_info.color_formats & DRM_COLOR_FORMAT_YCRCB444)
5707 && stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
5708 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR444;
5710 timing_out->pixel_encoding = PIXEL_ENCODING_RGB;
5712 timing_out->timing_3d_format = TIMING_3D_FORMAT_NONE;
5713 timing_out->display_color_depth = convert_color_depth_from_display_info(
5715 (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420),
5717 timing_out->scan_type = SCANNING_TYPE_NODATA;
5718 timing_out->hdmi_vic = 0;
5721 timing_out->vic = old_stream->timing.vic;
5722 timing_out->flags.HSYNC_POSITIVE_POLARITY = old_stream->timing.flags.HSYNC_POSITIVE_POLARITY;
5723 timing_out->flags.VSYNC_POSITIVE_POLARITY = old_stream->timing.flags.VSYNC_POSITIVE_POLARITY;
5725 timing_out->vic = drm_match_cea_mode(mode_in);
5726 if (mode_in->flags & DRM_MODE_FLAG_PHSYNC)
5727 timing_out->flags.HSYNC_POSITIVE_POLARITY = 1;
5728 if (mode_in->flags & DRM_MODE_FLAG_PVSYNC)
5729 timing_out->flags.VSYNC_POSITIVE_POLARITY = 1;
5732 if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
5733 drm_hdmi_avi_infoframe_from_display_mode(&avi_frame, (struct drm_connector *)connector, mode_in);
5734 timing_out->vic = avi_frame.video_code;
5735 drm_hdmi_vendor_infoframe_from_display_mode(&hv_frame, (struct drm_connector *)connector, mode_in);
5736 timing_out->hdmi_vic = hv_frame.vic;
5739 if (is_freesync_video_mode(mode_in, aconnector)) {
5740 timing_out->h_addressable = mode_in->hdisplay;
5741 timing_out->h_total = mode_in->htotal;
5742 timing_out->h_sync_width = mode_in->hsync_end - mode_in->hsync_start;
5743 timing_out->h_front_porch = mode_in->hsync_start - mode_in->hdisplay;
5744 timing_out->v_total = mode_in->vtotal;
5745 timing_out->v_addressable = mode_in->vdisplay;
5746 timing_out->v_front_porch = mode_in->vsync_start - mode_in->vdisplay;
5747 timing_out->v_sync_width = mode_in->vsync_end - mode_in->vsync_start;
5748 timing_out->pix_clk_100hz = mode_in->clock * 10;
5750 timing_out->h_addressable = mode_in->crtc_hdisplay;
5751 timing_out->h_total = mode_in->crtc_htotal;
5752 timing_out->h_sync_width = mode_in->crtc_hsync_end - mode_in->crtc_hsync_start;
5753 timing_out->h_front_porch = mode_in->crtc_hsync_start - mode_in->crtc_hdisplay;
5754 timing_out->v_total = mode_in->crtc_vtotal;
5755 timing_out->v_addressable = mode_in->crtc_vdisplay;
5756 timing_out->v_front_porch = mode_in->crtc_vsync_start - mode_in->crtc_vdisplay;
5757 timing_out->v_sync_width = mode_in->crtc_vsync_end - mode_in->crtc_vsync_start;
5758 timing_out->pix_clk_100hz = mode_in->crtc_clock * 10;
5761 timing_out->aspect_ratio = get_aspect_ratio(mode_in);
5763 stream->output_color_space = get_output_color_space(timing_out);
5765 stream->out_transfer_func->type = TF_TYPE_PREDEFINED;
5766 stream->out_transfer_func->tf = TRANSFER_FUNCTION_SRGB;
5767 if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
5768 if (!adjust_colour_depth_from_display_info(timing_out, info) &&
5769 drm_mode_is_420_also(info, mode_in) &&
5770 timing_out->pixel_encoding != PIXEL_ENCODING_YCBCR420) {
5771 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
5772 adjust_colour_depth_from_display_info(timing_out, info);
5777 static void fill_audio_info(struct audio_info *audio_info,
5778 const struct drm_connector *drm_connector,
5779 const struct dc_sink *dc_sink)
5782 int cea_revision = 0;
5783 const struct dc_edid_caps *edid_caps = &dc_sink->edid_caps;
5785 audio_info->manufacture_id = edid_caps->manufacturer_id;
5786 audio_info->product_id = edid_caps->product_id;
5788 cea_revision = drm_connector->display_info.cea_rev;
5790 strscpy(audio_info->display_name,
5791 edid_caps->display_name,
5792 AUDIO_INFO_DISPLAY_NAME_SIZE_IN_CHARS);
5794 if (cea_revision >= 3) {
5795 audio_info->mode_count = edid_caps->audio_mode_count;
5797 for (i = 0; i < audio_info->mode_count; ++i) {
5798 audio_info->modes[i].format_code =
5799 (enum audio_format_code)
5800 (edid_caps->audio_modes[i].format_code);
5801 audio_info->modes[i].channel_count =
5802 edid_caps->audio_modes[i].channel_count;
5803 audio_info->modes[i].sample_rates.all =
5804 edid_caps->audio_modes[i].sample_rate;
5805 audio_info->modes[i].sample_size =
5806 edid_caps->audio_modes[i].sample_size;
5810 audio_info->flags.all = edid_caps->speaker_flags;
5812 /* TODO: We only check for the progressive mode, check for interlace mode too */
5813 if (drm_connector->latency_present[0]) {
5814 audio_info->video_latency = drm_connector->video_latency[0];
5815 audio_info->audio_latency = drm_connector->audio_latency[0];
5818 /* TODO: For DP, video and audio latency should be calculated from DPCD caps */
5823 copy_crtc_timing_for_drm_display_mode(const struct drm_display_mode *src_mode,
5824 struct drm_display_mode *dst_mode)
5826 dst_mode->crtc_hdisplay = src_mode->crtc_hdisplay;
5827 dst_mode->crtc_vdisplay = src_mode->crtc_vdisplay;
5828 dst_mode->crtc_clock = src_mode->crtc_clock;
5829 dst_mode->crtc_hblank_start = src_mode->crtc_hblank_start;
5830 dst_mode->crtc_hblank_end = src_mode->crtc_hblank_end;
5831 dst_mode->crtc_hsync_start = src_mode->crtc_hsync_start;
5832 dst_mode->crtc_hsync_end = src_mode->crtc_hsync_end;
5833 dst_mode->crtc_htotal = src_mode->crtc_htotal;
5834 dst_mode->crtc_hskew = src_mode->crtc_hskew;
5835 dst_mode->crtc_vblank_start = src_mode->crtc_vblank_start;
5836 dst_mode->crtc_vblank_end = src_mode->crtc_vblank_end;
5837 dst_mode->crtc_vsync_start = src_mode->crtc_vsync_start;
5838 dst_mode->crtc_vsync_end = src_mode->crtc_vsync_end;
5839 dst_mode->crtc_vtotal = src_mode->crtc_vtotal;
5843 decide_crtc_timing_for_drm_display_mode(struct drm_display_mode *drm_mode,
5844 const struct drm_display_mode *native_mode,
5847 if (scale_enabled) {
5848 copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
5849 } else if (native_mode->clock == drm_mode->clock &&
5850 native_mode->htotal == drm_mode->htotal &&
5851 native_mode->vtotal == drm_mode->vtotal) {
5852 copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
5854 /* neither scaling nor an amdgpu-inserted mode, nothing to patch */
5858 static struct dc_sink *
5859 create_fake_sink(struct amdgpu_dm_connector *aconnector)
5861 struct dc_sink_init_data sink_init_data = { 0 };
5862 struct dc_sink *sink = NULL;
5863 sink_init_data.link = aconnector->dc_link;
5864 sink_init_data.sink_signal = aconnector->dc_link->connector_signal;
5866 sink = dc_sink_create(&sink_init_data);
5868 DRM_ERROR("Failed to create sink!\n");
5871 sink->sink_signal = SIGNAL_TYPE_VIRTUAL;
5876 static void set_multisync_trigger_params(
5877 struct dc_stream_state *stream)
5879 struct dc_stream_state *master = NULL;
5881 if (stream->triggered_crtc_reset.enabled) {
5882 master = stream->triggered_crtc_reset.event_source;
5883 stream->triggered_crtc_reset.event =
5884 master->timing.flags.VSYNC_POSITIVE_POLARITY ?
5885 CRTC_EVENT_VSYNC_RISING : CRTC_EVENT_VSYNC_FALLING;
5886 stream->triggered_crtc_reset.delay = TRIGGER_DELAY_NEXT_PIXEL;
5890 static void set_master_stream(struct dc_stream_state *stream_set[],
5893 int j, highest_rfr = 0, master_stream = 0;
5895 for (j = 0; j < stream_count; j++) {
5896 if (stream_set[j] && stream_set[j]->triggered_crtc_reset.enabled) {
5897 int refresh_rate = 0;
5899 refresh_rate = (stream_set[j]->timing.pix_clk_100hz*100)/
5900 (stream_set[j]->timing.h_total*stream_set[j]->timing.v_total);
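/*
 * e.g. pix_clk_100hz = 1485000 (148.5 MHz) with a 2200x1125 total
 * timing gives 148500000 / 2475000 = 60 Hz.
 */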
5901 if (refresh_rate > highest_rfr) {
5902 highest_rfr = refresh_rate;
5907 for (j = 0; j < stream_count; j++) {
5909 stream_set[j]->triggered_crtc_reset.event_source = stream_set[master_stream];
5913 static void dm_enable_per_frame_crtc_master_sync(struct dc_state *context)
5916 struct dc_stream_state *stream;
5918 if (context->stream_count < 2)
5920 for (i = 0; i < context->stream_count ; i++) {
5921 if (!context->streams[i])
5924 * TODO: add a function to read AMD VSDB bits and set
5925 * crtc_sync_master.multi_sync_enabled flag
5926 * For now it's set to false
5930 set_master_stream(context->streams, context->stream_count);
5932 for (i = 0; i < context->stream_count ; i++) {
5933 stream = context->streams[i];
5938 set_multisync_trigger_params(stream);
5942 #if defined(CONFIG_DRM_AMD_DC_DCN)
5943 static void update_dsc_caps(struct amdgpu_dm_connector *aconnector,
5944 struct dc_sink *sink, struct dc_stream_state *stream,
5945 struct dsc_dec_dpcd_caps *dsc_caps)
5947 stream->timing.flags.DSC = 0;
5949 if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT) {
5950 dc_dsc_parse_dsc_dpcd(aconnector->dc_link->ctx->dc,
5951 aconnector->dc_link->dpcd_caps.dsc_caps.dsc_basic_caps.raw,
5952 aconnector->dc_link->dpcd_caps.dsc_caps.dsc_branch_decoder_caps.raw,
5957 static void apply_dsc_policy_for_stream(struct amdgpu_dm_connector *aconnector,
5958 struct dc_sink *sink, struct dc_stream_state *stream,
5959 struct dsc_dec_dpcd_caps *dsc_caps)
5961 struct drm_connector *drm_connector = &aconnector->base;
5962 uint32_t link_bandwidth_kbps;
5963 uint32_t max_dsc_target_bpp_limit_override = 0;
5965 link_bandwidth_kbps = dc_link_bandwidth_kbps(aconnector->dc_link,
5966 dc_link_get_link_cap(aconnector->dc_link));
5968 if (stream->link && stream->link->local_sink)
5969 max_dsc_target_bpp_limit_override =
5970 stream->link->local_sink->edid_caps.panel_patch.max_dsc_target_bpp_limit;
5972 /* Set DSC policy according to dsc_clock_en */
5973 dc_dsc_policy_set_enable_dsc_when_not_needed(
5974 aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE);
5976 if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT) {
5978 if (dc_dsc_compute_config(aconnector->dc_link->ctx->dc->res_pool->dscs[0],
5980 aconnector->dc_link->ctx->dc->debug.dsc_min_slice_height_override,
5981 max_dsc_target_bpp_limit_override,
5982 link_bandwidth_kbps,
5984 &stream->timing.dsc_cfg)) {
5985 stream->timing.flags.DSC = 1;
5986 DRM_DEBUG_DRIVER("%s: [%s] DSC is selected from SST RX\n", __func__, drm_connector->name);
5990 /* Overwrite the stream flag if DSC is enabled through debugfs */
5991 if (aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE)
5992 stream->timing.flags.DSC = 1;
5994 if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_h)
5995 stream->timing.dsc_cfg.num_slices_h = aconnector->dsc_settings.dsc_num_slices_h;
5997 if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_v)
5998 stream->timing.dsc_cfg.num_slices_v = aconnector->dsc_settings.dsc_num_slices_v;
6000 if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_bits_per_pixel)
6001 stream->timing.dsc_cfg.bits_per_pixel = aconnector->dsc_settings.dsc_bits_per_pixel;
6006 * DOC: FreeSync Video
6008 * When a userspace application wants to play a video, the content follows a
6009 * standard format definition that usually specifies the FPS for that format.
6010 * The list below illustrates some video formats and their expected FPS,
6013 * - TV/NTSC (23.976 FPS)
6016 * - TV/NTSC (29.97 FPS)
6017 * - TV/NTSC (30 FPS)
6018 * - Cinema HFR (48 FPS)
6020 * - Commonly used (60 FPS)
6021 * - Multiples of 24 (48,72,96,120 FPS)
6023 * The list of standard video formats is not huge and can be added to the
6024 * connector modeset list beforehand. With that, userspace can leverage
6025 * FreeSync to extend the front porch in order to attain the target refresh
6026 * rate. Such a switch will happen seamlessly, without screen blanking or
6027 * reprogramming of the output in any other way. If the userspace requests a
6028 * modesetting change compatible with FreeSync modes that only differ in the
6029 * refresh rate, DC will skip the full update and avoid blink during the
6030 * transition. For example, the video player can change the modesetting from
6031 * 60Hz to 30Hz for playing TV/NTSC content when it goes full screen without
6032 * causing any display blink. This same concept can be applied to a mode
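 *
 * As a concrete example, 24 FPS content on a panel whose base mode is 60 Hz
 * can be shown by stretching the vertical front porch until the effective
 * vertical total is 2.5x larger, lowering the refresh rate to 24 Hz while the
 * pixel clock and all other timing parameters stay the same.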
6035 static struct drm_display_mode *
6036 get_highest_refresh_rate_mode(struct amdgpu_dm_connector *aconnector,
6037 bool use_probed_modes)
6039 struct drm_display_mode *m, *m_pref = NULL;
6040 u16 current_refresh, highest_refresh;
6041 struct list_head *list_head = use_probed_modes ?
6042 &aconnector->base.probed_modes :
6043 &aconnector->base.modes;
6045 if (aconnector->freesync_vid_base.clock != 0)
6046 return &aconnector->freesync_vid_base;
6048 /* Find the preferred mode */
6049 list_for_each_entry (m, list_head, head) {
6050 if (m->type & DRM_MODE_TYPE_PREFERRED) {
6057 /* Probably an EDID with no preferred mode. Fall back to first entry */
6058 m_pref = list_first_entry_or_null(
6059 &aconnector->base.modes, struct drm_display_mode, head);
6061 DRM_DEBUG_DRIVER("No preferred mode found in EDID\n");
6066 highest_refresh = drm_mode_vrefresh(m_pref);
6069 * Find the mode with the highest refresh rate at the same resolution.
6070 * For some monitors, the preferred mode is not the mode with the highest
6071 * supported refresh rate.
6073 list_for_each_entry (m, list_head, head) {
6074 current_refresh = drm_mode_vrefresh(m);
6076 if (m->hdisplay == m_pref->hdisplay &&
6077 m->vdisplay == m_pref->vdisplay &&
6078 highest_refresh < current_refresh) {
6079 highest_refresh = current_refresh;
6084 aconnector->freesync_vid_base = *m_pref;
6088 static bool is_freesync_video_mode(const struct drm_display_mode *mode,
6089 struct amdgpu_dm_connector *aconnector)
6091 struct drm_display_mode *high_mode;
6094 high_mode = get_highest_refresh_rate_mode(aconnector, false);
6095 if (!high_mode || !mode)
6098 timing_diff = high_mode->vtotal - mode->vtotal;
6100 if (high_mode->clock == 0 || high_mode->clock != mode->clock ||
6101 high_mode->hdisplay != mode->hdisplay ||
6102 high_mode->vdisplay != mode->vdisplay ||
6103 high_mode->hsync_start != mode->hsync_start ||
6104 high_mode->hsync_end != mode->hsync_end ||
6105 high_mode->htotal != mode->htotal ||
6106 high_mode->hskew != mode->hskew ||
6107 high_mode->vscan != mode->vscan ||
6108 high_mode->vsync_start - mode->vsync_start != timing_diff ||
6109 high_mode->vsync_end - mode->vsync_end != timing_diff)
6115 static struct dc_stream_state *
6116 create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
6117 const struct drm_display_mode *drm_mode,
6118 const struct dm_connector_state *dm_state,
6119 const struct dc_stream_state *old_stream,
6122 struct drm_display_mode *preferred_mode = NULL;
6123 struct drm_connector *drm_connector;
6124 const struct drm_connector_state *con_state =
6125 dm_state ? &dm_state->base : NULL;
6126 struct dc_stream_state *stream = NULL;
6127 struct drm_display_mode mode = *drm_mode;
6128 struct drm_display_mode saved_mode;
6129 struct drm_display_mode *freesync_mode = NULL;
6130 bool native_mode_found = false;
6131 bool recalculate_timing = false;
6132 bool scale = dm_state ? (dm_state->scaling != RMX_OFF) : false;
6134 int preferred_refresh = 0;
6135 #if defined(CONFIG_DRM_AMD_DC_DCN)
6136 struct dsc_dec_dpcd_caps dsc_caps;
6138 struct dc_sink *sink = NULL;
6140 memset(&saved_mode, 0, sizeof(saved_mode));
6142 if (aconnector == NULL) {
6143 DRM_ERROR("aconnector is NULL!\n");
6147 drm_connector = &aconnector->base;
6149 if (!aconnector->dc_sink) {
6150 sink = create_fake_sink(aconnector);
6154 sink = aconnector->dc_sink;
6155 dc_sink_retain(sink);
6158 stream = dc_create_stream_for_sink(sink);
6160 if (stream == NULL) {
6161 DRM_ERROR("Failed to create stream for sink!\n");
6165 stream->dm_stream_context = aconnector;
6167 stream->timing.flags.LTE_340MCSC_SCRAMBLE =
6168 drm_connector->display_info.hdmi.scdc.scrambling.low_rates;
6170 list_for_each_entry(preferred_mode, &aconnector->base.modes, head) {
6171 /* Search for preferred mode */
6172 if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED) {
6173 native_mode_found = true;
6177 if (!native_mode_found)
6178 preferred_mode = list_first_entry_or_null(
6179 &aconnector->base.modes,
6180 struct drm_display_mode,
6183 mode_refresh = drm_mode_vrefresh(&mode);
6185 if (preferred_mode == NULL) {
6187 * This may not be an error; the use case is when we have no
6188 * usermode calls to reset and set the mode upon hotplug. In this
6189 * case, we call set mode ourselves to restore the previous mode
6190 * and the modelist may not be populated in time.
6192 DRM_DEBUG_DRIVER("No preferred mode found\n");
6194 recalculate_timing = amdgpu_freesync_vid_mode &&
6195 is_freesync_video_mode(&mode, aconnector);
6196 if (recalculate_timing) {
6197 freesync_mode = get_highest_refresh_rate_mode(aconnector, false);
6199 mode = *freesync_mode;
6201 decide_crtc_timing_for_drm_display_mode(
6202 &mode, preferred_mode, scale);
6204 preferred_refresh = drm_mode_vrefresh(preferred_mode);
6208 if (recalculate_timing)
6209 drm_mode_set_crtcinfo(&saved_mode, 0);
6211 drm_mode_set_crtcinfo(&mode, 0);
6214 * If scaling is enabled and the refresh rate didn't change,
6215 * we copy the vic and polarities of the old timings
6217 if (!scale || mode_refresh != preferred_refresh)
6218 fill_stream_properties_from_drm_display_mode(
6219 stream, &mode, &aconnector->base, con_state, NULL,
6222 fill_stream_properties_from_drm_display_mode(
6223 stream, &mode, &aconnector->base, con_state, old_stream,
6226 #if defined(CONFIG_DRM_AMD_DC_DCN)
6227 /* SST DSC determination policy */
6228 update_dsc_caps(aconnector, sink, stream, &dsc_caps);
6229 if (aconnector->dsc_settings.dsc_force_enable != DSC_CLK_FORCE_DISABLE && dsc_caps.is_dsc_supported)
6230 apply_dsc_policy_for_stream(aconnector, sink, stream, &dsc_caps);
6233 update_stream_scaling_settings(&mode, dm_state, stream);
6236 &stream->audio_info,
6240 update_stream_signal(stream, sink);
6242 if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
6243 mod_build_hf_vsif_infopacket(stream, &stream->vsp_infopacket);
6245 if (stream->link->psr_settings.psr_feature_enabled) {
6247 // should decide whether the stream supports VSC SDP colorimetry capability
6248 // before building the VSC info packet
6250 stream->use_vsc_sdp_for_colorimetry = false;
6251 if (aconnector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
6252 stream->use_vsc_sdp_for_colorimetry =
6253 aconnector->dc_sink->is_vsc_sdp_colorimetry_supported;
6255 if (stream->link->dpcd_caps.dprx_feature.bits.VSC_SDP_COLORIMETRY_SUPPORTED)
6256 stream->use_vsc_sdp_for_colorimetry = true;
6258 mod_build_vsc_infopacket(stream, &stream->vsc_infopacket);
6259 aconnector->psr_skip_count = AMDGPU_DM_PSR_ENTRY_DELAY;
6263 dc_sink_release(sink);
6268 static void amdgpu_dm_crtc_destroy(struct drm_crtc *crtc)
6270 drm_crtc_cleanup(crtc);
6274 static void dm_crtc_destroy_state(struct drm_crtc *crtc,
6275 struct drm_crtc_state *state)
6277 struct dm_crtc_state *cur = to_dm_crtc_state(state);
6279 /* TODO Destroy dc_stream objects once stream object is flattened */
6281 dc_stream_release(cur->stream);
6284 __drm_atomic_helper_crtc_destroy_state(state);
6290 static void dm_crtc_reset_state(struct drm_crtc *crtc)
6292 struct dm_crtc_state *state;
6295 dm_crtc_destroy_state(crtc, crtc->state);
6297 state = kzalloc(sizeof(*state), GFP_KERNEL);
6298 if (WARN_ON(!state))
6301 __drm_atomic_helper_crtc_reset(crtc, &state->base);
6304 static struct drm_crtc_state *
6305 dm_crtc_duplicate_state(struct drm_crtc *crtc)
6307 struct dm_crtc_state *state, *cur;
6309 cur = to_dm_crtc_state(crtc->state);
6311 if (WARN_ON(!crtc->state))
6314 state = kzalloc(sizeof(*state), GFP_KERNEL);
6318 __drm_atomic_helper_crtc_duplicate_state(crtc, &state->base);
6321 state->stream = cur->stream;
6322 dc_stream_retain(state->stream);
6325 state->active_planes = cur->active_planes;
6326 state->vrr_infopacket = cur->vrr_infopacket;
6327 state->abm_level = cur->abm_level;
6328 state->vrr_supported = cur->vrr_supported;
6329 state->freesync_config = cur->freesync_config;
6330 state->cm_has_degamma = cur->cm_has_degamma;
6331 state->cm_is_degamma_srgb = cur->cm_is_degamma_srgb;
6332 state->force_dpms_off = cur->force_dpms_off;
6333 /* TODO Duplicate dc_stream after stream object is flattened */
6335 return &state->base;
6338 #ifdef CONFIG_DRM_AMD_SECURE_DISPLAY
6339 static int amdgpu_dm_crtc_late_register(struct drm_crtc *crtc)
6341 crtc_debugfs_init(crtc);
6347 static inline int dm_set_vupdate_irq(struct drm_crtc *crtc, bool enable)
6349 enum dc_irq_source irq_source;
6350 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
6351 struct amdgpu_device *adev = drm_to_adev(crtc->dev);
6354 irq_source = IRQ_TYPE_VUPDATE + acrtc->otg_inst;
6356 rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
6358 DRM_DEBUG_VBL("crtc %d - vupdate irq %sabling: r=%d\n",
6359 acrtc->crtc_id, enable ? "en" : "dis", rc);
6363 static inline int dm_set_vblank(struct drm_crtc *crtc, bool enable)
6365 enum dc_irq_source irq_source;
6366 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
6367 struct amdgpu_device *adev = drm_to_adev(crtc->dev);
6368 struct dm_crtc_state *acrtc_state = to_dm_crtc_state(crtc->state);
6369 #if defined(CONFIG_DRM_AMD_DC_DCN)
6370 struct amdgpu_display_manager *dm = &adev->dm;
6371 struct vblank_control_work *work;
6376 /* vblank irq on -> Only need vupdate irq in vrr mode */
6377 if (amdgpu_dm_vrr_active(acrtc_state))
6378 rc = dm_set_vupdate_irq(crtc, true);
6380 /* vblank irq off -> vupdate irq off */
6381 rc = dm_set_vupdate_irq(crtc, false);
6387 irq_source = IRQ_TYPE_VBLANK + acrtc->otg_inst;
6389 if (!dc_interrupt_set(adev->dm.dc, irq_source, enable))
6392 if (amdgpu_in_reset(adev))
6395 #if defined(CONFIG_DRM_AMD_DC_DCN)
6396 if (dm->vblank_control_workqueue) {
6397 work = kzalloc(sizeof(*work), GFP_ATOMIC);
6401 INIT_WORK(&work->work, vblank_control_worker);
6403 work->acrtc = acrtc;
6404 work->enable = enable;
6406 if (acrtc_state->stream) {
6407 dc_stream_retain(acrtc_state->stream);
6408 work->stream = acrtc_state->stream;
6411 queue_work(dm->vblank_control_workqueue, &work->work);
6418 static int dm_enable_vblank(struct drm_crtc *crtc)
6420 return dm_set_vblank(crtc, true);
6423 static void dm_disable_vblank(struct drm_crtc *crtc)
6425 dm_set_vblank(crtc, false);
6428 /* Only the options currently available for the driver are implemented */
6429 static const struct drm_crtc_funcs amdgpu_dm_crtc_funcs = {
6430 .reset = dm_crtc_reset_state,
6431 .destroy = amdgpu_dm_crtc_destroy,
6432 .set_config = drm_atomic_helper_set_config,
6433 .page_flip = drm_atomic_helper_page_flip,
6434 .atomic_duplicate_state = dm_crtc_duplicate_state,
6435 .atomic_destroy_state = dm_crtc_destroy_state,
6436 .set_crc_source = amdgpu_dm_crtc_set_crc_source,
6437 .verify_crc_source = amdgpu_dm_crtc_verify_crc_source,
6438 .get_crc_sources = amdgpu_dm_crtc_get_crc_sources,
6439 .get_vblank_counter = amdgpu_get_vblank_counter_kms,
6440 .enable_vblank = dm_enable_vblank,
6441 .disable_vblank = dm_disable_vblank,
6442 .get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
6443 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
6444 .late_register = amdgpu_dm_crtc_late_register,
6448 static enum drm_connector_status
6449 amdgpu_dm_connector_detect(struct drm_connector *connector, bool force)
6452 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
6456 * 1. This interface is NOT called in context of HPD irq.
6457 * 2. This interface *is called* in the context of a user-mode ioctl, which
6458 * makes it a bad place for *any* MST-related activity.
6461 if (aconnector->base.force == DRM_FORCE_UNSPECIFIED &&
6462 !aconnector->fake_enable)
6463 connected = (aconnector->dc_sink != NULL);
6465 connected = (aconnector->base.force == DRM_FORCE_ON);
6467 update_subconnector_property(aconnector);
6469 return (connected ? connector_status_connected :
6470 connector_status_disconnected);
6473 int amdgpu_dm_connector_atomic_set_property(struct drm_connector *connector,
6474 struct drm_connector_state *connector_state,
6475 struct drm_property *property,
6478 struct drm_device *dev = connector->dev;
6479 struct amdgpu_device *adev = drm_to_adev(dev);
6480 struct dm_connector_state *dm_old_state =
6481 to_dm_connector_state(connector->state);
6482 struct dm_connector_state *dm_new_state =
6483 to_dm_connector_state(connector_state);
6487 if (property == dev->mode_config.scaling_mode_property) {
6488 enum amdgpu_rmx_type rmx_type;
6491 case DRM_MODE_SCALE_CENTER:
6492 rmx_type = RMX_CENTER;
6494 case DRM_MODE_SCALE_ASPECT:
6495 rmx_type = RMX_ASPECT;
6497 case DRM_MODE_SCALE_FULLSCREEN:
6498 rmx_type = RMX_FULL;
6500 case DRM_MODE_SCALE_NONE:
6506 if (dm_old_state->scaling == rmx_type)
6509 dm_new_state->scaling = rmx_type;
6511 } else if (property == adev->mode_info.underscan_hborder_property) {
6512 dm_new_state->underscan_hborder = val;
6514 } else if (property == adev->mode_info.underscan_vborder_property) {
6515 dm_new_state->underscan_vborder = val;
6517 } else if (property == adev->mode_info.underscan_property) {
6518 dm_new_state->underscan_enable = val;
6520 } else if (property == adev->mode_info.abm_level_property) {
6521 dm_new_state->abm_level = val;
6528 int amdgpu_dm_connector_atomic_get_property(struct drm_connector *connector,
6529 const struct drm_connector_state *state,
6530 struct drm_property *property,
6533 struct drm_device *dev = connector->dev;
6534 struct amdgpu_device *adev = drm_to_adev(dev);
6535 struct dm_connector_state *dm_state =
6536 to_dm_connector_state(state);
6539 if (property == dev->mode_config.scaling_mode_property) {
6540 switch (dm_state->scaling) {
6542 *val = DRM_MODE_SCALE_CENTER;
6545 *val = DRM_MODE_SCALE_ASPECT;
6548 *val = DRM_MODE_SCALE_FULLSCREEN;
6552 *val = DRM_MODE_SCALE_NONE;
6556 } else if (property == adev->mode_info.underscan_hborder_property) {
6557 *val = dm_state->underscan_hborder;
6559 } else if (property == adev->mode_info.underscan_vborder_property) {
6560 *val = dm_state->underscan_vborder;
6562 } else if (property == adev->mode_info.underscan_property) {
6563 *val = dm_state->underscan_enable;
6565 } else if (property == adev->mode_info.abm_level_property) {
6566 *val = dm_state->abm_level;
6573 static void amdgpu_dm_connector_unregister(struct drm_connector *connector)
6575 struct amdgpu_dm_connector *amdgpu_dm_connector = to_amdgpu_dm_connector(connector);
6577 drm_dp_aux_unregister(&amdgpu_dm_connector->dm_dp_aux.aux);
6580 static void amdgpu_dm_connector_destroy(struct drm_connector *connector)
6582 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
6583 const struct dc_link *link = aconnector->dc_link;
6584 struct amdgpu_device *adev = drm_to_adev(connector->dev);
6585 struct amdgpu_display_manager *dm = &adev->dm;
6589 * Call only if mst_mgr was initialized before, since it's not done
6590 * for all connector types.
6592 if (aconnector->mst_mgr.dev)
6593 drm_dp_mst_topology_mgr_destroy(&aconnector->mst_mgr);
6595 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
6596 defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
6597 for (i = 0; i < dm->num_of_edps; i++) {
6598 if ((link == dm->backlight_link[i]) && dm->backlight_dev[i]) {
6599 backlight_device_unregister(dm->backlight_dev[i]);
6600 dm->backlight_dev[i] = NULL;
6605 if (aconnector->dc_em_sink)
6606 dc_sink_release(aconnector->dc_em_sink);
6607 aconnector->dc_em_sink = NULL;
6608 if (aconnector->dc_sink)
6609 dc_sink_release(aconnector->dc_sink);
6610 aconnector->dc_sink = NULL;
6612 drm_dp_cec_unregister_connector(&aconnector->dm_dp_aux.aux);
6613 drm_connector_unregister(connector);
6614 drm_connector_cleanup(connector);
6615 if (aconnector->i2c) {
6616 i2c_del_adapter(&aconnector->i2c->base);
6617 kfree(aconnector->i2c);
6619 kfree(aconnector->dm_dp_aux.aux.name);
6624 void amdgpu_dm_connector_funcs_reset(struct drm_connector *connector)
6626 struct dm_connector_state *state =
6627 to_dm_connector_state(connector->state);
6629 if (connector->state)
6630 __drm_atomic_helper_connector_destroy_state(connector->state);
6634 state = kzalloc(sizeof(*state), GFP_KERNEL);
6637 state->scaling = RMX_OFF;
6638 state->underscan_enable = false;
6639 state->underscan_hborder = 0;
6640 state->underscan_vborder = 0;
6641 state->base.max_requested_bpc = 8;
6642 state->vcpi_slots = 0;
6644 if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
6645 state->abm_level = amdgpu_dm_abm_level;
6647 __drm_atomic_helper_connector_reset(connector, &state->base);
6651 struct drm_connector_state *
6652 amdgpu_dm_connector_atomic_duplicate_state(struct drm_connector *connector)
6654 struct dm_connector_state *state =
6655 to_dm_connector_state(connector->state);
6657 struct dm_connector_state *new_state =
6658 kmemdup(state, sizeof(*state), GFP_KERNEL);
6663 __drm_atomic_helper_connector_duplicate_state(connector, &new_state->base);
6665 new_state->freesync_capable = state->freesync_capable;
6666 new_state->abm_level = state->abm_level;
6667 new_state->scaling = state->scaling;
6668 new_state->underscan_enable = state->underscan_enable;
6669 new_state->underscan_hborder = state->underscan_hborder;
6670 new_state->underscan_vborder = state->underscan_vborder;
6671 new_state->vcpi_slots = state->vcpi_slots;
6672 new_state->pbn = state->pbn;
6673 return &new_state->base;
6677 amdgpu_dm_connector_late_register(struct drm_connector *connector)
6679 struct amdgpu_dm_connector *amdgpu_dm_connector =
6680 to_amdgpu_dm_connector(connector);
6683 if ((connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort) ||
6684 (connector->connector_type == DRM_MODE_CONNECTOR_eDP)) {
6685 amdgpu_dm_connector->dm_dp_aux.aux.dev = connector->kdev;
6686 r = drm_dp_aux_register(&amdgpu_dm_connector->dm_dp_aux.aux);
6691 #if defined(CONFIG_DEBUG_FS)
6692 connector_debugfs_init(amdgpu_dm_connector);
6698 static const struct drm_connector_funcs amdgpu_dm_connector_funcs = {
6699 .reset = amdgpu_dm_connector_funcs_reset,
6700 .detect = amdgpu_dm_connector_detect,
6701 .fill_modes = drm_helper_probe_single_connector_modes,
6702 .destroy = amdgpu_dm_connector_destroy,
6703 .atomic_duplicate_state = amdgpu_dm_connector_atomic_duplicate_state,
6704 .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
6705 .atomic_set_property = amdgpu_dm_connector_atomic_set_property,
6706 .atomic_get_property = amdgpu_dm_connector_atomic_get_property,
6707 .late_register = amdgpu_dm_connector_late_register,
6708 .early_unregister = amdgpu_dm_connector_unregister
6711 static int get_modes(struct drm_connector *connector)
6713 return amdgpu_dm_connector_get_modes(connector);
6716 static void create_eml_sink(struct amdgpu_dm_connector *aconnector)
6718 struct dc_sink_init_data init_params = {
6719 .link = aconnector->dc_link,
6720 .sink_signal = SIGNAL_TYPE_VIRTUAL
6724 if (!aconnector->base.edid_blob_ptr) {
6725 DRM_ERROR("No EDID firmware found on connector: %s ,forcing to OFF!\n",
6726 aconnector->base.name);
6728 aconnector->base.force = DRM_FORCE_OFF;
6729 aconnector->base.override_edid = false;
6733 edid = (struct edid *) aconnector->base.edid_blob_ptr->data;
6735 aconnector->edid = edid;
6737 aconnector->dc_em_sink = dc_link_add_remote_sink(
6738 aconnector->dc_link,
6740 (edid->extensions + 1) * EDID_LENGTH,
6743 if (aconnector->base.force == DRM_FORCE_ON) {
6744 aconnector->dc_sink = aconnector->dc_link->local_sink ?
6745 aconnector->dc_link->local_sink :
6746 aconnector->dc_em_sink;
6747 dc_sink_retain(aconnector->dc_sink);
6751 static void handle_edid_mgmt(struct amdgpu_dm_connector *aconnector)
6753 struct dc_link *link = (struct dc_link *)aconnector->dc_link;
6756 * In case of headless boot with force on for a DP managed connector,
6757 * those settings have to be != 0 to get an initial modeset
6759 if (link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT) {
6760 link->verified_link_cap.lane_count = LANE_COUNT_FOUR;
6761 link->verified_link_cap.link_rate = LINK_RATE_HIGH2;
6765 aconnector->base.override_edid = true;
6766 create_eml_sink(aconnector);
6769 static struct dc_stream_state *
6770 create_validate_stream_for_sink(struct amdgpu_dm_connector *aconnector,
6771 const struct drm_display_mode *drm_mode,
6772 const struct dm_connector_state *dm_state,
6773 const struct dc_stream_state *old_stream)
6775 struct drm_connector *connector = &aconnector->base;
6776 struct amdgpu_device *adev = drm_to_adev(connector->dev);
6777 struct dc_stream_state *stream;
6778 const struct drm_connector_state *drm_state = dm_state ? &dm_state->base : NULL;
6779 int requested_bpc = drm_state ? drm_state->max_requested_bpc : 8;
6780 enum dc_status dc_result = DC_OK;
6783 stream = create_stream_for_sink(aconnector, drm_mode,
6784 dm_state, old_stream,
6786 if (stream == NULL) {
6787 DRM_ERROR("Failed to create stream for sink!\n");
6791 dc_result = dc_validate_stream(adev->dm.dc, stream);
6793 if (dc_result != DC_OK) {
6794 DRM_DEBUG_KMS("Mode %dx%d (clk %d) failed DC validation with error %d (%s)\n",
6799 dc_status_to_str(dc_result));
6801 dc_stream_release(stream);
6803 requested_bpc -= 2; /* lower bpc to retry validation */
6806 } while (stream == NULL && requested_bpc >= 6);
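/*
 * As a rough illustration of the fallback above (example values only,
 * not taken from any specific sink): with the default max_requested_bpc
 * of 8, a mode that fails validation at 8 bpc is retried once at 6 bpc;
 * a connector requesting 10 bpc would be retried at 8 and then 6 bpc
 * before validation is abandoned.
 */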
6808 if (dc_result == DC_FAIL_ENC_VALIDATE && !aconnector->force_yuv420_output) {
6809 DRM_DEBUG_KMS("Retry forcing YCbCr420 encoding\n");
6811 aconnector->force_yuv420_output = true;
6812 stream = create_validate_stream_for_sink(aconnector, drm_mode,
6813 dm_state, old_stream);
6814 aconnector->force_yuv420_output = false;
6820 enum drm_mode_status amdgpu_dm_connector_mode_valid(struct drm_connector *connector,
6821 struct drm_display_mode *mode)
6823 int result = MODE_ERROR;
6824 struct dc_sink *dc_sink;
6825 /* TODO: Unhardcode stream count */
6826 struct dc_stream_state *stream;
6827 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
6829 if ((mode->flags & DRM_MODE_FLAG_INTERLACE) ||
6830 (mode->flags & DRM_MODE_FLAG_DBLSCAN))
6834 * Only run this the first time mode_valid is called to initialize
6837 if (aconnector->base.force != DRM_FORCE_UNSPECIFIED &&
6838 !aconnector->dc_em_sink)
6839 handle_edid_mgmt(aconnector);
6841 dc_sink = to_amdgpu_dm_connector(connector)->dc_sink;
6843 if (dc_sink == NULL && aconnector->base.force != DRM_FORCE_ON_DIGITAL &&
6844 aconnector->base.force != DRM_FORCE_ON) {
6845 DRM_ERROR("dc_sink is NULL!\n");
6849 stream = create_validate_stream_for_sink(aconnector, mode, NULL, NULL);
6851 dc_stream_release(stream);
6856 /* TODO: error handling */
6860 static int fill_hdr_info_packet(const struct drm_connector_state *state,
6861 struct dc_info_packet *out)
6863 struct hdmi_drm_infoframe frame;
6864 unsigned char buf[30]; /* 26 + 4 */
6868 memset(out, 0, sizeof(*out));
6870 if (!state->hdr_output_metadata)
6873 ret = drm_hdmi_infoframe_set_hdr_metadata(&frame, state);
6877 len = hdmi_drm_infoframe_pack_only(&frame, buf, sizeof(buf));
6881 /* Static metadata is a fixed 26 bytes + 4 byte header. */
6885 /* Prepare the infopacket for DC. */
6886 switch (state->connector->connector_type) {
6887 case DRM_MODE_CONNECTOR_HDMIA:
6888 out->hb0 = 0x87; /* type */
6889 out->hb1 = 0x01; /* version */
6890 out->hb2 = 0x1A; /* length */
6891 out->sb[0] = buf[3]; /* checksum */
6895 case DRM_MODE_CONNECTOR_DisplayPort:
6896 case DRM_MODE_CONNECTOR_eDP:
6897 out->hb0 = 0x00; /* sdp id, zero */
6898 out->hb1 = 0x87; /* type */
6899 out->hb2 = 0x1D; /* payload len - 1 */
6900 out->hb3 = (0x13 << 2); /* sdp version */
6901 out->sb[0] = 0x01; /* version */
6902 out->sb[1] = 0x1A; /* length */
6910 memcpy(&out->sb[i], &buf[4], 26);
6913 print_hex_dump(KERN_DEBUG, "HDR SB:", DUMP_PREFIX_NONE, 16, 1, out->sb,
6914 sizeof(out->sb), false);
6920 amdgpu_dm_connector_atomic_check(struct drm_connector *conn,
6921 struct drm_atomic_state *state)
6923 struct drm_connector_state *new_con_state =
6924 drm_atomic_get_new_connector_state(state, conn);
6925 struct drm_connector_state *old_con_state =
6926 drm_atomic_get_old_connector_state(state, conn);
6927 struct drm_crtc *crtc = new_con_state->crtc;
6928 struct drm_crtc_state *new_crtc_state;
6931 trace_amdgpu_dm_connector_atomic_check(new_con_state);
6936 if (!drm_connector_atomic_hdr_metadata_equal(old_con_state, new_con_state)) {
6937 struct dc_info_packet hdr_infopacket;
6939 ret = fill_hdr_info_packet(new_con_state, &hdr_infopacket);
6943 new_crtc_state = drm_atomic_get_crtc_state(state, crtc);
6944 if (IS_ERR(new_crtc_state))
6945 return PTR_ERR(new_crtc_state);
6948 * DC considers the stream backends changed if the
6949 * static metadata changes. Forcing the modeset also
6950 * gives a simple way for userspace to switch from
6951 * 8bpc to 10bpc when setting the metadata to enter
6954 * Changing the static metadata after it's been
6955 * set is permissible, however. So only force a
6956 * modeset if we're entering or exiting HDR.
6958 new_crtc_state->mode_changed =
6959 !old_con_state->hdr_output_metadata ||
6960 !new_con_state->hdr_output_metadata;
6966 static const struct drm_connector_helper_funcs
6967 amdgpu_dm_connector_helper_funcs = {
6969 * If hotplugging a second bigger display in FB Con mode, bigger resolution
6970 * modes will be filtered by drm_mode_validate_size(), and those modes
6971 * are missing after the user starts lightdm. So we need to renew the modes list
6972 * in the get_modes callback, not just return the modes count
6974 .get_modes = get_modes,
6975 .mode_valid = amdgpu_dm_connector_mode_valid,
6976 .atomic_check = amdgpu_dm_connector_atomic_check,
6979 static void dm_crtc_helper_disable(struct drm_crtc *crtc)
6983 static int count_crtc_active_planes(struct drm_crtc_state *new_crtc_state)
6985 struct drm_atomic_state *state = new_crtc_state->state;
6986 struct drm_plane *plane;
6989 drm_for_each_plane_mask(plane, state->dev, new_crtc_state->plane_mask) {
6990 struct drm_plane_state *new_plane_state;
6992 /* Cursor planes are "fake". */
6993 if (plane->type == DRM_PLANE_TYPE_CURSOR)
6996 new_plane_state = drm_atomic_get_new_plane_state(state, plane);
6998 if (!new_plane_state) {
7000 * The plane is enabled on the CRTC and hasn't changed
7001 * state. This means that it previously passed
7002 * validation and is therefore enabled.
7008 /* We need a framebuffer to be considered enabled. */
7009 num_active += (new_plane_state->fb != NULL);
7015 static void dm_update_crtc_active_planes(struct drm_crtc *crtc,
7016 struct drm_crtc_state *new_crtc_state)
7018 struct dm_crtc_state *dm_new_crtc_state =
7019 to_dm_crtc_state(new_crtc_state);
7021 dm_new_crtc_state->active_planes = 0;
7023 if (!dm_new_crtc_state->stream)
7026 dm_new_crtc_state->active_planes =
7027 count_crtc_active_planes(new_crtc_state);
7030 static int dm_crtc_helper_atomic_check(struct drm_crtc *crtc,
7031 struct drm_atomic_state *state)
7033 struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state,
7035 struct amdgpu_device *adev = drm_to_adev(crtc->dev);
7036 struct dc *dc = adev->dm.dc;
7037 struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
7040 trace_amdgpu_dm_crtc_atomic_check(crtc_state);
7042 dm_update_crtc_active_planes(crtc, crtc_state);
7044 if (WARN_ON(unlikely(!dm_crtc_state->stream &&
7045 modeset_required(crtc_state, NULL, dm_crtc_state->stream)))) {
7050 * We require the primary plane to be enabled whenever the CRTC is, otherwise
7051 * drm_mode_cursor_universal may end up trying to enable the cursor plane while all other
7052 * planes are disabled, which is not supported by the hardware. And there is legacy
7053 * userspace which stops using the HW cursor altogether in response to the resulting EINVAL.
7055 if (crtc_state->enable &&
7056 !(crtc_state->plane_mask & drm_plane_mask(crtc->primary))) {
7057 DRM_DEBUG_ATOMIC("Can't enable a CRTC without enabling the primary plane\n");
7061 /* In some use cases, like reset, no stream is attached */
7062 if (!dm_crtc_state->stream)
7065 if (dc_validate_stream(dc, dm_crtc_state->stream) == DC_OK)
7068 DRM_DEBUG_ATOMIC("Failed DC stream validation\n");
7072 static bool dm_crtc_helper_mode_fixup(struct drm_crtc *crtc,
7073 const struct drm_display_mode *mode,
7074 struct drm_display_mode *adjusted_mode)
7079 static const struct drm_crtc_helper_funcs amdgpu_dm_crtc_helper_funcs = {
7080 .disable = dm_crtc_helper_disable,
7081 .atomic_check = dm_crtc_helper_atomic_check,
7082 .mode_fixup = dm_crtc_helper_mode_fixup,
7083 .get_scanout_position = amdgpu_crtc_get_scanout_position,
7086 static void dm_encoder_helper_disable(struct drm_encoder *encoder)
7091 static int convert_dc_color_depth_into_bpc(enum dc_color_depth display_color_depth)
7093 switch (display_color_depth) {
7094 case COLOR_DEPTH_666:
7096 case COLOR_DEPTH_888:
7098 case COLOR_DEPTH_101010:
7100 case COLOR_DEPTH_121212:
7102 case COLOR_DEPTH_141414:
7104 case COLOR_DEPTH_161616:
7112 static int dm_encoder_helper_atomic_check(struct drm_encoder *encoder,
7113 struct drm_crtc_state *crtc_state,
7114 struct drm_connector_state *conn_state)
7116 struct drm_atomic_state *state = crtc_state->state;
7117 struct drm_connector *connector = conn_state->connector;
7118 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
7119 struct dm_connector_state *dm_new_connector_state = to_dm_connector_state(conn_state);
7120 const struct drm_display_mode *adjusted_mode = &crtc_state->adjusted_mode;
7121 struct drm_dp_mst_topology_mgr *mst_mgr;
7122 struct drm_dp_mst_port *mst_port;
7123 enum dc_color_depth color_depth;
7125 bool is_y420 = false;
7127 if (!aconnector->port || !aconnector->dc_sink)
7130 mst_port = aconnector->port;
7131 mst_mgr = &aconnector->mst_port->mst_mgr;
7133 if (!crtc_state->connectors_changed && !crtc_state->mode_changed)
7136 if (!state->duplicated) {
7137 int max_bpc = conn_state->max_requested_bpc;
7138 is_y420 = drm_mode_is_420_also(&connector->display_info, adjusted_mode) &&
7139 aconnector->force_yuv420_output;
7140 color_depth = convert_color_depth_from_display_info(connector,
7143 bpp = convert_dc_color_depth_into_bpc(color_depth) * 3;
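/*
 * For illustration: COLOR_DEPTH_888 converts to 8 bpc, giving
 * bpp = 8 * 3 = 24 for a three-component (RGB/YCbCr 4:4:4) pixel.
 */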
7144 clock = adjusted_mode->clock;
7145 dm_new_connector_state->pbn = drm_dp_calc_pbn_mode(clock, bpp, false);
7147 dm_new_connector_state->vcpi_slots = drm_dp_atomic_find_vcpi_slots(state,
7150 dm_new_connector_state->pbn,
7151 dm_mst_get_pbn_divider(aconnector->dc_link));
7152 if (dm_new_connector_state->vcpi_slots < 0) {
7153 DRM_DEBUG_ATOMIC("failed finding vcpi slots: %d\n", (int)dm_new_connector_state->vcpi_slots);
7154 return dm_new_connector_state->vcpi_slots;
7159 const struct drm_encoder_helper_funcs amdgpu_dm_encoder_helper_funcs = {
7160 .disable = dm_encoder_helper_disable,
7161 .atomic_check = dm_encoder_helper_atomic_check
7164 #if defined(CONFIG_DRM_AMD_DC_DCN)
7165 static int dm_update_mst_vcpi_slots_for_dsc(struct drm_atomic_state *state,
7166 struct dc_state *dc_state,
7167 struct dsc_mst_fairness_vars *vars)
7169 struct dc_stream_state *stream = NULL;
7170 struct drm_connector *connector;
7171 struct drm_connector_state *new_con_state;
7172 struct amdgpu_dm_connector *aconnector;
7173 struct dm_connector_state *dm_conn_state;
7175 int vcpi, pbn_div, pbn = 0;
7177 for_each_new_connector_in_state(state, connector, new_con_state, i) {
7179 aconnector = to_amdgpu_dm_connector(connector);
7181 if (!aconnector->port)
7184 if (!new_con_state || !new_con_state->crtc)
7187 dm_conn_state = to_dm_connector_state(new_con_state);
7189 for (j = 0; j < dc_state->stream_count; j++) {
7190 stream = dc_state->streams[j];
7194 if ((struct amdgpu_dm_connector*)stream->dm_stream_context == aconnector)
7203 if (stream->timing.flags.DSC != 1) {
7204 drm_dp_mst_atomic_enable_dsc(state,
7212 pbn_div = dm_mst_get_pbn_divider(stream->link);
7213 clock = stream->timing.pix_clk_100hz / 10;
7214 /* pbn is calculated by compute_mst_dsc_configs_for_state */
7215 for (j = 0; j < dc_state->stream_count; j++) {
7216 if (vars[j].aconnector == aconnector) {
7222 vcpi = drm_dp_mst_atomic_enable_dsc(state,
7229 dm_conn_state->pbn = pbn;
7230 dm_conn_state->vcpi_slots = vcpi;
7236 static void dm_drm_plane_reset(struct drm_plane *plane)
7238 struct dm_plane_state *amdgpu_state = NULL;
7241 plane->funcs->atomic_destroy_state(plane, plane->state);
7243 amdgpu_state = kzalloc(sizeof(*amdgpu_state), GFP_KERNEL);
7244 WARN_ON(amdgpu_state == NULL);
7247 __drm_atomic_helper_plane_reset(plane, &amdgpu_state->base);
7250 static struct drm_plane_state *
7251 dm_drm_plane_duplicate_state(struct drm_plane *plane)
7253 struct dm_plane_state *dm_plane_state, *old_dm_plane_state;
7255 old_dm_plane_state = to_dm_plane_state(plane->state);
7256 dm_plane_state = kzalloc(sizeof(*dm_plane_state), GFP_KERNEL);
7257 if (!dm_plane_state)
7260 __drm_atomic_helper_plane_duplicate_state(plane, &dm_plane_state->base);
7262 if (old_dm_plane_state->dc_state) {
7263 dm_plane_state->dc_state = old_dm_plane_state->dc_state;
7264 dc_plane_state_retain(dm_plane_state->dc_state);
7267 return &dm_plane_state->base;
7270 static void dm_drm_plane_destroy_state(struct drm_plane *plane,
7271 struct drm_plane_state *state)
7273 struct dm_plane_state *dm_plane_state = to_dm_plane_state(state);
7275 if (dm_plane_state->dc_state)
7276 dc_plane_state_release(dm_plane_state->dc_state);
7278 drm_atomic_helper_plane_destroy_state(plane, state);
7281 static const struct drm_plane_funcs dm_plane_funcs = {
7282 .update_plane = drm_atomic_helper_update_plane,
7283 .disable_plane = drm_atomic_helper_disable_plane,
7284 .destroy = drm_primary_helper_destroy,
7285 .reset = dm_drm_plane_reset,
7286 .atomic_duplicate_state = dm_drm_plane_duplicate_state,
7287 .atomic_destroy_state = dm_drm_plane_destroy_state,
7288 .format_mod_supported = dm_plane_format_mod_supported,
7291 static int dm_plane_helper_prepare_fb(struct drm_plane *plane,
7292 struct drm_plane_state *new_state)
7294 struct amdgpu_framebuffer *afb;
7295 struct drm_gem_object *obj;
7296 struct amdgpu_device *adev;
7297 struct amdgpu_bo *rbo;
7298 struct dm_plane_state *dm_plane_state_new, *dm_plane_state_old;
7299 struct list_head list;
7300 struct ttm_validate_buffer tv;
7301 struct ww_acquire_ctx ticket;
7305 if (!new_state->fb) {
7306 DRM_DEBUG_KMS("No FB bound\n");
7310 afb = to_amdgpu_framebuffer(new_state->fb);
7311 obj = new_state->fb->obj[0];
7312 rbo = gem_to_amdgpu_bo(obj);
7313 adev = amdgpu_ttm_adev(rbo->tbo.bdev);
7314 INIT_LIST_HEAD(&list);
7318 list_add(&tv.head, &list);
7320 r = ttm_eu_reserve_buffers(&ticket, &list, false, NULL);
7322 dev_err(adev->dev, "fail to reserve bo (%d)\n", r);
7326 if (plane->type != DRM_PLANE_TYPE_CURSOR)
7327 domain = amdgpu_display_supported_domains(adev, rbo->flags);
7329 domain = AMDGPU_GEM_DOMAIN_VRAM;
7331 r = amdgpu_bo_pin(rbo, domain);
7332 if (unlikely(r != 0)) {
7333 if (r != -ERESTARTSYS)
7334 DRM_ERROR("Failed to pin framebuffer with error %d\n", r);
7335 ttm_eu_backoff_reservation(&ticket, &list);
7339 r = amdgpu_ttm_alloc_gart(&rbo->tbo);
7340 if (unlikely(r != 0)) {
7341 amdgpu_bo_unpin(rbo);
7342 ttm_eu_backoff_reservation(&ticket, &list);
7343 DRM_ERROR("%p bind failed\n", rbo);
7347 ttm_eu_backoff_reservation(&ticket, &list);
7349 afb->address = amdgpu_bo_gpu_offset(rbo);
7354 * We don't do surface updates on planes that have been newly created,
7355 * but we also don't have the afb->address during atomic check.
7357 * Fill in buffer attributes depending on the address here, but only on
7358 * newly created planes since they're not being used by DC yet and this
7359 * won't modify global state.
7361 dm_plane_state_old = to_dm_plane_state(plane->state);
7362 dm_plane_state_new = to_dm_plane_state(new_state);
7364 if (dm_plane_state_new->dc_state &&
7365 dm_plane_state_old->dc_state != dm_plane_state_new->dc_state) {
7366 struct dc_plane_state *plane_state =
7367 dm_plane_state_new->dc_state;
7368 bool force_disable_dcc = !plane_state->dcc.enable;
7370 fill_plane_buffer_attributes(
7371 adev, afb, plane_state->format, plane_state->rotation,
7373 &plane_state->tiling_info, &plane_state->plane_size,
7374 &plane_state->dcc, &plane_state->address,
7375 afb->tmz_surface, force_disable_dcc);
7381 static void dm_plane_helper_cleanup_fb(struct drm_plane *plane,
7382 struct drm_plane_state *old_state)
7384 struct amdgpu_bo *rbo;
7390 rbo = gem_to_amdgpu_bo(old_state->fb->obj[0]);
7391 r = amdgpu_bo_reserve(rbo, false);
7393 DRM_ERROR("failed to reserve rbo before unpin\n");
7397 amdgpu_bo_unpin(rbo);
7398 amdgpu_bo_unreserve(rbo);
7399 amdgpu_bo_unref(&rbo);
7402 static int dm_plane_helper_check_state(struct drm_plane_state *state,
7403 struct drm_crtc_state *new_crtc_state)
7405 struct drm_framebuffer *fb = state->fb;
7406 int min_downscale, max_upscale;
7408 int max_scale = INT_MAX;
7410 /* Plane enabled? Validate viewport and get scaling factors from plane caps. */
7411 if (fb && state->crtc) {
7412 /* Validate viewport to cover the case when only the position changes */
7413 if (state->plane->type != DRM_PLANE_TYPE_CURSOR) {
7414 int viewport_width = state->crtc_w;
7415 int viewport_height = state->crtc_h;
7417 if (state->crtc_x < 0)
7418 viewport_width += state->crtc_x;
7419 else if (state->crtc_x + state->crtc_w > new_crtc_state->mode.crtc_hdisplay)
7420 viewport_width = new_crtc_state->mode.crtc_hdisplay - state->crtc_x;
7422 if (state->crtc_y < 0)
7423 viewport_height += state->crtc_y;
7424 else if (state->crtc_y + state->crtc_h > new_crtc_state->mode.crtc_vdisplay)
7425 viewport_height = new_crtc_state->mode.crtc_vdisplay - state->crtc_y;
7427 if (viewport_width < 0 || viewport_height < 0) {
7428 DRM_DEBUG_ATOMIC("Plane completely outside of screen\n");
7430 } else if (viewport_width < MIN_VIEWPORT_SIZE*2) { /* x2 for width is because of pipe-split. */
7431 DRM_DEBUG_ATOMIC("Viewport width %d smaller than %d\n", viewport_width, MIN_VIEWPORT_SIZE*2);
7433 } else if (viewport_height < MIN_VIEWPORT_SIZE) {
7434 DRM_DEBUG_ATOMIC("Viewport height %d smaller than %d\n", viewport_height, MIN_VIEWPORT_SIZE);
7440 /* Get min/max allowed scaling factors from plane caps. */
7441 get_min_max_dc_plane_scaling(state->crtc->dev, fb,
7442 &min_downscale, &max_upscale);
7444 * Convert to drm convention: 16.16 fixed point, instead of dc's
7445 * 1.0 == 1000. Also drm scaling is src/dst instead of dc's
7446 * dst/src, so min_scale = 1.0 / max_upscale, etc.
7448 min_scale = (1000 << 16) / max_upscale;
7449 max_scale = (1000 << 16) / min_downscale;
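/*
 * Worked example (illustrative plane-cap numbers, not tied to a specific
 * ASIC): if the caps report max_upscale = 16000 and min_downscale = 250
 * in DC's 1.0 == 1000 convention (16x up, 0.25x down), then
 * min_scale = (1000 << 16) / 16000 = 4096 (1/16 in 16.16 fixed point) and
 * max_scale = (1000 << 16) / 250 = 262144 (4.0 in 16.16 fixed point).
 */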
7452 return drm_atomic_helper_check_plane_state(
7453 state, new_crtc_state, min_scale, max_scale, true, true);
7456 static int dm_plane_atomic_check(struct drm_plane *plane,
7457 struct drm_atomic_state *state)
7459 struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state,
7461 struct amdgpu_device *adev = drm_to_adev(plane->dev);
7462 struct dc *dc = adev->dm.dc;
7463 struct dm_plane_state *dm_plane_state;
7464 struct dc_scaling_info scaling_info;
7465 struct drm_crtc_state *new_crtc_state;
7468 trace_amdgpu_dm_plane_atomic_check(new_plane_state);
7470 dm_plane_state = to_dm_plane_state(new_plane_state);
7472 if (!dm_plane_state->dc_state)
7476 drm_atomic_get_new_crtc_state(state,
7477 new_plane_state->crtc);
7478 if (!new_crtc_state)
7481 ret = dm_plane_helper_check_state(new_plane_state, new_crtc_state);
7485 ret = fill_dc_scaling_info(new_plane_state, &scaling_info);
7489 if (dc_validate_plane(dc, dm_plane_state->dc_state) == DC_OK)
7495 static int dm_plane_atomic_async_check(struct drm_plane *plane,
7496 struct drm_atomic_state *state)
7498 /* Only support async updates on cursor planes. */
7499 if (plane->type != DRM_PLANE_TYPE_CURSOR)
7505 static void dm_plane_atomic_async_update(struct drm_plane *plane,
7506 struct drm_atomic_state *state)
7508 struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state,
7510 struct drm_plane_state *old_state =
7511 drm_atomic_get_old_plane_state(state, plane);
7513 trace_amdgpu_dm_atomic_update_cursor(new_state);
7515 swap(plane->state->fb, new_state->fb);
7517 plane->state->src_x = new_state->src_x;
7518 plane->state->src_y = new_state->src_y;
7519 plane->state->src_w = new_state->src_w;
7520 plane->state->src_h = new_state->src_h;
7521 plane->state->crtc_x = new_state->crtc_x;
7522 plane->state->crtc_y = new_state->crtc_y;
7523 plane->state->crtc_w = new_state->crtc_w;
7524 plane->state->crtc_h = new_state->crtc_h;
7526 handle_cursor_update(plane, old_state);
7529 static const struct drm_plane_helper_funcs dm_plane_helper_funcs = {
7530 .prepare_fb = dm_plane_helper_prepare_fb,
7531 .cleanup_fb = dm_plane_helper_cleanup_fb,
7532 .atomic_check = dm_plane_atomic_check,
7533 .atomic_async_check = dm_plane_atomic_async_check,
7534 .atomic_async_update = dm_plane_atomic_async_update
7538 * TODO: these are currently initialized to rgb formats only.
7539 * For future use cases we should either initialize them dynamically based on
7540 * plane capabilities, or initialize this array to all formats, so the internal drm
7541 * check will succeed, and let DC implement the proper check
7543 static const uint32_t rgb_formats[] = {
7544 DRM_FORMAT_XRGB8888,
7545 DRM_FORMAT_ARGB8888,
7546 DRM_FORMAT_RGBA8888,
7547 DRM_FORMAT_XRGB2101010,
7548 DRM_FORMAT_XBGR2101010,
7549 DRM_FORMAT_ARGB2101010,
7550 DRM_FORMAT_ABGR2101010,
7551 DRM_FORMAT_XRGB16161616,
7552 DRM_FORMAT_XBGR16161616,
7553 DRM_FORMAT_ARGB16161616,
7554 DRM_FORMAT_ABGR16161616,
7555 DRM_FORMAT_XBGR8888,
7556 DRM_FORMAT_ABGR8888,
7560 static const uint32_t overlay_formats[] = {
7561 DRM_FORMAT_XRGB8888,
7562 DRM_FORMAT_ARGB8888,
7563 DRM_FORMAT_RGBA8888,
7564 DRM_FORMAT_XBGR8888,
7565 DRM_FORMAT_ABGR8888,
7569 static const u32 cursor_formats[] = {
7573 static int get_plane_formats(const struct drm_plane *plane,
7574 const struct dc_plane_cap *plane_cap,
7575 uint32_t *formats, int max_formats)
7577 int i, num_formats = 0;
7580 * TODO: Query support for each group of formats directly from
7581 * DC plane caps. This will require adding more formats to the
7585 switch (plane->type) {
7586 case DRM_PLANE_TYPE_PRIMARY:
7587 for (i = 0; i < ARRAY_SIZE(rgb_formats); ++i) {
7588 if (num_formats >= max_formats)
7591 formats[num_formats++] = rgb_formats[i];
7594 if (plane_cap && plane_cap->pixel_format_support.nv12)
7595 formats[num_formats++] = DRM_FORMAT_NV12;
7596 if (plane_cap && plane_cap->pixel_format_support.p010)
7597 formats[num_formats++] = DRM_FORMAT_P010;
7598 if (plane_cap && plane_cap->pixel_format_support.fp16) {
7599 formats[num_formats++] = DRM_FORMAT_XRGB16161616F;
7600 formats[num_formats++] = DRM_FORMAT_ARGB16161616F;
7601 formats[num_formats++] = DRM_FORMAT_XBGR16161616F;
7602 formats[num_formats++] = DRM_FORMAT_ABGR16161616F;
7606 case DRM_PLANE_TYPE_OVERLAY:
7607 for (i = 0; i < ARRAY_SIZE(overlay_formats); ++i) {
7608 if (num_formats >= max_formats)
7611 formats[num_formats++] = overlay_formats[i];
7615 case DRM_PLANE_TYPE_CURSOR:
7616 for (i = 0; i < ARRAY_SIZE(cursor_formats); ++i) {
7617 if (num_formats >= max_formats)
7620 formats[num_formats++] = cursor_formats[i];
7628 static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
7629 struct drm_plane *plane,
7630 unsigned long possible_crtcs,
7631 const struct dc_plane_cap *plane_cap)
7633 uint32_t formats[32];
7636 unsigned int supported_rotations;
7637 uint64_t *modifiers = NULL;
7639 num_formats = get_plane_formats(plane, plane_cap, formats,
7640 ARRAY_SIZE(formats));
7642 res = get_plane_modifiers(dm->adev, plane->type, &modifiers);
7646 res = drm_universal_plane_init(adev_to_drm(dm->adev), plane, possible_crtcs,
7647 &dm_plane_funcs, formats, num_formats,
7648 modifiers, plane->type, NULL);
7653 if (plane->type == DRM_PLANE_TYPE_OVERLAY &&
7654 plane_cap && plane_cap->per_pixel_alpha) {
7655 unsigned int blend_caps = BIT(DRM_MODE_BLEND_PIXEL_NONE) |
7656 BIT(DRM_MODE_BLEND_PREMULTI);
7658 drm_plane_create_alpha_property(plane);
7659 drm_plane_create_blend_mode_property(plane, blend_caps);
7662 if (plane->type == DRM_PLANE_TYPE_PRIMARY &&
7664 (plane_cap->pixel_format_support.nv12 ||
7665 plane_cap->pixel_format_support.p010)) {
7666 /* This only affects YUV formats. */
7667 drm_plane_create_color_properties(
7669 BIT(DRM_COLOR_YCBCR_BT601) |
7670 BIT(DRM_COLOR_YCBCR_BT709) |
7671 BIT(DRM_COLOR_YCBCR_BT2020),
7672 BIT(DRM_COLOR_YCBCR_LIMITED_RANGE) |
7673 BIT(DRM_COLOR_YCBCR_FULL_RANGE),
7674 DRM_COLOR_YCBCR_BT709, DRM_COLOR_YCBCR_LIMITED_RANGE);
7677 supported_rotations =
7678 DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_90 |
7679 DRM_MODE_ROTATE_180 | DRM_MODE_ROTATE_270;
7681 if (dm->adev->asic_type >= CHIP_BONAIRE &&
7682 plane->type != DRM_PLANE_TYPE_CURSOR)
7683 drm_plane_create_rotation_property(plane, DRM_MODE_ROTATE_0,
7684 supported_rotations);
7686 drm_plane_helper_add(plane, &dm_plane_helper_funcs);
7688 /* Create (reset) the plane state */
7689 if (plane->funcs->reset)
7690 plane->funcs->reset(plane);
7695 static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
7696 struct drm_plane *plane,
7697 uint32_t crtc_index)
7699 struct amdgpu_crtc *acrtc = NULL;
7700 struct drm_plane *cursor_plane;
7704 cursor_plane = kzalloc(sizeof(*cursor_plane), GFP_KERNEL);
7708 cursor_plane->type = DRM_PLANE_TYPE_CURSOR;
7709 res = amdgpu_dm_plane_init(dm, cursor_plane, 0, NULL);
7711 acrtc = kzalloc(sizeof(struct amdgpu_crtc), GFP_KERNEL);
7715 res = drm_crtc_init_with_planes(
7720 &amdgpu_dm_crtc_funcs, NULL);
7725 drm_crtc_helper_add(&acrtc->base, &amdgpu_dm_crtc_helper_funcs);
7727 /* Create (reset) the plane state */
7728 if (acrtc->base.funcs->reset)
7729 acrtc->base.funcs->reset(&acrtc->base);
7731 acrtc->max_cursor_width = dm->adev->dm.dc->caps.max_cursor_size;
7732 acrtc->max_cursor_height = dm->adev->dm.dc->caps.max_cursor_size;
7734 acrtc->crtc_id = crtc_index;
7735 acrtc->base.enabled = false;
7736 acrtc->otg_inst = -1;
7738 dm->adev->mode_info.crtcs[crtc_index] = acrtc;
7739 drm_crtc_enable_color_mgmt(&acrtc->base, MAX_COLOR_LUT_ENTRIES,
7740 true, MAX_COLOR_LUT_ENTRIES);
7741 drm_mode_crtc_set_gamma_size(&acrtc->base, MAX_COLOR_LEGACY_LUT_ENTRIES);
7747 kfree(cursor_plane);
7752 static int to_drm_connector_type(enum signal_type st)
7755 case SIGNAL_TYPE_HDMI_TYPE_A:
7756 return DRM_MODE_CONNECTOR_HDMIA;
7757 case SIGNAL_TYPE_EDP:
7758 return DRM_MODE_CONNECTOR_eDP;
7759 case SIGNAL_TYPE_LVDS:
7760 return DRM_MODE_CONNECTOR_LVDS;
7761 case SIGNAL_TYPE_RGB:
7762 return DRM_MODE_CONNECTOR_VGA;
7763 case SIGNAL_TYPE_DISPLAY_PORT:
7764 case SIGNAL_TYPE_DISPLAY_PORT_MST:
7765 return DRM_MODE_CONNECTOR_DisplayPort;
7766 case SIGNAL_TYPE_DVI_DUAL_LINK:
7767 case SIGNAL_TYPE_DVI_SINGLE_LINK:
7768 return DRM_MODE_CONNECTOR_DVID;
7769 case SIGNAL_TYPE_VIRTUAL:
7770 return DRM_MODE_CONNECTOR_VIRTUAL;
7773 return DRM_MODE_CONNECTOR_Unknown;
7777 static struct drm_encoder *amdgpu_dm_connector_to_encoder(struct drm_connector *connector)
7779 struct drm_encoder *encoder;
7781 /* There is only one encoder per connector */
7782 drm_connector_for_each_possible_encoder(connector, encoder)
7788 static void amdgpu_dm_get_native_mode(struct drm_connector *connector)
7790 struct drm_encoder *encoder;
7791 struct amdgpu_encoder *amdgpu_encoder;
7793 encoder = amdgpu_dm_connector_to_encoder(connector);
7795 if (encoder == NULL)
7798 amdgpu_encoder = to_amdgpu_encoder(encoder);
7800 amdgpu_encoder->native_mode.clock = 0;
7802 if (!list_empty(&connector->probed_modes)) {
7803 struct drm_display_mode *preferred_mode = NULL;
7805 list_for_each_entry(preferred_mode,
7806 &connector->probed_modes,
7808 if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED)
7809 amdgpu_encoder->native_mode = *preferred_mode;
7817 static struct drm_display_mode *
7818 amdgpu_dm_create_common_mode(struct drm_encoder *encoder,
7820 int hdisplay, int vdisplay)
7822 struct drm_device *dev = encoder->dev;
7823 struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
7824 struct drm_display_mode *mode = NULL;
7825 struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
7827 mode = drm_mode_duplicate(dev, native_mode);
7832 mode->hdisplay = hdisplay;
7833 mode->vdisplay = vdisplay;
7834 mode->type &= ~DRM_MODE_TYPE_PREFERRED;
7835 strscpy(mode->name, name, DRM_DISPLAY_MODE_LEN);
7841 static void amdgpu_dm_connector_add_common_modes(struct drm_encoder *encoder,
7842 struct drm_connector *connector)
7844 struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
7845 struct drm_display_mode *mode = NULL;
7846 struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
7847 struct amdgpu_dm_connector *amdgpu_dm_connector =
7848 to_amdgpu_dm_connector(connector);
7852 char name[DRM_DISPLAY_MODE_LEN];
7855 } common_modes[] = {
7856 { "640x480", 640, 480},
7857 { "800x600", 800, 600},
7858 { "1024x768", 1024, 768},
7859 { "1280x720", 1280, 720},
7860 { "1280x800", 1280, 800},
7861 {"1280x1024", 1280, 1024},
7862 { "1440x900", 1440, 900},
7863 {"1680x1050", 1680, 1050},
7864 {"1600x1200", 1600, 1200},
7865 {"1920x1080", 1920, 1080},
7866 {"1920x1200", 1920, 1200}
7869 n = ARRAY_SIZE(common_modes);
7871 for (i = 0; i < n; i++) {
7872 struct drm_display_mode *curmode = NULL;
7873 bool mode_existed = false;
7875 if (common_modes[i].w > native_mode->hdisplay ||
7876 common_modes[i].h > native_mode->vdisplay ||
7877 (common_modes[i].w == native_mode->hdisplay &&
7878 common_modes[i].h == native_mode->vdisplay))
7881 list_for_each_entry(curmode, &connector->probed_modes, head) {
7882 if (common_modes[i].w == curmode->hdisplay &&
7883 common_modes[i].h == curmode->vdisplay) {
7884 mode_existed = true;
7892 mode = amdgpu_dm_create_common_mode(encoder,
7893 common_modes[i].name, common_modes[i].w,
7895 drm_mode_probed_add(connector, mode);
7896 amdgpu_dm_connector->num_modes++;
7900 static void amdgpu_set_panel_orientation(struct drm_connector *connector)
7902 struct drm_encoder *encoder;
7903 struct amdgpu_encoder *amdgpu_encoder;
7904 const struct drm_display_mode *native_mode;
7906 if (connector->connector_type != DRM_MODE_CONNECTOR_eDP &&
7907 connector->connector_type != DRM_MODE_CONNECTOR_LVDS)
7910 encoder = amdgpu_dm_connector_to_encoder(connector);
7914 amdgpu_encoder = to_amdgpu_encoder(encoder);
7916 native_mode = &amdgpu_encoder->native_mode;
7917 if (native_mode->hdisplay == 0 || native_mode->vdisplay == 0)
7920 drm_connector_set_panel_orientation_with_quirk(connector,
7921 DRM_MODE_PANEL_ORIENTATION_UNKNOWN,
7922 native_mode->hdisplay,
7923 native_mode->vdisplay);
7926 static void amdgpu_dm_connector_ddc_get_modes(struct drm_connector *connector,
7929 struct amdgpu_dm_connector *amdgpu_dm_connector =
7930 to_amdgpu_dm_connector(connector);
7933 /* empty probed_modes */
7934 INIT_LIST_HEAD(&connector->probed_modes);
7935 amdgpu_dm_connector->num_modes =
7936 drm_add_edid_modes(connector, edid);
7938 /* Sort the probed modes before calling
7939 * amdgpu_dm_get_native_mode(), since an EDID can have
7940 * more than one preferred mode. The modes that appear
7941 * later in the probed mode list could be of a higher
7942 * and preferred resolution. For example, a 3840x2160
7943 * resolution in the base EDID preferred timing and a 4096x2160
7944 * preferred resolution in a DID extension block later.
7946 drm_mode_sort(&connector->probed_modes);
7947 amdgpu_dm_get_native_mode(connector);
7949 /* Freesync capabilities are reset by calling
7950 * drm_add_edid_modes() and need to be
7953 amdgpu_dm_update_freesync_caps(connector, edid);
7955 amdgpu_set_panel_orientation(connector);
7957 amdgpu_dm_connector->num_modes = 0;
7961 static bool is_duplicate_mode(struct amdgpu_dm_connector *aconnector,
7962 struct drm_display_mode *mode)
7964 struct drm_display_mode *m;
7966 list_for_each_entry (m, &aconnector->base.probed_modes, head) {
7967 if (drm_mode_equal(m, mode))
7974 static uint add_fs_modes(struct amdgpu_dm_connector *aconnector)
7976 const struct drm_display_mode *m;
7977 struct drm_display_mode *new_mode;
7979 uint32_t new_modes_count = 0;
7981 /* Standard FPS values
7990 * 60 - Commonly used
7991 * 48,72,96,120 - Multiples of 24
7993 static const uint32_t common_rates[] = {
7994 23976, 24000, 25000, 29970, 30000,
7995 48000, 50000, 60000, 72000, 96000, 120000
7999 * Find mode with highest refresh rate with the same resolution
8000 * as the preferred mode. Some monitors report a preferred mode
8001 * with lower resolution than the highest refresh rate supported.
8004 m = get_highest_refresh_rate_mode(aconnector, true);
8008 for (i = 0; i < ARRAY_SIZE(common_rates); i++) {
8009 uint64_t target_vtotal, target_vtotal_diff;
8012 if (drm_mode_vrefresh(m) * 1000 < common_rates[i])
8015 if (common_rates[i] < aconnector->min_vfreq * 1000 ||
8016 common_rates[i] > aconnector->max_vfreq * 1000)
8019 num = (unsigned long long)m->clock * 1000 * 1000;
8020 den = common_rates[i] * (unsigned long long)m->htotal;
8021 target_vtotal = div_u64(num, den);
8022 target_vtotal_diff = target_vtotal - m->vtotal;
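/*
 * Worked example (hypothetical 1920x1080@60 mode: clock 148500 kHz,
 * htotal 2200, vtotal 1125): for the 48000 (i.e. 48 Hz) entry in
 * common_rates, target_vtotal = 148500 * 1000 * 1000 / (48000 * 2200)
 * = 1406, so the vertical front porch is stretched by
 * target_vtotal_diff = 281 lines and the resulting mode refreshes at
 * 148500000 / (2200 * 1406) ~= 48 Hz.
 */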
8024 /* Check for illegal modes */
8025 if (m->vsync_start + target_vtotal_diff < m->vdisplay ||
8026 m->vsync_end + target_vtotal_diff < m->vsync_start ||
8027 m->vtotal + target_vtotal_diff < m->vsync_end)
8030 new_mode = drm_mode_duplicate(aconnector->base.dev, m);
8034 new_mode->vtotal += (u16)target_vtotal_diff;
8035 new_mode->vsync_start += (u16)target_vtotal_diff;
8036 new_mode->vsync_end += (u16)target_vtotal_diff;
8037 new_mode->type &= ~DRM_MODE_TYPE_PREFERRED;
8038 new_mode->type |= DRM_MODE_TYPE_DRIVER;
8040 if (!is_duplicate_mode(aconnector, new_mode)) {
8041 drm_mode_probed_add(&aconnector->base, new_mode);
8042 new_modes_count += 1;
8044 drm_mode_destroy(aconnector->base.dev, new_mode);
8047 return new_modes_count;
8050 static void amdgpu_dm_connector_add_freesync_modes(struct drm_connector *connector,
8053 struct amdgpu_dm_connector *amdgpu_dm_connector =
8054 to_amdgpu_dm_connector(connector);
8056 if (!(amdgpu_freesync_vid_mode && edid))
8059 if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10)
8060 amdgpu_dm_connector->num_modes +=
8061 add_fs_modes(amdgpu_dm_connector);
8064 static int amdgpu_dm_connector_get_modes(struct drm_connector *connector)
8066 struct amdgpu_dm_connector *amdgpu_dm_connector =
8067 to_amdgpu_dm_connector(connector);
8068 struct drm_encoder *encoder;
8069 struct edid *edid = amdgpu_dm_connector->edid;
8071 encoder = amdgpu_dm_connector_to_encoder(connector);
8073 if (!drm_edid_is_valid(edid)) {
8074 amdgpu_dm_connector->num_modes =
8075 drm_add_modes_noedid(connector, 640, 480);
8077 amdgpu_dm_connector_ddc_get_modes(connector, edid);
8078 amdgpu_dm_connector_add_common_modes(encoder, connector);
8079 amdgpu_dm_connector_add_freesync_modes(connector, edid);
8081 amdgpu_dm_fbc_init(connector);
8083 return amdgpu_dm_connector->num_modes;
8086 void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
8087 struct amdgpu_dm_connector *aconnector,
8089 struct dc_link *link,
8092 struct amdgpu_device *adev = drm_to_adev(dm->ddev);
8095 * Some of the properties below require access to state, like bpc.
8096 * Allocate some default initial connector state with our reset helper.
8098 if (aconnector->base.funcs->reset)
8099 aconnector->base.funcs->reset(&aconnector->base);
8101 aconnector->connector_id = link_index;
8102 aconnector->dc_link = link;
8103 aconnector->base.interlace_allowed = false;
8104 aconnector->base.doublescan_allowed = false;
8105 aconnector->base.stereo_allowed = false;
8106 aconnector->base.dpms = DRM_MODE_DPMS_OFF;
8107 aconnector->hpd.hpd = AMDGPU_HPD_NONE; /* not used */
8108 aconnector->audio_inst = -1;
8109 mutex_init(&aconnector->hpd_lock);
8112 * Configure HPD hot plug support: connector->polled default value is 0,
8113 * which means HPD hot plug is not supported
8115 switch (connector_type) {
8116 case DRM_MODE_CONNECTOR_HDMIA:
8117 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
8118 aconnector->base.ycbcr_420_allowed =
8119 link->link_enc->features.hdmi_ycbcr420_supported ? true : false;
8121 case DRM_MODE_CONNECTOR_DisplayPort:
8122 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
8123 if (link->is_dig_mapping_flexible &&
8124 link->dc->res_pool->funcs->link_encs_assign) {
8126 link_enc_cfg_get_link_enc_used_by_link(link->ctx->dc, link);
8127 if (!link->link_enc)
8129 link_enc_cfg_get_next_avail_link_enc(link->ctx->dc);
8133 aconnector->base.ycbcr_420_allowed =
8134 link->link_enc->features.dp_ycbcr420_supported ? true : false;
8136 case DRM_MODE_CONNECTOR_DVID:
8137 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
8143 drm_object_attach_property(&aconnector->base.base,
8144 dm->ddev->mode_config.scaling_mode_property,
8145 DRM_MODE_SCALE_NONE);
8147 drm_object_attach_property(&aconnector->base.base,
8148 adev->mode_info.underscan_property,
8150 drm_object_attach_property(&aconnector->base.base,
8151 adev->mode_info.underscan_hborder_property,
8153 drm_object_attach_property(&aconnector->base.base,
8154 adev->mode_info.underscan_vborder_property,
8157 if (!aconnector->mst_port)
8158 drm_connector_attach_max_bpc_property(&aconnector->base, 8, 16);
8160 /* This defaults to the max in the range, but we want 8bpc for non-edp. */
8161 aconnector->base.state->max_bpc = (connector_type == DRM_MODE_CONNECTOR_eDP) ? 16 : 8;
8162 aconnector->base.state->max_requested_bpc = aconnector->base.state->max_bpc;
8164 if (connector_type == DRM_MODE_CONNECTOR_eDP &&
8165 (dc_is_dmcu_initialized(adev->dm.dc) || adev->dm.dc->ctx->dmub_srv)) {
8166 drm_object_attach_property(&aconnector->base.base,
8167 adev->mode_info.abm_level_property, 0);
8170 if (connector_type == DRM_MODE_CONNECTOR_HDMIA ||
8171 connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
8172 connector_type == DRM_MODE_CONNECTOR_eDP) {
8173 drm_connector_attach_hdr_output_metadata_property(&aconnector->base);
8175 if (!aconnector->mst_port)
8176 drm_connector_attach_vrr_capable_property(&aconnector->base);
8178 #ifdef CONFIG_DRM_AMD_DC_HDCP
8179 if (adev->dm.hdcp_workqueue)
8180 drm_connector_attach_content_protection_property(&aconnector->base, true);
8185 static int amdgpu_dm_i2c_xfer(struct i2c_adapter *i2c_adap,
8186 struct i2c_msg *msgs, int num)
8188 struct amdgpu_i2c_adapter *i2c = i2c_get_adapdata(i2c_adap);
8189 struct ddc_service *ddc_service = i2c->ddc_service;
8190 struct i2c_command cmd;
8194 cmd.payloads = kcalloc(num, sizeof(struct i2c_payload), GFP_KERNEL);
8199 cmd.number_of_payloads = num;
8200 cmd.engine = I2C_COMMAND_ENGINE_DEFAULT;
8203 for (i = 0; i < num; i++) {
8204 cmd.payloads[i].write = !(msgs[i].flags & I2C_M_RD);
8205 cmd.payloads[i].address = msgs[i].addr;
8206 cmd.payloads[i].length = msgs[i].len;
8207 cmd.payloads[i].data = msgs[i].buf;
8211 ddc_service->ctx->dc,
8212 ddc_service->ddc_pin->hw_info.ddc_channel,
8216 kfree(cmd.payloads);
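/*
 * Sketch of the msg -> payload translation above, using a hypothetical
 * transfer (not taken from this driver): a typical EDID read is submitted
 * as two i2c_msg entries for address 0x50, the first a 1-byte write of the
 * offset (I2C_M_RD clear -> payload.write = true) and the second a 128-byte
 * read (I2C_M_RD set -> payload.write = false); both become i2c_payload
 * entries in the single i2c_command handed to DC.
 */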
8220 static u32 amdgpu_dm_i2c_func(struct i2c_adapter *adap)
8222 return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
8225 static const struct i2c_algorithm amdgpu_dm_i2c_algo = {
8226 .master_xfer = amdgpu_dm_i2c_xfer,
8227 .functionality = amdgpu_dm_i2c_func,
8230 static struct amdgpu_i2c_adapter *
8231 create_i2c(struct ddc_service *ddc_service,
8235 struct amdgpu_device *adev = ddc_service->ctx->driver_context;
8236 struct amdgpu_i2c_adapter *i2c;
8238 i2c = kzalloc(sizeof(struct amdgpu_i2c_adapter), GFP_KERNEL);
8241 i2c->base.owner = THIS_MODULE;
8242 i2c->base.class = I2C_CLASS_DDC;
8243 i2c->base.dev.parent = &adev->pdev->dev;
8244 i2c->base.algo = &amdgpu_dm_i2c_algo;
8245 snprintf(i2c->base.name, sizeof(i2c->base.name), "AMDGPU DM i2c hw bus %d", link_index);
8246 i2c_set_adapdata(&i2c->base, i2c);
8247 i2c->ddc_service = ddc_service;
8248 if (i2c->ddc_service->ddc_pin)
8249 i2c->ddc_service->ddc_pin->hw_info.ddc_channel = link_index;
8256 * Note: this function assumes that dc_link_detect() was called for the
8257 * dc_link which will be represented by this aconnector.
8259 static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
8260 struct amdgpu_dm_connector *aconnector,
8261 uint32_t link_index,
8262 struct amdgpu_encoder *aencoder)
8266 struct dc *dc = dm->dc;
8267 struct dc_link *link = dc_get_link_at_index(dc, link_index);
8268 struct amdgpu_i2c_adapter *i2c;
8270 link->priv = aconnector;
8272 DRM_DEBUG_DRIVER("%s()\n", __func__);
8274 i2c = create_i2c(link->ddc, link->link_index, &res);
8276 DRM_ERROR("Failed to create i2c adapter data\n");
8280 aconnector->i2c = i2c;
8281 res = i2c_add_adapter(&i2c->base);
8284 DRM_ERROR("Failed to register hw i2c %d\n", link->link_index);
8288 connector_type = to_drm_connector_type(link->connector_signal);
8290 res = drm_connector_init_with_ddc(
8293 &amdgpu_dm_connector_funcs,
8298 DRM_ERROR("connector_init failed\n");
8299 aconnector->connector_id = -1;
8303 drm_connector_helper_add(
8305 &amdgpu_dm_connector_helper_funcs);
8307 amdgpu_dm_connector_init_helper(
8314 drm_connector_attach_encoder(
8315 &aconnector->base, &aencoder->base);
8317 if (connector_type == DRM_MODE_CONNECTOR_DisplayPort
8318 || connector_type == DRM_MODE_CONNECTOR_eDP)
8319 amdgpu_dm_initialize_dp_connector(dm, aconnector, link->link_index);
8324 aconnector->i2c = NULL;
8329 int amdgpu_dm_get_encoder_crtc_mask(struct amdgpu_device *adev)
8331 switch (adev->mode_info.num_crtc) {
8348 static int amdgpu_dm_encoder_init(struct drm_device *dev,
8349 struct amdgpu_encoder *aencoder,
8350 uint32_t link_index)
8352 struct amdgpu_device *adev = drm_to_adev(dev);
8354 int res = drm_encoder_init(dev,
8356 &amdgpu_dm_encoder_funcs,
8357 DRM_MODE_ENCODER_TMDS,
8360 aencoder->base.possible_crtcs = amdgpu_dm_get_encoder_crtc_mask(adev);
8363 aencoder->encoder_id = link_index;
8365 aencoder->encoder_id = -1;
8367 drm_encoder_helper_add(&aencoder->base, &amdgpu_dm_encoder_helper_funcs);
8372 static void manage_dm_interrupts(struct amdgpu_device *adev,
8373 struct amdgpu_crtc *acrtc,
8377 * We have no guarantee that the frontend index maps to the same
8378 * backend index - some even map to more than one.
8380 * TODO: Use a different interrupt or check DC itself for the mapping.
8383 amdgpu_display_crtc_idx_to_irq_type(
8388 drm_crtc_vblank_on(&acrtc->base);
8391 &adev->pageflip_irq,
8393 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
8400 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
8408 &adev->pageflip_irq,
8410 drm_crtc_vblank_off(&acrtc->base);
8414 static void dm_update_pflip_irq_state(struct amdgpu_device *adev,
8415 struct amdgpu_crtc *acrtc)
8418 amdgpu_display_crtc_idx_to_irq_type(adev, acrtc->crtc_id);
8421 * This reads the current state for the IRQ and forcibly reapplies
8422 * the setting to hardware.
8424 amdgpu_irq_update(adev, &adev->pageflip_irq, irq_type);
8428 is_scaling_state_different(const struct dm_connector_state *dm_state,
8429 const struct dm_connector_state *old_dm_state)
8431 if (dm_state->scaling != old_dm_state->scaling)
8433 if (!dm_state->underscan_enable && old_dm_state->underscan_enable) {
8434 if (old_dm_state->underscan_hborder != 0 && old_dm_state->underscan_vborder != 0)
8436 } else if (dm_state->underscan_enable && !old_dm_state->underscan_enable) {
8437 if (dm_state->underscan_hborder != 0 && dm_state->underscan_vborder != 0)
8439 } else if (dm_state->underscan_hborder != old_dm_state->underscan_hborder ||
8440 dm_state->underscan_vborder != old_dm_state->underscan_vborder)
8445 #ifdef CONFIG_DRM_AMD_DC_HDCP
8446 static bool is_content_protection_different(struct drm_connector_state *state,
8447 const struct drm_connector_state *old_state,
8448 const struct drm_connector *connector, struct hdcp_workqueue *hdcp_w)
8450 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
8451 struct dm_connector_state *dm_con_state = to_dm_connector_state(connector->state);
8453 /* Handle: Type0/1 change */
8454 if (old_state->hdcp_content_type != state->hdcp_content_type &&
8455 state->content_protection != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
8456 state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
8460 /* CP is being re-enabled, ignore this
8462 * Handles: ENABLED -> DESIRED
8464 if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED &&
8465 state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) {
8466 state->content_protection = DRM_MODE_CONTENT_PROTECTION_ENABLED;
8470 /* S3 resume case, since old state will always be 0 (UNDESIRED) and the restored state will be ENABLED
8472 * Handles: UNDESIRED -> ENABLED
8474 if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_UNDESIRED &&
8475 state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
8476 state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
8478 /* Stream removed and re-enabled
8480 * Can sometimes overlap with the HPD case,
8481 * thus set update_hdcp to false to avoid
8482 * setting HDCP multiple times.
8484 * Handles: DESIRED -> DESIRED (Special case)
8486 if (!(old_state->crtc && old_state->crtc->enabled) &&
8487 state->crtc && state->crtc->enabled &&
8488 connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) {
8489 dm_con_state->update_hdcp = false;
8493 /* Hot-plug, headless s3, dpms
8495 * Only start HDCP if the display is connected/enabled.
8496 * update_hdcp flag will be set to false until the next
8499 * Handles: DESIRED -> DESIRED (Special case)
8501 if (dm_con_state->update_hdcp && state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED &&
8502 connector->dpms == DRM_MODE_DPMS_ON && aconnector->dc_sink != NULL) {
8503 dm_con_state->update_hdcp = false;
8508 * Handles: UNDESIRED -> UNDESIRED
8509 * DESIRED -> DESIRED
8510 * ENABLED -> ENABLED
8512 if (old_state->content_protection == state->content_protection)
8516 * Handles: UNDESIRED -> DESIRED
8517 * DESIRED -> UNDESIRED
8518 * ENABLED -> UNDESIRED
8520 if (state->content_protection != DRM_MODE_CONTENT_PROTECTION_ENABLED)
8524 * Handles: DESIRED -> ENABLED
8530 static void remove_stream(struct amdgpu_device *adev,
8531 struct amdgpu_crtc *acrtc,
8532 struct dc_stream_state *stream)
8534 /* this is the update mode case */
8536 acrtc->otg_inst = -1;
8537 acrtc->enabled = false;
8540 static int get_cursor_position(struct drm_plane *plane, struct drm_crtc *crtc,
8541 struct dc_cursor_position *position)
8543 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
8545 int xorigin = 0, yorigin = 0;
8547 if (!crtc || !plane->state->fb)
8550 if ((plane->state->crtc_w > amdgpu_crtc->max_cursor_width) ||
8551 (plane->state->crtc_h > amdgpu_crtc->max_cursor_height)) {
8552 DRM_ERROR("%s: bad cursor width or height %d x %d\n",
8554 plane->state->crtc_w,
8555 plane->state->crtc_h);
8559 x = plane->state->crtc_x;
8560 y = plane->state->crtc_y;
8562 if (x <= -amdgpu_crtc->max_cursor_width ||
8563 y <= -amdgpu_crtc->max_cursor_height)
8567 xorigin = min(-x, amdgpu_crtc->max_cursor_width - 1);
8571 yorigin = min(-y, amdgpu_crtc->max_cursor_height - 1);
8574 position->enable = true;
8575 position->translate_by_source = true;
8578 position->x_hotspot = xorigin;
8579 position->y_hotspot = yorigin;
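/*
 * Worked example with hypothetical numbers: a cursor plane at crtc_x = -10
 * passes the off-screen check above (assuming max_cursor_width > 10), so
 * xorigin = min(10, max_cursor_width - 1) = 10 and the hardware is told to
 * use an x hotspot of 10, shifting the visible part of the cursor to the
 * left edge. Positions at or beyond -max_cursor_width are filtered out by
 * the earlier range check instead.
 */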
8584 static void handle_cursor_update(struct drm_plane *plane,
8585 struct drm_plane_state *old_plane_state)
8587 struct amdgpu_device *adev = drm_to_adev(plane->dev);
8588 struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(plane->state->fb);
8589 struct drm_crtc *crtc = afb ? plane->state->crtc : old_plane_state->crtc;
8590 struct dm_crtc_state *crtc_state = crtc ? to_dm_crtc_state(crtc->state) : NULL;
8591 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
8592 uint64_t address = afb ? afb->address : 0;
8593 struct dc_cursor_position position = {0};
8594 struct dc_cursor_attributes attributes;
8597 if (!plane->state->fb && !old_plane_state->fb)
8600 DC_LOG_CURSOR("%s: crtc_id=%d with size %d to %d\n",
8602 amdgpu_crtc->crtc_id,
8603 plane->state->crtc_w,
8604 plane->state->crtc_h);
8606 ret = get_cursor_position(plane, crtc, &position);
8610 if (!position.enable) {
8611 /* turn off cursor */
8612 if (crtc_state && crtc_state->stream) {
8613 mutex_lock(&adev->dm.dc_lock);
8614 dc_stream_set_cursor_position(crtc_state->stream,
8616 mutex_unlock(&adev->dm.dc_lock);
8621 amdgpu_crtc->cursor_width = plane->state->crtc_w;
8622 amdgpu_crtc->cursor_height = plane->state->crtc_h;
8624 memset(&attributes, 0, sizeof(attributes));
8625 attributes.address.high_part = upper_32_bits(address);
8626 attributes.address.low_part = lower_32_bits(address);
8627 attributes.width = plane->state->crtc_w;
8628 attributes.height = plane->state->crtc_h;
8629 attributes.color_format = CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA;
8630 attributes.rotation_angle = 0;
8631 attributes.attribute_flags.value = 0;
8633 attributes.pitch = afb->base.pitches[0] / afb->base.format->cpp[0];
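/*
 * Worked example for the division above (hypothetical framebuffer): DRM
 * reports pitches[0] in bytes, so a 64x64 ARGB8888 cursor with
 * pitches[0] = 256 and cpp[0] = 4 bytes per pixel yields a pitch of 64
 * pixels for the cursor attributes.
 */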
8635 if (crtc_state->stream) {
8636 mutex_lock(&adev->dm.dc_lock);
8637 if (!dc_stream_set_cursor_attributes(crtc_state->stream,
8639 DRM_ERROR("DC failed to set cursor attributes\n");
8641 if (!dc_stream_set_cursor_position(crtc_state->stream,
8643 DRM_ERROR("DC failed to set cursor position\n");
8644 mutex_unlock(&adev->dm.dc_lock);
8648 static void prepare_flip_isr(struct amdgpu_crtc *acrtc)
8651 assert_spin_locked(&acrtc->base.dev->event_lock);
8652 WARN_ON(acrtc->event);
8654 acrtc->event = acrtc->base.state->event;
8656 /* Set the flip status */
8657 acrtc->pflip_status = AMDGPU_FLIP_SUBMITTED;
8659 /* Mark this event as consumed */
8660 acrtc->base.state->event = NULL;
8662 DC_LOG_PFLIP("crtc:%d, pflip_stat:AMDGPU_FLIP_SUBMITTED\n",
8666 static void update_freesync_state_on_stream(
8667 struct amdgpu_display_manager *dm,
8668 struct dm_crtc_state *new_crtc_state,
8669 struct dc_stream_state *new_stream,
8670 struct dc_plane_state *surface,
8671 u32 flip_timestamp_in_us)
8673 struct mod_vrr_params vrr_params;
8674 struct dc_info_packet vrr_infopacket = {0};
8675 struct amdgpu_device *adev = dm->adev;
8676 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_crtc_state->base.crtc);
8677 unsigned long flags;
8678 bool pack_sdp_v1_3 = false;
8684 * TODO: Determine why min/max totals and vrefresh can be 0 here.
8685 * For now it's sufficient to just guard against these conditions.
8688 if (!new_stream->timing.h_total || !new_stream->timing.v_total)
8691 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
8692 vrr_params = acrtc->dm_irq_params.vrr_params;
8695 mod_freesync_handle_preflip(
8696 dm->freesync_module,
8699 flip_timestamp_in_us,
8702 if (adev->family < AMDGPU_FAMILY_AI &&
8703 amdgpu_dm_vrr_active(new_crtc_state)) {
8704 mod_freesync_handle_v_update(dm->freesync_module,
8705 new_stream, &vrr_params);
8707 /* Need to call this before the frame ends. */
8708 dc_stream_adjust_vmin_vmax(dm->dc,
8709 new_crtc_state->stream,
8710 &vrr_params.adjust);
8714 mod_freesync_build_vrr_infopacket(
8715 dm->freesync_module,
8719 TRANSFER_FUNC_UNKNOWN,
8723 new_crtc_state->freesync_timing_changed |=
8724 (memcmp(&acrtc->dm_irq_params.vrr_params.adjust,
8726 sizeof(vrr_params.adjust)) != 0);
8728 new_crtc_state->freesync_vrr_info_changed |=
8729 (memcmp(&new_crtc_state->vrr_infopacket,
8731 sizeof(vrr_infopacket)) != 0);
8733 acrtc->dm_irq_params.vrr_params = vrr_params;
8734 new_crtc_state->vrr_infopacket = vrr_infopacket;
8736 new_stream->adjust = acrtc->dm_irq_params.vrr_params.adjust;
8737 new_stream->vrr_infopacket = vrr_infopacket;
8739 if (new_crtc_state->freesync_vrr_info_changed)
8740 DRM_DEBUG_KMS("VRR packet update: crtc=%u enabled=%d state=%d",
8741 new_crtc_state->base.crtc->base.id,
8742 (int)new_crtc_state->base.vrr_enabled,
8743 (int)vrr_params.state);
8745 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
8748 static void update_stream_irq_parameters(
8749 struct amdgpu_display_manager *dm,
8750 struct dm_crtc_state *new_crtc_state)
8752 struct dc_stream_state *new_stream = new_crtc_state->stream;
8753 struct mod_vrr_params vrr_params;
8754 struct mod_freesync_config config = new_crtc_state->freesync_config;
8755 struct amdgpu_device *adev = dm->adev;
8756 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_crtc_state->base.crtc);
8757 unsigned long flags;
8763 * TODO: Determine why min/max totals and vrefresh can be 0 here.
8764 * For now it's sufficient to just guard against these conditions.
8766 if (!new_stream->timing.h_total || !new_stream->timing.v_total)
8769 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
8770 vrr_params = acrtc->dm_irq_params.vrr_params;
8772 if (new_crtc_state->vrr_supported &&
8773 config.min_refresh_in_uhz &&
8774 config.max_refresh_in_uhz) {
8776 * if a freesync-compatible mode was set, config.state will be set to VRR_STATE_ACTIVE_FIXED
8779 if (config.state == VRR_STATE_ACTIVE_FIXED && config.fixed_refresh_in_uhz &&
8780 (!drm_atomic_crtc_needs_modeset(&new_crtc_state->base) ||
8781 new_crtc_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED)) {
8782 vrr_params.max_refresh_in_uhz = config.max_refresh_in_uhz;
8783 vrr_params.min_refresh_in_uhz = config.min_refresh_in_uhz;
8784 vrr_params.fixed_refresh_in_uhz = config.fixed_refresh_in_uhz;
8785 vrr_params.state = VRR_STATE_ACTIVE_FIXED;
8787 config.state = new_crtc_state->base.vrr_enabled ?
8788 VRR_STATE_ACTIVE_VARIABLE :
8792 config.state = VRR_STATE_UNSUPPORTED;
8795 mod_freesync_build_vrr_params(dm->freesync_module,
8797 &config, &vrr_params);
8799 new_crtc_state->freesync_timing_changed |=
8800 (memcmp(&acrtc->dm_irq_params.vrr_params.adjust,
8801 &vrr_params.adjust, sizeof(vrr_params.adjust)) != 0);
8803 new_crtc_state->freesync_config = config;
8804 /* Copy state for access from DM IRQ handler */
8805 acrtc->dm_irq_params.freesync_config = config;
8806 acrtc->dm_irq_params.active_planes = new_crtc_state->active_planes;
8807 acrtc->dm_irq_params.vrr_params = vrr_params;
8808 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
8811 static void amdgpu_dm_handle_vrr_transition(struct dm_crtc_state *old_state,
8812 struct dm_crtc_state *new_state)
8814 bool old_vrr_active = amdgpu_dm_vrr_active(old_state);
8815 bool new_vrr_active = amdgpu_dm_vrr_active(new_state);
8817 if (!old_vrr_active && new_vrr_active) {
8818 /* Transition VRR inactive -> active:
8819 * While VRR is active, we must not disable vblank irq, as a
8820 * re-enable after disable would compute bogus vblank/pflip
8821 * timestamps if it likely happened inside display front-porch.
8823 * We also need vupdate irq for the actual core vblank handling
8826 dm_set_vupdate_irq(new_state->base.crtc, true);
8827 drm_crtc_vblank_get(new_state->base.crtc);
8828 DRM_DEBUG_DRIVER("%s: crtc=%u VRR off->on: Get vblank ref\n",
8829 __func__, new_state->base.crtc->base.id);
8830 } else if (old_vrr_active && !new_vrr_active) {
8831 /* Transition VRR active -> inactive:
8832 * Allow vblank irq disable again for fixed refresh rate.
8834 dm_set_vupdate_irq(new_state->base.crtc, false);
8835 drm_crtc_vblank_put(new_state->base.crtc);
8836 DRM_DEBUG_DRIVER("%s: crtc=%u VRR on->off: Drop vblank ref\n",
8837 __func__, new_state->base.crtc->base.id);
8841 static void amdgpu_dm_commit_cursors(struct drm_atomic_state *state)
8843 struct drm_plane *plane;
8844 struct drm_plane_state *old_plane_state;
8848 * TODO: Make this per-stream so we don't issue redundant updates for
8849 * commits with multiple streams.
8851 for_each_old_plane_in_state(state, plane, old_plane_state, i)
8852 if (plane->type == DRM_PLANE_TYPE_CURSOR)
8853 handle_cursor_update(plane, old_plane_state);
8856 static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
8857 struct dc_state *dc_state,
8858 struct drm_device *dev,
8859 struct amdgpu_display_manager *dm,
8860 struct drm_crtc *pcrtc,
8861 bool wait_for_vblank)
8864 uint64_t timestamp_ns;
8865 struct drm_plane *plane;
8866 struct drm_plane_state *old_plane_state, *new_plane_state;
8867 struct amdgpu_crtc *acrtc_attach = to_amdgpu_crtc(pcrtc);
8868 struct drm_crtc_state *new_pcrtc_state =
8869 drm_atomic_get_new_crtc_state(state, pcrtc);
8870 struct dm_crtc_state *acrtc_state = to_dm_crtc_state(new_pcrtc_state);
8871 struct dm_crtc_state *dm_old_crtc_state =
8872 to_dm_crtc_state(drm_atomic_get_old_crtc_state(state, pcrtc));
8873 int planes_count = 0, vpos, hpos;
8875 unsigned long flags;
8876 struct amdgpu_bo *abo;
8877 uint32_t target_vblank, last_flip_vblank;
8878 bool vrr_active = amdgpu_dm_vrr_active(acrtc_state);
8879 bool pflip_present = false;
8881 struct dc_surface_update surface_updates[MAX_SURFACES];
8882 struct dc_plane_info plane_infos[MAX_SURFACES];
8883 struct dc_scaling_info scaling_infos[MAX_SURFACES];
8884 struct dc_flip_addrs flip_addrs[MAX_SURFACES];
8885 struct dc_stream_update stream_update;
8888 bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
8891 dm_error("Failed to allocate update bundle\n");
8896 * Disable the cursor first if we're disabling all the planes.
8897 * Otherwise it'll remain on the screen after the planes are re-enabled.
8900 if (acrtc_state->active_planes == 0)
8901 amdgpu_dm_commit_cursors(state);
8903 /* update planes when needed */
8904 for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
8905 struct drm_crtc *crtc = new_plane_state->crtc;
8906 struct drm_crtc_state *new_crtc_state;
8907 struct drm_framebuffer *fb = new_plane_state->fb;
8908 struct amdgpu_framebuffer *afb = (struct amdgpu_framebuffer *)fb;
8909 bool plane_needs_flip;
8910 struct dc_plane_state *dc_plane;
8911 struct dm_plane_state *dm_new_plane_state = to_dm_plane_state(new_plane_state);
8913 /* Cursor plane is handled after stream updates */
8914 if (plane->type == DRM_PLANE_TYPE_CURSOR)
8917 if (!fb || !crtc || pcrtc != crtc)
8920 new_crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
8921 if (!new_crtc_state->active)
8924 dc_plane = dm_new_plane_state->dc_state;
8926 bundle->surface_updates[planes_count].surface = dc_plane;
8927 if (new_pcrtc_state->color_mgmt_changed) {
8928 bundle->surface_updates[planes_count].gamma = dc_plane->gamma_correction;
8929 bundle->surface_updates[planes_count].in_transfer_func = dc_plane->in_transfer_func;
8930 bundle->surface_updates[planes_count].gamut_remap_matrix = &dc_plane->gamut_remap_matrix;
8933 fill_dc_scaling_info(new_plane_state,
8934 &bundle->scaling_infos[planes_count]);
8936 bundle->surface_updates[planes_count].scaling_info =
8937 &bundle->scaling_infos[planes_count];
8939 plane_needs_flip = old_plane_state->fb && new_plane_state->fb;
8941 pflip_present = pflip_present || plane_needs_flip;
8943 if (!plane_needs_flip) {
8948 abo = gem_to_amdgpu_bo(fb->obj[0]);
8951 * Wait for all fences on this FB. Do a limited wait to avoid
8952 * deadlock during GPU reset when this fence will not signal
8953 * but we hold reservation lock for the BO.
8955 r = dma_resv_wait_timeout(abo->tbo.base.resv, true, false,
8956 msecs_to_jiffies(5000));
8957 if (unlikely(r <= 0))
8958 DRM_ERROR("Waiting for fences timed out!");
8960 fill_dc_plane_info_and_addr(
8961 dm->adev, new_plane_state,
8963 &bundle->plane_infos[planes_count],
8964 &bundle->flip_addrs[planes_count].address,
8965 afb->tmz_surface, false);
8967 DRM_DEBUG_ATOMIC("plane: id=%d dcc_en=%d\n",
8968 new_plane_state->plane->index,
8969 bundle->plane_infos[planes_count].dcc.enable);
8971 bundle->surface_updates[planes_count].plane_info =
8972 &bundle->plane_infos[planes_count];
8975 * Only allow immediate flips for fast updates that don't
8976 * change FB pitch, DCC state, rotation or mirroring.
8978 bundle->flip_addrs[planes_count].flip_immediate =
8979 crtc->state->async_flip &&
8980 acrtc_state->update_type == UPDATE_TYPE_FAST;
8982 timestamp_ns = ktime_get_ns();
8983 bundle->flip_addrs[planes_count].flip_timestamp_in_us = div_u64(timestamp_ns, 1000);
8984 bundle->surface_updates[planes_count].flip_addr = &bundle->flip_addrs[planes_count];
8985 bundle->surface_updates[planes_count].surface = dc_plane;
8987 if (!bundle->surface_updates[planes_count].surface) {
8988 DRM_ERROR("No surface for CRTC: id=%d\n",
8989 acrtc_attach->crtc_id);
8993 if (plane == pcrtc->primary)
8994 update_freesync_state_on_stream(
8997 acrtc_state->stream,
8999 bundle->flip_addrs[planes_count].flip_timestamp_in_us);
9001 DRM_DEBUG_ATOMIC("%s Flipping to hi: 0x%x, low: 0x%x\n",
9003 bundle->flip_addrs[planes_count].address.grph.addr.high_part,
9004 bundle->flip_addrs[planes_count].address.grph.addr.low_part);
9010 if (pflip_present) {
9012 /* Use old throttling in non-vrr fixed refresh rate mode
9013 * to keep flip scheduling based on target vblank counts
9014 * working in a backwards compatible way, e.g., for
9015 * clients using the GLX_OML_sync_control extension or
9016 * DRI3/Present extension with defined target_msc.
9018 last_flip_vblank = amdgpu_get_vblank_counter_kms(pcrtc);
9021 /* For variable refresh rate mode only:
9022 * Get vblank of last completed flip to avoid > 1 vrr
9023 * flips per video frame by use of throttling, but allow
9024 * flip programming anywhere in the possibly large
9025 * variable vrr vblank interval for fine-grained flip
9026 * timing control and more opportunity to avoid stutter
9027 * on late submission of flips.
9029 spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
9030 last_flip_vblank = acrtc_attach->dm_irq_params.last_flip_vblank;
9031 spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
9034 target_vblank = last_flip_vblank + wait_for_vblank;
9037 * Wait until we're out of the vertical blank period before the one
9038 * targeted by the flip
9040 while ((acrtc_attach->enabled &&
9041 (amdgpu_display_get_crtc_scanoutpos(dm->ddev, acrtc_attach->crtc_id,
9042 0, &vpos, &hpos, NULL,
9043 NULL, &pcrtc->hwmode)
9044 & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK)) ==
9045 (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK) &&
9046 (int)(target_vblank -
9047 amdgpu_get_vblank_counter_kms(pcrtc)) > 0)) {
9048 usleep_range(1000, 1100);
9052 * Prepare the flip event for the pageflip interrupt to handle.
9054 * This only works in the case where we've already turned on the
9055 * appropriate hardware blocks (e.g. HUBP) so in the transition case
9056 * from 0 -> n planes we have to skip a hardware generated event
9057 * and rely on sending it from software.
9059 if (acrtc_attach->base.state->event &&
9060 acrtc_state->active_planes > 0 &&
9061 !acrtc_state->force_dpms_off) {
9062 drm_crtc_vblank_get(pcrtc);
9064 spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
9066 WARN_ON(acrtc_attach->pflip_status != AMDGPU_FLIP_NONE);
9067 prepare_flip_isr(acrtc_attach);
9069 spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
9072 if (acrtc_state->stream) {
9073 if (acrtc_state->freesync_vrr_info_changed)
9074 bundle->stream_update.vrr_infopacket =
9075 &acrtc_state->stream->vrr_infopacket;
9079 /* Update the planes if changed or disable if we don't have any. */
9080 if ((planes_count || acrtc_state->active_planes == 0) &&
9081 acrtc_state->stream) {
9082 #if defined(CONFIG_DRM_AMD_DC_DCN)
9084 * If PSR or idle optimizations are enabled then flush out
9085 * any pending work before hardware programming.
9087 if (dm->vblank_control_workqueue)
9088 flush_workqueue(dm->vblank_control_workqueue);
9091 bundle->stream_update.stream = acrtc_state->stream;
9092 if (new_pcrtc_state->mode_changed) {
9093 bundle->stream_update.src = acrtc_state->stream->src;
9094 bundle->stream_update.dst = acrtc_state->stream->dst;
9097 if (new_pcrtc_state->color_mgmt_changed) {
9099 * TODO: This isn't fully correct since we've actually
9100 * already modified the stream in place.
9102 bundle->stream_update.gamut_remap =
9103 &acrtc_state->stream->gamut_remap_matrix;
9104 bundle->stream_update.output_csc_transform =
9105 &acrtc_state->stream->csc_color_matrix;
9106 bundle->stream_update.out_transfer_func =
9107 acrtc_state->stream->out_transfer_func;
9110 acrtc_state->stream->abm_level = acrtc_state->abm_level;
9111 if (acrtc_state->abm_level != dm_old_crtc_state->abm_level)
9112 bundle->stream_update.abm_level = &acrtc_state->abm_level;
9115 * If FreeSync state on the stream has changed then we need to
9116 * re-adjust the min/max bounds now that DC doesn't handle this
9117 * as part of commit.
9119 if (is_dc_timing_adjust_needed(dm_old_crtc_state, acrtc_state)) {
9120 spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
9121 dc_stream_adjust_vmin_vmax(
9122 dm->dc, acrtc_state->stream,
9123 &acrtc_attach->dm_irq_params.vrr_params.adjust);
9124 spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
9126 mutex_lock(&dm->dc_lock);
9127 if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
9128 acrtc_state->stream->link->psr_settings.psr_allow_active)
9129 amdgpu_dm_psr_disable(acrtc_state->stream);
9131 dc_commit_updates_for_stream(dm->dc,
9132 bundle->surface_updates,
9134 acrtc_state->stream,
9135 &bundle->stream_update,
9139 * Enable or disable the interrupts on the backend.
9141 * Most pipes are put into power gating when unused.
9143 * When power gating is enabled on a pipe, we lose its
9144 * interrupt enablement state by the time power gating is disabled again.
9146 * So we need to update the IRQ control state in hardware
9147 * whenever the pipe turns on (since it could be previously
9148 * power gated) or off (since some pipes can't be power gated
9151 if (dm_old_crtc_state->active_planes != acrtc_state->active_planes)
9152 dm_update_pflip_irq_state(drm_to_adev(dev),
9155 if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
9156 acrtc_state->stream->link->psr_settings.psr_version != DC_PSR_VERSION_UNSUPPORTED &&
9157 !acrtc_state->stream->link->psr_settings.psr_feature_enabled)
9158 amdgpu_dm_link_setup_psr(acrtc_state->stream);
9160 /* Decrement skip count when PSR is enabled and we're doing fast updates. */
9161 if (acrtc_state->update_type == UPDATE_TYPE_FAST &&
9162 acrtc_state->stream->link->psr_settings.psr_feature_enabled) {
9163 struct amdgpu_dm_connector *aconn =
9164 (struct amdgpu_dm_connector *)acrtc_state->stream->dm_stream_context;
9166 if (aconn->psr_skip_count > 0)
9167 aconn->psr_skip_count--;
9169 /* Allow PSR when skip count is 0. */
9170 acrtc_attach->dm_irq_params.allow_psr_entry = !aconn->psr_skip_count;
9172 acrtc_attach->dm_irq_params.allow_psr_entry = false;
9175 mutex_unlock(&dm->dc_lock);
9179 * Update cursor state *after* programming all the planes.
9180 * This avoids redundant programming in the case where we're going
9181 * to be disabling a single plane - those pipes are being disabled.
9183 if (acrtc_state->active_planes)
9184 amdgpu_dm_commit_cursors(state);
9190 static void amdgpu_dm_commit_audio(struct drm_device *dev,
9191 struct drm_atomic_state *state)
9193 struct amdgpu_device *adev = drm_to_adev(dev);
9194 struct amdgpu_dm_connector *aconnector;
9195 struct drm_connector *connector;
9196 struct drm_connector_state *old_con_state, *new_con_state;
9197 struct drm_crtc_state *new_crtc_state;
9198 struct dm_crtc_state *new_dm_crtc_state;
9199 const struct dc_stream_status *status;
9202 /* Notify device removals. */
9203 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
9204 if (old_con_state->crtc != new_con_state->crtc) {
9205 /* CRTC changes require notification. */
9209 if (!new_con_state->crtc)
9212 new_crtc_state = drm_atomic_get_new_crtc_state(
9213 state, new_con_state->crtc);
9215 if (!new_crtc_state)
9218 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
9222 aconnector = to_amdgpu_dm_connector(connector);
9224 mutex_lock(&adev->dm.audio_lock);
9225 inst = aconnector->audio_inst;
9226 aconnector->audio_inst = -1;
9227 mutex_unlock(&adev->dm.audio_lock);
9229 amdgpu_dm_audio_eld_notify(adev, inst);
9232 /* Notify audio device additions. */
9233 for_each_new_connector_in_state(state, connector, new_con_state, i) {
9234 if (!new_con_state->crtc)
9237 new_crtc_state = drm_atomic_get_new_crtc_state(
9238 state, new_con_state->crtc);
9240 if (!new_crtc_state)
9243 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
9246 new_dm_crtc_state = to_dm_crtc_state(new_crtc_state);
9247 if (!new_dm_crtc_state->stream)
9250 status = dc_stream_get_status(new_dm_crtc_state->stream);
9254 aconnector = to_amdgpu_dm_connector(connector);
9256 mutex_lock(&adev->dm.audio_lock);
9257 inst = status->audio_inst;
9258 aconnector->audio_inst = inst;
9259 mutex_unlock(&adev->dm.audio_lock);
9261 amdgpu_dm_audio_eld_notify(adev, inst);
9266 * amdgpu_dm_crtc_copy_transient_flags - copy mirrored flags from DRM to DC
9267 * @crtc_state: the DRM CRTC state
9268 * @stream_state: the DC stream state.
9270 * Copy the mirrored transient state flags from DRM to DC. It is used to bring
9271 * a dc_stream_state's flags in sync with a drm_crtc_state's flags.
9273 static void amdgpu_dm_crtc_copy_transient_flags(struct drm_crtc_state *crtc_state,
9274 struct dc_stream_state *stream_state)
9276 stream_state->mode_changed = drm_atomic_crtc_needs_modeset(crtc_state);
9280 * amdgpu_dm_atomic_commit_tail() - AMDgpu DM's commit tail implementation.
9281 * @state: The atomic state to commit
9283 * This will tell DC to commit the constructed DC state from atomic_check,
9284 * programming the hardware. Any failures here implies a hardware failure, since
9285 * atomic check should have filtered anything non-kosher.
9287 static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
9289 struct drm_device *dev = state->dev;
9290 struct amdgpu_device *adev = drm_to_adev(dev);
9291 struct amdgpu_display_manager *dm = &adev->dm;
9292 struct dm_atomic_state *dm_state;
9293 struct dc_state *dc_state = NULL, *dc_state_temp = NULL;
9295 struct drm_crtc *crtc;
9296 struct drm_crtc_state *old_crtc_state, *new_crtc_state;
9297 unsigned long flags;
9298 bool wait_for_vblank = true;
9299 struct drm_connector *connector;
9300 struct drm_connector_state *old_con_state, *new_con_state;
9301 struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
9302 int crtc_disable_count = 0;
9303 bool mode_set_reset_required = false;
9305 trace_amdgpu_dm_atomic_commit_tail_begin(state);
9307 drm_atomic_helper_update_legacy_modeset_state(dev, state);
9309 dm_state = dm_atomic_get_new_state(state);
9310 if (dm_state && dm_state->context) {
9311 dc_state = dm_state->context;
9313 /* No state changes, retain current state. */
9314 dc_state_temp = dc_create_state(dm->dc);
9315 ASSERT(dc_state_temp);
9316 dc_state = dc_state_temp;
9317 dc_resource_state_copy_construct_current(dm->dc, dc_state);
9320 for_each_oldnew_crtc_in_state (state, crtc, old_crtc_state,
9321 new_crtc_state, i) {
9322 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
9324 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9326 if (old_crtc_state->active &&
9327 (!new_crtc_state->active ||
9328 drm_atomic_crtc_needs_modeset(new_crtc_state))) {
9329 manage_dm_interrupts(adev, acrtc, false);
9330 dc_stream_release(dm_old_crtc_state->stream);
9334 drm_atomic_helper_calc_timestamping_constants(state);
9336 /* update changed items */
9337 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
9338 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
9340 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9341 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9344 "amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
9345 "planes_changed:%d, mode_changed:%d,active_changed:%d,"
9346 "connectors_changed:%d\n",
9348 new_crtc_state->enable,
9349 new_crtc_state->active,
9350 new_crtc_state->planes_changed,
9351 new_crtc_state->mode_changed,
9352 new_crtc_state->active_changed,
9353 new_crtc_state->connectors_changed);
9355 /* Disable cursor if disabling crtc */
9356 if (old_crtc_state->active && !new_crtc_state->active) {
9357 struct dc_cursor_position position;
9359 memset(&position, 0, sizeof(position));
9360 mutex_lock(&dm->dc_lock);
9361 dc_stream_set_cursor_position(dm_old_crtc_state->stream, &position);
9362 mutex_unlock(&dm->dc_lock);
9365 /* Copy all transient state flags into dc state */
9366 if (dm_new_crtc_state->stream) {
9367 amdgpu_dm_crtc_copy_transient_flags(&dm_new_crtc_state->base,
9368 dm_new_crtc_state->stream);
9371 /* handles headless hotplug case, updating new_state and
9372 * aconnector as needed
9375 if (modeset_required(new_crtc_state, dm_new_crtc_state->stream, dm_old_crtc_state->stream)) {
9377 DRM_DEBUG_ATOMIC("Atomic commit: SET crtc id %d: [%p]\n", acrtc->crtc_id, acrtc);
9379 if (!dm_new_crtc_state->stream) {
9381 * this could happen because of issues with
9382 * userspace notifications delivery.
9383 * In this case userspace tries to set mode on
9384 * display which is disconnected in fact.
9385 * dc_sink is NULL in this case on aconnector.
9386 * We expect reset mode will come soon.
9388 * This can also happen when an unplug is done
9389 * while the resume sequence is ending.
9391 * In this case, we want to pretend we still
9392 * have a sink to keep the pipe running so that
9393 * hw state is consistent with the sw state
9395 DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
9396 __func__, acrtc->base.base.id);
9400 if (dm_old_crtc_state->stream)
9401 remove_stream(adev, acrtc, dm_old_crtc_state->stream);
9403 pm_runtime_get_noresume(dev->dev);
9405 acrtc->enabled = true;
9406 acrtc->hw_mode = new_crtc_state->mode;
9407 crtc->hwmode = new_crtc_state->mode;
9408 mode_set_reset_required = true;
9409 } else if (modereset_required(new_crtc_state)) {
9410 DRM_DEBUG_ATOMIC("Atomic commit: RESET. crtc id %d:[%p]\n", acrtc->crtc_id, acrtc);
9411 /* i.e. reset mode */
9412 if (dm_old_crtc_state->stream)
9413 remove_stream(adev, acrtc, dm_old_crtc_state->stream);
9415 mode_set_reset_required = true;
9417 } /* for_each_crtc_in_state() */
9420 /* if there is a mode set or reset, disable eDP PSR */
9421 if (mode_set_reset_required) {
9422 #if defined(CONFIG_DRM_AMD_DC_DCN)
9423 if (dm->vblank_control_workqueue)
9424 flush_workqueue(dm->vblank_control_workqueue);
9426 amdgpu_dm_psr_disable_all(dm);
9429 dm_enable_per_frame_crtc_master_sync(dc_state);
9430 mutex_lock(&dm->dc_lock);
9431 WARN_ON(!dc_commit_state(dm->dc, dc_state));
9432 #if defined(CONFIG_DRM_AMD_DC_DCN)
9433 /* Allow idle optimization when vblank count is 0 for display off */
9434 if (dm->active_vblank_irq_count == 0)
9435 dc_allow_idle_optimizations(dm->dc, true);
9437 mutex_unlock(&dm->dc_lock);
9440 for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
9441 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
9443 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9445 if (dm_new_crtc_state->stream != NULL) {
9446 const struct dc_stream_status *status =
9447 dc_stream_get_status(dm_new_crtc_state->stream);
9450 status = dc_stream_get_status_from_state(dc_state,
9451 dm_new_crtc_state->stream);
9453 DC_ERR("got no status for stream %p on acrtc%p\n", dm_new_crtc_state->stream, acrtc);
9455 acrtc->otg_inst = status->primary_otg_inst;
9458 #ifdef CONFIG_DRM_AMD_DC_HDCP
9459 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
9460 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
9461 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
9462 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
9464 new_crtc_state = NULL;
9467 new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
9469 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9471 if (dm_new_crtc_state && dm_new_crtc_state->stream == NULL &&
9472 connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED) {
9473 hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
9474 new_con_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
9475 dm_new_con_state->update_hdcp = true;
9479 if (is_content_protection_different(new_con_state, old_con_state, connector, adev->dm.hdcp_workqueue))
9480 hdcp_update_display(
9481 adev->dm.hdcp_workqueue, aconnector->dc_link->link_index, aconnector,
9482 new_con_state->hdcp_content_type,
9483 new_con_state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED);
9487 /* Handle connector state changes */
9488 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
9489 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
9490 struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
9491 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
9492 struct dc_surface_update dummy_updates[MAX_SURFACES];
9493 struct dc_stream_update stream_update;
9494 struct dc_info_packet hdr_packet;
9495 struct dc_stream_status *status = NULL;
9496 bool abm_changed, hdr_changed, scaling_changed;
9498 memset(&dummy_updates, 0, sizeof(dummy_updates));
9499 memset(&stream_update, 0, sizeof(stream_update));
9502 new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
9503 old_crtc_state = drm_atomic_get_old_crtc_state(state, &acrtc->base);
9506 /* Skip any modesets/resets */
9507 if (!acrtc || drm_atomic_crtc_needs_modeset(new_crtc_state))
9510 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9511 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9513 scaling_changed = is_scaling_state_different(dm_new_con_state,
9516 abm_changed = dm_new_crtc_state->abm_level !=
9517 dm_old_crtc_state->abm_level;
9520 !drm_connector_atomic_hdr_metadata_equal(old_con_state, new_con_state);
9522 if (!scaling_changed && !abm_changed && !hdr_changed)
9525 stream_update.stream = dm_new_crtc_state->stream;
9526 if (scaling_changed) {
9527 update_stream_scaling_settings(&dm_new_con_state->base.crtc->mode,
9528 dm_new_con_state, dm_new_crtc_state->stream);
9530 stream_update.src = dm_new_crtc_state->stream->src;
9531 stream_update.dst = dm_new_crtc_state->stream->dst;
9535 dm_new_crtc_state->stream->abm_level = dm_new_crtc_state->abm_level;
9537 stream_update.abm_level = &dm_new_crtc_state->abm_level;
9541 fill_hdr_info_packet(new_con_state, &hdr_packet);
9542 stream_update.hdr_static_metadata = &hdr_packet;
9545 status = dc_stream_get_status(dm_new_crtc_state->stream);
9547 if (WARN_ON(!status))
9550 WARN_ON(!status->plane_count);
9553 * TODO: DC refuses to perform stream updates without a dc_surface_update.
9554 * Here we create an empty update on each plane.
9555 * To fix this, DC should permit updating only stream properties.
9557 for (j = 0; j < status->plane_count; j++)
9558 dummy_updates[j].surface = status->plane_states[0];
9561 mutex_lock(&dm->dc_lock);
9562 dc_commit_updates_for_stream(dm->dc,
9564 status->plane_count,
9565 dm_new_crtc_state->stream,
9568 mutex_unlock(&dm->dc_lock);
9571 /* Count number of newly disabled CRTCs for dropping PM refs later. */
9572 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
9573 new_crtc_state, i) {
9574 if (old_crtc_state->active && !new_crtc_state->active)
9575 crtc_disable_count++;
9577 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9578 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9580 /* For freesync config update on crtc state and params for irq */
9581 update_stream_irq_parameters(dm, dm_new_crtc_state);
9583 /* Handle vrr on->off / off->on transitions */
9584 amdgpu_dm_handle_vrr_transition(dm_old_crtc_state,
9589 * Enable interrupts for CRTCs that are newly enabled or went through
9590 * a modeset. It was intentionally deferred until after the front end
9591 * state was modified to wait until the OTG was on and so the IRQ
9592 * handlers didn't access stale or invalid state.
9594 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
9595 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
9596 #ifdef CONFIG_DEBUG_FS
9597 bool configure_crc = false;
9598 enum amdgpu_dm_pipe_crc_source cur_crc_src;
9599 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
9600 struct crc_rd_work *crc_rd_wrk = dm->crc_rd_wrk;
9602 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
9603 cur_crc_src = acrtc->dm_irq_params.crc_src;
9604 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
9606 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9608 if (new_crtc_state->active &&
9609 (!old_crtc_state->active ||
9610 drm_atomic_crtc_needs_modeset(new_crtc_state))) {
9611 dc_stream_retain(dm_new_crtc_state->stream);
9612 acrtc->dm_irq_params.stream = dm_new_crtc_state->stream;
9613 manage_dm_interrupts(adev, acrtc, true);
9615 #ifdef CONFIG_DEBUG_FS
9617 * Frontend may have changed so reapply the CRC capture
9618 * settings for the stream.
9620 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9622 if (amdgpu_dm_is_valid_crc_source(cur_crc_src)) {
9623 configure_crc = true;
9624 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
9625 if (amdgpu_dm_crc_window_is_activated(crtc)) {
9626 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
9627 acrtc->dm_irq_params.crc_window.update_win = true;
9628 acrtc->dm_irq_params.crc_window.skip_frame_cnt = 2;
9629 spin_lock_irq(&crc_rd_wrk->crc_rd_work_lock);
9630 crc_rd_wrk->crtc = crtc;
9631 spin_unlock_irq(&crc_rd_wrk->crc_rd_work_lock);
9632 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
9638 if (amdgpu_dm_crtc_configure_crc_source(
9639 crtc, dm_new_crtc_state, cur_crc_src))
9640 DRM_DEBUG_DRIVER("Failed to configure crc source");
9645 for_each_new_crtc_in_state(state, crtc, new_crtc_state, j)
9646 if (new_crtc_state->async_flip)
9647 wait_for_vblank = false;
9649 /* update planes when needed per crtc*/
9650 for_each_new_crtc_in_state(state, crtc, new_crtc_state, j) {
9651 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9653 if (dm_new_crtc_state->stream)
9654 amdgpu_dm_commit_planes(state, dc_state, dev,
9655 dm, crtc, wait_for_vblank);
9658 /* Update audio instances for each connector. */
9659 amdgpu_dm_commit_audio(dev, state);
9661 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) || \
9662 defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
9663 /* restore the backlight level */
9664 for (i = 0; i < dm->num_of_edps; i++) {
9665 if (dm->backlight_dev[i] &&
9666 (amdgpu_dm_backlight_get_level(dm, i) != dm->brightness[i]))
9667 amdgpu_dm_backlight_set_level(dm, i, dm->brightness[i]);
9671 * Send the vblank event for any CRTC whose event was not handled in the flip
9672 * path and mark the event consumed for drm_atomic_helper_commit_hw_done()
9674 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
9675 for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
9677 if (new_crtc_state->event)
9678 drm_send_event_locked(dev, &new_crtc_state->event->base);
9680 new_crtc_state->event = NULL;
9682 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
9684 /* Signal HW programming completion */
9685 drm_atomic_helper_commit_hw_done(state);
9687 if (wait_for_vblank)
9688 drm_atomic_helper_wait_for_flip_done(dev, state);
9690 drm_atomic_helper_cleanup_planes(dev, state);
9692 /* return the stolen vga memory back to VRAM */
9693 if (!adev->mman.keep_stolen_vga_memory)
9694 amdgpu_bo_free_kernel(&adev->mman.stolen_vga_memory, NULL, NULL);
9695 amdgpu_bo_free_kernel(&adev->mman.stolen_extended_memory, NULL, NULL);
9698 * Finally, drop a runtime PM reference for each newly disabled CRTC,
9699 * so we can put the GPU into runtime suspend if we're not driving any displays anymore.
9702 for (i = 0; i < crtc_disable_count; i++)
9703 pm_runtime_put_autosuspend(dev->dev);
9704 pm_runtime_mark_last_busy(dev->dev);
9707 dc_release_state(dc_state_temp);
9711 static int dm_force_atomic_commit(struct drm_connector *connector)
9714 struct drm_device *ddev = connector->dev;
9715 struct drm_atomic_state *state = drm_atomic_state_alloc(ddev);
9716 struct amdgpu_crtc *disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
9717 struct drm_plane *plane = disconnected_acrtc->base.primary;
9718 struct drm_connector_state *conn_state;
9719 struct drm_crtc_state *crtc_state;
9720 struct drm_plane_state *plane_state;
9725 state->acquire_ctx = ddev->mode_config.acquire_ctx;
9727 /* Construct an atomic state to restore previous display setting */
9730 * Attach connectors to drm_atomic_state
9732 conn_state = drm_atomic_get_connector_state(state, connector);
9734 ret = PTR_ERR_OR_ZERO(conn_state);
9738 /* Attach crtc to drm_atomic_state*/
9739 crtc_state = drm_atomic_get_crtc_state(state, &disconnected_acrtc->base);
9741 ret = PTR_ERR_OR_ZERO(crtc_state);
9745 /* force a restore */
9746 crtc_state->mode_changed = true;
9748 /* Attach plane to drm_atomic_state */
9749 plane_state = drm_atomic_get_plane_state(state, plane);
9751 ret = PTR_ERR_OR_ZERO(plane_state);
9755 /* Call commit internally with the state we just constructed */
9756 ret = drm_atomic_commit(state);
9759 drm_atomic_state_put(state);
9761 DRM_ERROR("Restoring old state failed with %i\n", ret);
9767 * This function handles all cases when set mode does not come upon hotplug.
9768 * This includes when a display is unplugged then plugged back into the
9769 * same port and when running without usermode desktop manager support.
9771 void dm_restore_drm_connector_state(struct drm_device *dev,
9772 struct drm_connector *connector)
9774 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
9775 struct amdgpu_crtc *disconnected_acrtc;
9776 struct dm_crtc_state *acrtc_state;
9778 if (!aconnector->dc_sink || !connector->state || !connector->encoder)
9781 disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
9782 if (!disconnected_acrtc)
9785 acrtc_state = to_dm_crtc_state(disconnected_acrtc->base.state);
9786 if (!acrtc_state->stream)
9790 * If the previous sink is not released and different from the current,
9791 * we deduce we are in a state where we cannot rely on a usermode call
9792 * to turn on the display, so we do it here
9794 if (acrtc_state->stream->sink != aconnector->dc_sink)
9795 dm_force_atomic_commit(&aconnector->base);
9799 * Grabs all modesetting locks to serialize against any blocking commits and
9800 * waits for completion of all non-blocking commits.
9802 static int do_aquire_global_lock(struct drm_device *dev,
9803 struct drm_atomic_state *state)
9805 struct drm_crtc *crtc;
9806 struct drm_crtc_commit *commit;
9810 * Adding all modeset locks to acquire_ctx will
9811 * ensure that when the framework releases it, the
9812 * extra locks we are locking here will get released too.
9814 ret = drm_modeset_lock_all_ctx(dev, state->acquire_ctx);
9818 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
9819 spin_lock(&crtc->commit_lock);
9820 commit = list_first_entry_or_null(&crtc->commit_list,
9821 struct drm_crtc_commit, commit_entry);
9823 drm_crtc_commit_get(commit);
9824 spin_unlock(&crtc->commit_lock);
9830 * Make sure all pending HW programming has completed and all page flips are done.
9833 ret = wait_for_completion_interruptible_timeout(&commit->hw_done, 10*HZ);
9836 ret = wait_for_completion_interruptible_timeout(
9837 &commit->flip_done, 10*HZ);
9840 DRM_ERROR("[CRTC:%d:%s] hw_done or flip_done "
9841 "timed out\n", crtc->base.id, crtc->name);
9843 drm_crtc_commit_put(commit);
9846 return ret < 0 ? ret : 0;
9849 static void get_freesync_config_for_crtc(
9850 struct dm_crtc_state *new_crtc_state,
9851 struct dm_connector_state *new_con_state)
9853 struct mod_freesync_config config = {0};
9854 struct amdgpu_dm_connector *aconnector =
9855 to_amdgpu_dm_connector(new_con_state->base.connector);
9856 struct drm_display_mode *mode = &new_crtc_state->base.mode;
9857 int vrefresh = drm_mode_vrefresh(mode);
9858 bool fs_vid_mode = false;
9860 new_crtc_state->vrr_supported = new_con_state->freesync_capable &&
9861 vrefresh >= aconnector->min_vfreq &&
9862 vrefresh <= aconnector->max_vfreq;
9864 if (new_crtc_state->vrr_supported) {
9865 new_crtc_state->stream->ignore_msa_timing_param = true;
9866 fs_vid_mode = new_crtc_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED;
9868 config.min_refresh_in_uhz = aconnector->min_vfreq * 1000000;
9869 config.max_refresh_in_uhz = aconnector->max_vfreq * 1000000;
9870 config.vsif_supported = true;
9874 config.state = VRR_STATE_ACTIVE_FIXED;
9875 config.fixed_refresh_in_uhz = new_crtc_state->freesync_config.fixed_refresh_in_uhz;
9877 } else if (new_crtc_state->base.vrr_enabled) {
9878 config.state = VRR_STATE_ACTIVE_VARIABLE;
9880 config.state = VRR_STATE_INACTIVE;
9884 new_crtc_state->freesync_config = config;
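/*
 * Example of the config built above, with hypothetical panel values: a
 * connector reporting min_vfreq = 48 and max_vfreq = 144 produces
 * min_refresh_in_uhz = 48,000,000 and max_refresh_in_uhz = 144,000,000, and
 * any mode with a vrefresh between 48 and 144 Hz marks the CRTC as
 * vrr_supported with VSIF support enabled.
 */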
9887 static void reset_freesync_config_for_crtc(
9888 struct dm_crtc_state *new_crtc_state)
9890 new_crtc_state->vrr_supported = false;
9892 memset(&new_crtc_state->vrr_infopacket, 0,
9893 sizeof(new_crtc_state->vrr_infopacket));
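/*
 * The helper below reports "timing unchanged for freesync" only when the two
 * modes differ purely in vertical blanking: clock, horizontal timing, hskew
 * and vscan must match, while vtotal, vsync_start and vsync_end may differ
 * as long as the vsync width stays the same. Hypothetical illustration: two
 * 1920x1080 modes sharing the same pixel clock and htotal, where one
 * stretches the vertical front porch to run at 48 Hz instead of 60 Hz, pass
 * this check, so only the front porch needs reprogramming rather than a
 * full modeset.
 */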
9897 is_timing_unchanged_for_freesync(struct drm_crtc_state *old_crtc_state,
9898 struct drm_crtc_state *new_crtc_state)
9900 struct drm_display_mode old_mode, new_mode;
9902 if (!old_crtc_state || !new_crtc_state)
9905 old_mode = old_crtc_state->mode;
9906 new_mode = new_crtc_state->mode;
9908 if (old_mode.clock == new_mode.clock &&
9909 old_mode.hdisplay == new_mode.hdisplay &&
9910 old_mode.vdisplay == new_mode.vdisplay &&
9911 old_mode.htotal == new_mode.htotal &&
9912 old_mode.vtotal != new_mode.vtotal &&
9913 old_mode.hsync_start == new_mode.hsync_start &&
9914 old_mode.vsync_start != new_mode.vsync_start &&
9915 old_mode.hsync_end == new_mode.hsync_end &&
9916 old_mode.vsync_end != new_mode.vsync_end &&
9917 old_mode.hskew == new_mode.hskew &&
9918 old_mode.vscan == new_mode.vscan &&
9919 (old_mode.vsync_end - old_mode.vsync_start) ==
9920 (new_mode.vsync_end - new_mode.vsync_start))
9926 static void set_freesync_fixed_config(struct dm_crtc_state *dm_new_crtc_state) {
9927 uint64_t num, den, res;
9928 struct drm_crtc_state *new_crtc_state = &dm_new_crtc_state->base;
9930 dm_new_crtc_state->freesync_config.state = VRR_STATE_ACTIVE_FIXED;
9932 num = (unsigned long long)new_crtc_state->mode.clock * 1000 * 1000000;
9933 den = (unsigned long long)new_crtc_state->mode.htotal *
9934 (unsigned long long)new_crtc_state->mode.vtotal;
9936 res = div_u64(num, den);
9937 dm_new_crtc_state->freesync_config.fixed_refresh_in_uhz = res;
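/*
 * Worked numbers for the math above, using a hypothetical mode: drm mode
 * clock is in kHz, so clock = 148500, htotal = 2200 and vtotal = 1125 give
 * num = 148,500,000 * 1,000,000 and den = 2,475,000, hence
 * fixed_refresh_in_uhz = 60,000,000, i.e. a 60 Hz fixed refresh rate.
 */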
9940 static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
9941 struct drm_atomic_state *state,
9942 struct drm_crtc *crtc,
9943 struct drm_crtc_state *old_crtc_state,
9944 struct drm_crtc_state *new_crtc_state,
9946 bool *lock_and_validation_needed)
9948 struct dm_atomic_state *dm_state = NULL;
9949 struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
9950 struct dc_stream_state *new_stream;
9954 * TODO Move this code into dm_crtc_atomic_check once we get rid of dc_validation_set
9955 * update changed items
9957 struct amdgpu_crtc *acrtc = NULL;
9958 struct amdgpu_dm_connector *aconnector = NULL;
9959 struct drm_connector_state *drm_new_conn_state = NULL, *drm_old_conn_state = NULL;
9960 struct dm_connector_state *dm_new_conn_state = NULL, *dm_old_conn_state = NULL;
9964 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9965 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9966 acrtc = to_amdgpu_crtc(crtc);
9967 aconnector = amdgpu_dm_find_first_crtc_matching_connector(state, crtc);
9969 /* TODO This hack should go away */
9970 if (aconnector && enable) {
9971 /* Make sure fake sink is created in plug-in scenario */
9972 drm_new_conn_state = drm_atomic_get_new_connector_state(state,
9974 drm_old_conn_state = drm_atomic_get_old_connector_state(state,
9977 if (IS_ERR(drm_new_conn_state)) {
9978 ret = PTR_ERR_OR_ZERO(drm_new_conn_state);
9982 dm_new_conn_state = to_dm_connector_state(drm_new_conn_state);
9983 dm_old_conn_state = to_dm_connector_state(drm_old_conn_state);
9985 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
9988 new_stream = create_validate_stream_for_sink(aconnector,
9989 &new_crtc_state->mode,
9991 dm_old_crtc_state->stream);
9994 * we can have no stream on ACTION_SET if a display
9995 * was disconnected during S3. In this case it is not an
9996 * error; the OS will be updated after detection and
9997 * will do the right thing on the next atomic commit
10001 DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
10002 __func__, acrtc->base.base.id);
10008 * TODO: Check VSDB bits to decide whether this should
10009 * be enabled or not.
10011 new_stream->triggered_crtc_reset.enabled =
10012 dm->force_timing_sync;
10014 dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
10016 ret = fill_hdr_info_packet(drm_new_conn_state,
10017 &new_stream->hdr_static_metadata);
10022 * If we already removed the old stream from the context
10023 * (and set the new stream to NULL) then we can't reuse
10024 * the old stream even if the stream and scaling are unchanged.
10025 * We'll hit the BUG_ON and black screen.
10027 * TODO: Refactor this function to allow this check to work
10028 * in all conditions.
10030 if (amdgpu_freesync_vid_mode &&
10031 dm_new_crtc_state->stream &&
10032 is_timing_unchanged_for_freesync(new_crtc_state, old_crtc_state))
10035 if (dm_new_crtc_state->stream &&
10036 dc_is_stream_unchanged(new_stream, dm_old_crtc_state->stream) &&
10037 dc_is_stream_scaling_unchanged(new_stream, dm_old_crtc_state->stream)) {
10038 new_crtc_state->mode_changed = false;
10039 DRM_DEBUG_DRIVER("Mode change not required, setting mode_changed to %d",
10040 new_crtc_state->mode_changed);
10044 /* mode_changed flag may get updated above, need to check again */
10045 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
10049 "amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
10050 "planes_changed:%d, mode_changed:%d,active_changed:%d,"
10051 "connectors_changed:%d\n",
10053 new_crtc_state->enable,
10054 new_crtc_state->active,
10055 new_crtc_state->planes_changed,
10056 new_crtc_state->mode_changed,
10057 new_crtc_state->active_changed,
10058 new_crtc_state->connectors_changed);
10060 /* Remove stream for any changed/disabled CRTC */
10063 if (!dm_old_crtc_state->stream)
10066 if (amdgpu_freesync_vid_mode && dm_new_crtc_state->stream &&
10067 is_timing_unchanged_for_freesync(new_crtc_state,
10069 new_crtc_state->mode_changed = false;
10071 "Mode change not required for front porch change, "
10072 "setting mode_changed to %d",
10073 new_crtc_state->mode_changed);
10075 set_freesync_fixed_config(dm_new_crtc_state);
10078 } else if (amdgpu_freesync_vid_mode && aconnector &&
10079 is_freesync_video_mode(&new_crtc_state->mode,
10081 struct drm_display_mode *high_mode;
10083 high_mode = get_highest_refresh_rate_mode(aconnector, false);
10084 if (!drm_mode_equal(&new_crtc_state->mode, high_mode)) {
10085 set_freesync_fixed_config(dm_new_crtc_state);
10089 ret = dm_atomic_get_state(state, &dm_state);
10093 DRM_DEBUG_DRIVER("Disabling DRM crtc: %d\n",
10096 /* i.e. reset mode */
10097 if (dc_remove_stream_from_ctx(
10100 dm_old_crtc_state->stream) != DC_OK) {
10105 dc_stream_release(dm_old_crtc_state->stream);
10106 dm_new_crtc_state->stream = NULL;
10108 reset_freesync_config_for_crtc(dm_new_crtc_state);
10110 *lock_and_validation_needed = true;
10112 } else { /* Add stream for any updated/enabled CRTC */
10114 /* Quick fix to prevent a NULL pointer dereference on new_stream when added
10115 * MST connectors are not found in the existing crtc_state in chained mode.
10116 * TODO: dig out the root cause of this. */
10118 if (!aconnector || (!aconnector->dc_sink && aconnector->mst_port))
10121 if (modereset_required(new_crtc_state))
10124 if (modeset_required(new_crtc_state, new_stream,
10125 dm_old_crtc_state->stream)) {
10127 WARN_ON(dm_new_crtc_state->stream);
10129 ret = dm_atomic_get_state(state, &dm_state);
10133 dm_new_crtc_state->stream = new_stream;
10135 dc_stream_retain(new_stream);
10137 DRM_DEBUG_ATOMIC("Enabling DRM crtc: %d\n",
10140 if (dc_add_stream_to_ctx(
10143 dm_new_crtc_state->stream) != DC_OK) {
10148 *lock_and_validation_needed = true;
10153 /* Release extra reference */
10155 dc_stream_release(new_stream);
10158 /* Below we want to apply dc stream updates that do not
10159 * require a full modeset. */
10161 if (!(enable && aconnector && new_crtc_state->active))
10164 /* Given the above conditions, the dc stream state cannot be NULL because:
10165 * 1. the CRTC is being enabled (its stream was just added to the dc
10166 * context, or is already on it),
10167 * 2. it has a valid connector attached, and
10168 * 3. it is currently active and enabled.
10169 * => The dc stream state currently exists. */
10171 BUG_ON(dm_new_crtc_state->stream == NULL);
10173 /* Scaling or underscan settings */
10174 if (is_scaling_state_different(dm_old_conn_state, dm_new_conn_state) ||
10175 drm_atomic_crtc_needs_modeset(new_crtc_state))
10176 update_stream_scaling_settings(
10177 &new_crtc_state->mode, dm_new_conn_state, dm_new_crtc_state->stream);
10180 dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
10183 * Color management settings. We also update color properties
10184 * when a modeset is needed, to ensure it gets reprogrammed.
10186 if (dm_new_crtc_state->base.color_mgmt_changed ||
10187 drm_atomic_crtc_needs_modeset(new_crtc_state)) {
10188 ret = amdgpu_dm_update_crtc_color_mgmt(dm_new_crtc_state);
10193 /* Update Freesync settings. */
10194 get_freesync_config_for_crtc(dm_new_crtc_state,
10195 dm_new_conn_state);
10201 dc_stream_release(new_stream);
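/*
 * Illustrative sketch only, not driver code: the reference-ownership pattern
 * used above when attaching a new stream to the CRTC state.
 * create_validate_stream_for_sink() hands back a stream with one (local)
 * reference; the CRTC state takes its own reference via dc_stream_retain(),
 * and the local reference is dropped via dc_stream_release() on every exit
 * path, so success and early-return paths stay balanced.  All names below
 * are hypothetical.
 */
#if 0	/* example only, never built */
struct example_obj {
	int refcount;
};

static void example_retain(struct example_obj *o)
{
	o->refcount++;
}

static void example_release(struct example_obj *o)
{
	/* Real code would free the object when the count reaches zero. */
	o->refcount--;
}

/* @obj enters with refcount == 1 (the creator's local reference). */
static int example_attach(struct example_obj **slot, struct example_obj *obj)
{
	if (!obj)
		return -ENOMEM;

	example_retain(obj);		/* the state now owns its own reference */
	*slot = obj;

	example_release(obj);		/* always drop the creator's local reference */
	return 0;
}
#endif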
10205 static bool should_reset_plane(struct drm_atomic_state *state,
10206 struct drm_plane *plane,
10207 struct drm_plane_state *old_plane_state,
10208 struct drm_plane_state *new_plane_state)
10210 struct drm_plane *other;
10211 struct drm_plane_state *old_other_state, *new_other_state;
10212 struct drm_crtc_state *new_crtc_state;
10216 /* TODO: Remove this hack once the checks below are sufficient to
10217 * determine when we need to reset all the planes on a CRTC. */
10220 if (state->allow_modeset)
10223 /* Exit early if we know that we're adding or removing the plane. */
10224 if (old_plane_state->crtc != new_plane_state->crtc)
10227 /* old crtc == new_crtc == NULL, plane not in context. */
10228 if (!new_plane_state->crtc)
10232 drm_atomic_get_new_crtc_state(state, new_plane_state->crtc);
10234 if (!new_crtc_state)
10237 /* CRTC Degamma changes currently require us to recreate planes. */
10238 if (new_crtc_state->color_mgmt_changed)
10241 if (drm_atomic_crtc_needs_modeset(new_crtc_state))
10245 * If there are any new primary or overlay planes being added or
10246 * removed then the z-order can potentially change. To ensure
10247 * correct z-order and pipe acquisition, the current DC architecture requires
10248 * us to remove and recreate all existing planes (see the sketch after this function).
10250 * TODO: Come up with a more elegant solution for this.
10252 for_each_oldnew_plane_in_state(state, other, old_other_state, new_other_state, i) {
10253 struct amdgpu_framebuffer *old_afb, *new_afb;
10254 if (other->type == DRM_PLANE_TYPE_CURSOR)
10257 if (old_other_state->crtc != new_plane_state->crtc &&
10258 new_other_state->crtc != new_plane_state->crtc)
10261 if (old_other_state->crtc != new_other_state->crtc)
10264 /* Src/dst size and scaling updates. */
10265 if (old_other_state->src_w != new_other_state->src_w ||
10266 old_other_state->src_h != new_other_state->src_h ||
10267 old_other_state->crtc_w != new_other_state->crtc_w ||
10268 old_other_state->crtc_h != new_other_state->crtc_h)
10271 /* Rotation / mirroring updates. */
10272 if (old_other_state->rotation != new_other_state->rotation)
10275 /* Blending updates. */
10276 if (old_other_state->pixel_blend_mode !=
10277 new_other_state->pixel_blend_mode)
10280 /* Alpha updates. */
10281 if (old_other_state->alpha != new_other_state->alpha)
10284 /* Colorspace changes. */
10285 if (old_other_state->color_range != new_other_state->color_range ||
10286 old_other_state->color_encoding != new_other_state->color_encoding)
10289 /* Framebuffer checks fall at the end. */
10290 if (!old_other_state->fb || !new_other_state->fb)
10293 /* Pixel format changes can require bandwidth updates. */
10294 if (old_other_state->fb->format != new_other_state->fb->format)
10297 old_afb = (struct amdgpu_framebuffer *)old_other_state->fb;
10298 new_afb = (struct amdgpu_framebuffer *)new_other_state->fb;
10300 /* Tiling and DCC changes also require bandwidth updates. */
10301 if (old_afb->tiling_flags != new_afb->tiling_flags ||
10302 old_afb->base.modifier != new_afb->base.modifier)
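/*
 * Illustrative sketch only, not driver code: the shape of the decision made
 * by should_reset_plane() above.  Because DC rebuilds pipe and z-order state
 * from the full plane set, any change in which planes sit on a CRTC, or in
 * their geometry, forces every plane on that CRTC to be removed and
 * recreated.  The struct and function below are hypothetical.
 */
#if 0	/* example only, never built */
struct example_plane {
	int crtc_id;		/* -1 when not assigned to a CRTC */
	int src_w, src_h;
	int crtc_w, crtc_h;
};

static bool example_needs_full_plane_reset(const struct example_plane *olds,
					    const struct example_plane *news,
					    int count, int crtc_id)
{
	int i;

	for (i = 0; i < count; i++) {
		/* Plane joined or left this CRTC: z-order can change. */
		if ((olds[i].crtc_id == crtc_id) != (news[i].crtc_id == crtc_id))
			return true;

		if (news[i].crtc_id != crtc_id)
			continue;

		/* Size or scaling changed: bandwidth and pipe split can change. */
		if (olds[i].src_w != news[i].src_w ||
		    olds[i].src_h != news[i].src_h ||
		    olds[i].crtc_w != news[i].crtc_w ||
		    olds[i].crtc_h != news[i].crtc_h)
			return true;
	}

	return false;
}
#endif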
10309 static int dm_check_cursor_fb(struct amdgpu_crtc *new_acrtc,
10310 struct drm_plane_state *new_plane_state,
10311 struct drm_framebuffer *fb)
10313 struct amdgpu_device *adev = drm_to_adev(new_acrtc->base.dev);
10314 struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(fb);
10315 unsigned int pitch;
10318 if (fb->width > new_acrtc->max_cursor_width ||
10319 fb->height > new_acrtc->max_cursor_height) {
10320 DRM_DEBUG_ATOMIC("Bad cursor FB size %dx%d\n",
10321 new_plane_state->fb->width,
10322 new_plane_state->fb->height);
10325 if (new_plane_state->src_w != fb->width << 16 ||
10326 new_plane_state->src_h != fb->height << 16) {
10327 DRM_DEBUG_ATOMIC("Cropping not supported for cursor plane\n");
10331 /* Pitch in pixels */
10332 pitch = fb->pitches[0] / fb->format->cpp[0];
10334 if (fb->width != pitch) {
10335 DRM_DEBUG_ATOMIC("Cursor FB width %d doesn't match pitch %d", fb->width, pitch);
10344 /* FB pitch is supported by cursor plane */
10347 DRM_DEBUG_ATOMIC("Bad cursor FB pitch %d px\n", pitch);
10351 /* Core DRM takes care of checking FB modifiers, so we only need to
10352 * check tiling flags when the FB doesn't have a modifier. */
10353 if (!(fb->flags & DRM_MODE_FB_MODIFIERS)) {
10354 if (adev->family < AMDGPU_FAMILY_AI) {
10355 linear = AMDGPU_TILING_GET(afb->tiling_flags, ARRAY_MODE) != DC_ARRAY_2D_TILED_THIN1 &&
10356 AMDGPU_TILING_GET(afb->tiling_flags, ARRAY_MODE) != DC_ARRAY_1D_TILED_THIN1 &&
10357 AMDGPU_TILING_GET(afb->tiling_flags, MICRO_TILE_MODE) == 0;
10359 linear = AMDGPU_TILING_GET(afb->tiling_flags, SWIZZLE_MODE) == 0;
10362 DRM_DEBUG_ATOMIC("Cursor FB not linear");
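/*
 * Illustrative sketch only, not driver code: the pitch check made by
 * dm_check_cursor_fb() above.  The hardware cursor expects a packed buffer,
 * so the framebuffer pitch converted to pixels must equal the cursor width
 * (and must also be one of the pitch values accepted by the driver's switch
 * statement, not shown here).  For a 64x64 ARGB8888 cursor (4 bytes per
 * pixel) that means pitches[0] == 256 bytes.
 */
#if 0	/* example only, never built */
static bool example_cursor_pitch_ok(unsigned int width_px,
				    unsigned int pitch_bytes,
				    unsigned int bytes_per_pixel)
{
	unsigned int pitch_px = pitch_bytes / bytes_per_pixel;

	/* example_cursor_pitch_ok(64, 256, 4) -> true  */
	/* example_cursor_pitch_ok(64, 512, 4) -> false */
	return pitch_px == width_px;
}
#endif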
10370 static int dm_update_plane_state(struct dc *dc,
10371 struct drm_atomic_state *state,
10372 struct drm_plane *plane,
10373 struct drm_plane_state *old_plane_state,
10374 struct drm_plane_state *new_plane_state,
10376 bool *lock_and_validation_needed)
10379 struct dm_atomic_state *dm_state = NULL;
10380 struct drm_crtc *new_plane_crtc, *old_plane_crtc;
10381 struct drm_crtc_state *old_crtc_state, *new_crtc_state;
10382 struct dm_crtc_state *dm_new_crtc_state, *dm_old_crtc_state;
10383 struct dm_plane_state *dm_new_plane_state, *dm_old_plane_state;
10384 struct amdgpu_crtc *new_acrtc;
10389 new_plane_crtc = new_plane_state->crtc;
10390 old_plane_crtc = old_plane_state->crtc;
10391 dm_new_plane_state = to_dm_plane_state(new_plane_state);
10392 dm_old_plane_state = to_dm_plane_state(old_plane_state);
10394 if (plane->type == DRM_PLANE_TYPE_CURSOR) {
10395 if (!enable || !new_plane_crtc ||
10396 drm_atomic_plane_disabling(plane->state, new_plane_state))
10399 new_acrtc = to_amdgpu_crtc(new_plane_crtc);
10401 if (new_plane_state->src_x != 0 || new_plane_state->src_y != 0) {
10402 DRM_DEBUG_ATOMIC("Cropping not supported for cursor plane\n");
10406 if (new_plane_state->fb) {
10407 ret = dm_check_cursor_fb(new_acrtc, new_plane_state,
10408 new_plane_state->fb);
10416 needs_reset = should_reset_plane(state, plane, old_plane_state,
10419 /* Remove any changed/removed planes */
10424 if (!old_plane_crtc)
10427 old_crtc_state = drm_atomic_get_old_crtc_state(
10428 state, old_plane_crtc);
10429 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
10431 if (!dm_old_crtc_state->stream)
10434 DRM_DEBUG_ATOMIC("Disabling DRM plane: %d on DRM crtc %d\n",
10435 plane->base.id, old_plane_crtc->base.id);
10437 ret = dm_atomic_get_state(state, &dm_state);
10441 if (!dc_remove_plane_from_context(
10443 dm_old_crtc_state->stream,
10444 dm_old_plane_state->dc_state,
10445 dm_state->context)) {
10451 dc_plane_state_release(dm_old_plane_state->dc_state);
10452 dm_new_plane_state->dc_state = NULL;
10454 *lock_and_validation_needed = true;
10456 } else { /* Add new planes */
10457 struct dc_plane_state *dc_new_plane_state;
10459 if (drm_atomic_plane_disabling(plane->state, new_plane_state))
10462 if (!new_plane_crtc)
10465 new_crtc_state = drm_atomic_get_new_crtc_state(state, new_plane_crtc);
10466 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
10468 if (!dm_new_crtc_state->stream)
10474 ret = dm_plane_helper_check_state(new_plane_state, new_crtc_state);
10478 WARN_ON(dm_new_plane_state->dc_state);
10480 dc_new_plane_state = dc_create_plane_state(dc);
10481 if (!dc_new_plane_state)
10484 DRM_DEBUG_ATOMIC("Enabling DRM plane: %d on DRM crtc %d\n",
10485 plane->base.id, new_plane_crtc->base.id);
10487 ret = fill_dc_plane_attributes(
10488 drm_to_adev(new_plane_crtc->dev),
10489 dc_new_plane_state,
10493 dc_plane_state_release(dc_new_plane_state);
10497 ret = dm_atomic_get_state(state, &dm_state);
10499 dc_plane_state_release(dc_new_plane_state);
10504 * Any atomic check errors that occur after this will
10505 * not need a release. The plane state will be attached
10506 * to the stream, and therefore part of the atomic
10507 * state. It'll be released when the atomic state is
10510 if (!dc_add_plane_to_context(
10512 dm_new_crtc_state->stream,
10513 dc_new_plane_state,
10514 dm_state->context)) {
10516 dc_plane_state_release(dc_new_plane_state);
10520 dm_new_plane_state->dc_state = dc_new_plane_state;
10522 /* Tell DC to do a full surface update every time there
10523 * is a plane change. Inefficient, but works for now.
10525 dm_new_plane_state->dc_state->update_flags.bits.full_update = 1;
10527 *lock_and_validation_needed = true;
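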
10534 static int dm_check_crtc_cursor(struct drm_atomic_state *state,
10535 struct drm_crtc *crtc,
10536 struct drm_crtc_state *new_crtc_state)
10538 struct drm_plane_state *new_cursor_state, *new_primary_state;
10539 int cursor_scale_w, cursor_scale_h, primary_scale_w, primary_scale_h;
10541 /* On DCE and DCN there is no dedicated hardware cursor plane. We get a
10542 * cursor per pipe, but it inherits the scaling and positioning from the
10543 * underlying pipe, so check that the cursor plane's scaling matches the
10544 * primary plane's (see the illustrative sketch after this function). */
10546 new_cursor_state = drm_atomic_get_new_plane_state(state, crtc->cursor);
10547 new_primary_state = drm_atomic_get_new_plane_state(state, crtc->primary);
10548 if (!new_cursor_state || !new_primary_state ||
10549 !new_cursor_state->fb || !new_primary_state->fb) {
10553 cursor_scale_w = new_cursor_state->crtc_w * 1000 /
10554 (new_cursor_state->src_w >> 16);
10555 cursor_scale_h = new_cursor_state->crtc_h * 1000 /
10556 (new_cursor_state->src_h >> 16);
10558 primary_scale_w = new_primary_state->crtc_w * 1000 /
10559 (new_primary_state->src_w >> 16);
10560 primary_scale_h = new_primary_state->crtc_h * 1000 /
10561 (new_primary_state->src_h >> 16);
10563 if (cursor_scale_w != primary_scale_w ||
10564 cursor_scale_h != primary_scale_h) {
10565 drm_dbg_atomic(crtc->dev, "Cursor plane scaling doesn't match primary plane\n");
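/*
 * Illustrative sketch only, not driver code: the fixed-point scale comparison
 * used by dm_check_crtc_cursor() above.  Plane src_w/src_h are 16.16 fixed
 * point, so (crtc size * 1000) / (src size >> 16) yields the scale factor in
 * units of 0.1%.  The cursor inherits the pipe scaling, so its ratio has to
 * match the primary plane's.
 */
#if 0	/* example only, never built */
static int example_scale_milli(unsigned int crtc_size, unsigned int src_size_fixed)
{
	return crtc_size * 1000 / (src_size_fixed >> 16);
}

/*
 * A 1920-wide source scaled to a 3840-wide CRTC gives 2000 (2.0x), while a
 * 64-wide cursor shown at 64 pixels gives 1000 (1.0x): the scales differ,
 * so the atomic check rejects the configuration.
 */
#endif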
10572 #if defined(CONFIG_DRM_AMD_DC_DCN)
10573 static int add_affected_mst_dsc_crtcs(struct drm_atomic_state *state, struct drm_crtc *crtc)
10575 struct drm_connector *connector;
10576 struct drm_connector_state *conn_state;
10577 struct amdgpu_dm_connector *aconnector = NULL;
10579 for_each_new_connector_in_state(state, connector, conn_state, i) {
10580 if (conn_state->crtc != crtc)
10583 aconnector = to_amdgpu_dm_connector(connector);
10584 if (!aconnector->port || !aconnector->mst_port)
10593 return drm_dp_mst_add_affected_dsc_crtcs(state, &aconnector->mst_port->mst_mgr);
10597 static int validate_overlay(struct drm_atomic_state *state)
10600 struct drm_plane *plane;
10601 struct drm_plane_state *new_plane_state;
10602 struct drm_plane_state *primary_state, *overlay_state = NULL;
10604 /* Check if primary plane is contained inside overlay */
10605 for_each_new_plane_in_state_reverse(state, plane, new_plane_state, i) {
10606 if (plane->type == DRM_PLANE_TYPE_OVERLAY) {
10607 if (drm_atomic_plane_disabling(plane->state, new_plane_state))
10610 overlay_state = new_plane_state;
10615 /* check if we're making changes to the overlay plane */
10616 if (!overlay_state)
10619 /* check if overlay plane is enabled */
10620 if (!overlay_state->crtc)
10623 /* find the primary plane for the CRTC that the overlay is enabled on */
10624 primary_state = drm_atomic_get_plane_state(state, overlay_state->crtc->primary);
10625 if (IS_ERR(primary_state))
10626 return PTR_ERR(primary_state);
10628 /* check if primary plane is enabled */
10629 if (!primary_state->crtc)
10632 /* Perform the bounds check to ensure the overlay plane covers the primary */
10633 if (primary_state->crtc_x < overlay_state->crtc_x ||
10634 primary_state->crtc_y < overlay_state->crtc_y ||
10635 primary_state->crtc_x + primary_state->crtc_w > overlay_state->crtc_x + overlay_state->crtc_w ||
10636 primary_state->crtc_y + primary_state->crtc_h > overlay_state->crtc_y + overlay_state->crtc_h) {
10637 DRM_DEBUG_ATOMIC("Overlay plane is enabled with hardware cursor but does not fully cover primary plane\n");
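/*
 * Illustrative sketch only, not driver code: the containment test performed
 * by validate_overlay() above, reduced to plain rectangles.  The primary
 * plane must lie entirely inside the overlay plane for the configuration to
 * be accepted.
 */
#if 0	/* example only, never built */
struct example_rect {
	int x, y, w, h;
};

static bool example_rect_contains(const struct example_rect *outer,
				  const struct example_rect *inner)
{
	return inner->x >= outer->x &&
	       inner->y >= outer->y &&
	       inner->x + inner->w <= outer->x + outer->w &&
	       inner->y + inner->h <= outer->y + outer->h;
}
#endif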
10645 * amdgpu_dm_atomic_check() - Atomic check implementation for AMDgpu DM.
10646 * @dev: The DRM device
10647 * @state: The atomic state to commit
10649 * Validate that the given atomic state is programmable by DC into hardware.
10650 * This involves constructing a &struct dc_state reflecting the new hardware
10651 * state we wish to commit, then querying DC to see if it is programmable. It's
10652 * important not to modify the existing DC state. Otherwise, atomic_check
10653 * may unexpectedly commit hardware changes.
10655 * When validating the DC state, it's important that the right locks are
10656 * acquired. For the full-update case, which removes/adds/updates streams on
10657 * one CRTC while flipping on another, acquiring the global lock guarantees
10658 * that any such full-update commit will wait for completion of any outstanding
10659 * flip using DRM's synchronization events.
10661 * Note that DM adds the affected connectors for all CRTCs in state, even when
10662 * that might not seem necessary. This is because DC stream creation requires the
10663 * DC sink, which is tied to the DRM connector state. Cleaning this up should
10664 * be possible but non-trivial - a possible TODO item.
10666 * Return: 0 on success, or a negative error code if validation failed.
10668 static int amdgpu_dm_atomic_check(struct drm_device *dev,
10669 struct drm_atomic_state *state)
10671 struct amdgpu_device *adev = drm_to_adev(dev);
10672 struct dm_atomic_state *dm_state = NULL;
10673 struct dc *dc = adev->dm.dc;
10674 struct drm_connector *connector;
10675 struct drm_connector_state *old_con_state, *new_con_state;
10676 struct drm_crtc *crtc;
10677 struct drm_crtc_state *old_crtc_state, *new_crtc_state;
10678 struct drm_plane *plane;
10679 struct drm_plane_state *old_plane_state, *new_plane_state;
10680 enum dc_status status;
10682 bool lock_and_validation_needed = false;
10683 struct dm_crtc_state *dm_old_crtc_state;
10684 #if defined(CONFIG_DRM_AMD_DC_DCN)
10685 struct dsc_mst_fairness_vars vars[MAX_PIPES];
10686 struct drm_dp_mst_topology_state *mst_state;
10687 struct drm_dp_mst_topology_mgr *mgr;
10690 trace_amdgpu_dm_atomic_check_begin(state);
10692 ret = drm_atomic_helper_check_modeset(dev, state);
10696 /* Check connector changes */
10697 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
10698 struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
10699 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
10701 /* Skip connectors that are disabled or part of modeset already. */
10702 if (!old_con_state->crtc && !new_con_state->crtc)
10705 if (!new_con_state->crtc)
10708 new_crtc_state = drm_atomic_get_crtc_state(state, new_con_state->crtc);
10709 if (IS_ERR(new_crtc_state)) {
10710 ret = PTR_ERR(new_crtc_state);
10714 if (dm_old_con_state->abm_level !=
10715 dm_new_con_state->abm_level)
10716 new_crtc_state->connectors_changed = true;
10719 #if defined(CONFIG_DRM_AMD_DC_DCN)
10720 if (dc_resource_is_dsc_encoding_supported(dc)) {
10721 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
10722 if (drm_atomic_crtc_needs_modeset(new_crtc_state)) {
10723 ret = add_affected_mst_dsc_crtcs(state, crtc);
10730 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
10731 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
10733 if (!drm_atomic_crtc_needs_modeset(new_crtc_state) &&
10734 !new_crtc_state->color_mgmt_changed &&
10735 old_crtc_state->vrr_enabled == new_crtc_state->vrr_enabled &&
10736 !dm_old_crtc_state->dsc_force_changed)
10739 ret = amdgpu_dm_verify_lut_sizes(new_crtc_state);
10743 if (!new_crtc_state->enable)
10746 ret = drm_atomic_add_affected_connectors(state, crtc);
10750 ret = drm_atomic_add_affected_planes(state, crtc);
10754 if (dm_old_crtc_state->dsc_force_changed)
10755 new_crtc_state->mode_changed = true;
10759 * Add all primary and overlay planes on the CRTC to the state
10760 * whenever a plane is enabled to maintain correct z-ordering
10761 * and to enable fast surface updates.
10763 drm_for_each_crtc(crtc, dev) {
10764 bool modified = false;
10766 for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
10767 if (plane->type == DRM_PLANE_TYPE_CURSOR)
10770 if (new_plane_state->crtc == crtc ||
10771 old_plane_state->crtc == crtc) {
10780 drm_for_each_plane_mask(plane, state->dev, crtc->state->plane_mask) {
10781 if (plane->type == DRM_PLANE_TYPE_CURSOR)
10785 drm_atomic_get_plane_state(state, plane);
10787 if (IS_ERR(new_plane_state)) {
10788 ret = PTR_ERR(new_plane_state);
10794 /* Remove existing planes if they are modified */
10795 for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
10796 ret = dm_update_plane_state(dc, state, plane,
10800 &lock_and_validation_needed);
10805 /* Disable all crtcs which require disable */
10806 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
10807 ret = dm_update_crtc_state(&adev->dm, state, crtc,
10811 &lock_and_validation_needed);
10816 /* Enable all crtcs which require enable */
10817 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
10818 ret = dm_update_crtc_state(&adev->dm, state, crtc,
10822 &lock_and_validation_needed);
10827 ret = validate_overlay(state);
10831 /* Add new/modified planes */
10832 for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
10833 ret = dm_update_plane_state(dc, state, plane,
10837 &lock_and_validation_needed);
10842 /* Run this here since we want to validate the streams we created */
10843 ret = drm_atomic_helper_check_planes(dev, state);
10847 /* Check cursor planes scaling */
10848 for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
10849 ret = dm_check_crtc_cursor(state, crtc, new_crtc_state);
10854 if (state->legacy_cursor_update) {
10856 /* This is a fast cursor update coming from the plane update helper;
10857 * check if it can be done asynchronously for better performance. */
10860 state->async_update =
10861 !drm_atomic_helper_async_check(dev, state);
10864 * Skip the remaining global validation if this is an async
10865 * update. Cursor updates can be done without affecting
10866 * state or bandwidth calcs and this avoids the performance
10867 * penalty of locking the private state object and
10868 * allocating a new dc_state.
10870 if (state->async_update)
10874 /* Check scaling and underscan changes */
10875 /* TODO: Scaling-change validation was removed because a new stream cannot
10876 * be committed into the context without causing a full reset. Need to
10877 * decide how to handle this. */
10879 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
10880 struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
10881 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
10882 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
10884 /* Skip any modesets/resets */
10885 if (!acrtc || drm_atomic_crtc_needs_modeset(
10886 drm_atomic_get_new_crtc_state(state, &acrtc->base)))
10889 /* Skip anything that is not a scaling or underscan change */
10890 if (!is_scaling_state_different(dm_new_con_state, dm_old_con_state))
10893 lock_and_validation_needed = true;
10896 #if defined(CONFIG_DRM_AMD_DC_DCN)
10897 /* set the slot info for each mst_state based on the link encoding format */
10898 for_each_new_mst_mgr_in_state(state, mgr, mst_state, i) {
10899 struct amdgpu_dm_connector *aconnector;
10900 struct drm_connector *connector;
10901 struct drm_connector_list_iter iter;
10902 u8 link_coding_cap;
10904 if (!mgr->mst_state)
10907 drm_connector_list_iter_begin(dev, &iter);
10908 drm_for_each_connector_iter(connector, &iter) {
10909 int id = connector->index;
10911 if (id == mst_state->mgr->conn_base_id) {
10912 aconnector = to_amdgpu_dm_connector(connector);
10913 link_coding_cap = dc_link_dp_mst_decide_link_encoding_format(aconnector->dc_link);
10914 drm_dp_mst_update_slots(mst_state, link_coding_cap);
10919 drm_connector_list_iter_end(&iter);
10924 /* Streams and planes are reset when there are changes that affect bandwidth.
10925 * Anything that affects bandwidth needs to go through DC global validation
10926 * to ensure that the configuration can be applied to hardware.
10929 * We have to currently stall out here in atomic_check for outstanding commits
10930 * to finish in this case, because our IRQ handlers reference DRM state
10931 * directly - we can end up disabling interrupts too early if we don't.
10934 * TODO: Remove this stall and drop DM state private objects. */
10936 if (lock_and_validation_needed) {
10937 ret = dm_atomic_get_state(state, &dm_state);
10941 ret = do_aquire_global_lock(dev, state);
10945 #if defined(CONFIG_DRM_AMD_DC_DCN)
10946 if (!compute_mst_dsc_configs_for_state(state, dm_state->context, vars))
10949 ret = dm_update_mst_vcpi_slots_for_dsc(state, dm_state->context, vars);
10955 /* Perform validation of the MST topology in the state:
10956 * we need to perform the MST atomic check before calling
10957 * dc_validate_global_state(); otherwise we risk getting
10958 * stuck in an infinite loop and eventually hanging. */
10960 ret = drm_dp_mst_atomic_check(state);
10963 status = dc_validate_global_state(dc, dm_state->context, false);
10964 if (status != DC_OK) {
10965 drm_dbg_atomic(dev,
10966 "DC global validation failure: %s (%d)",
10967 dc_status_to_str(status), status);
10973 * The commit is a fast update. Fast updates shouldn't change
10974 * the DC context or affect global validation, and they can have
10975 * their commit work done in parallel with other commits not touching
10976 * the same resource. If we have a new DC context as part of
10977 * the DM atomic state from validation, we need to free it and
10978 * retain the existing one instead.
10980 * Furthermore, since the DM atomic state only contains the DC
10981 * context and can safely be annulled, we can free the state
10982 * and clear the associated private object now to free
10983 * some memory and avoid a possible use-after-free later.
10986 for (i = 0; i < state->num_private_objs; i++) {
10987 struct drm_private_obj *obj = state->private_objs[i].ptr;
10989 if (obj->funcs == adev->dm.atomic_obj.funcs) {
10990 int j = state->num_private_objs-1;
10992 dm_atomic_destroy_state(obj,
10993 state->private_objs[i].state);
10995 /* If i is not at the end of the array then the last element needs to be
10996 * moved to where i was before the array can safely be truncated (see the
10997 * swap-remove sketch after this function). */
11000 state->private_objs[i] =
11001 state->private_objs[j];
11003 state->private_objs[j].ptr = NULL;
11004 state->private_objs[j].state = NULL;
11005 state->private_objs[j].old_state = NULL;
11006 state->private_objs[j].new_state = NULL;
11008 state->num_private_objs = j;
11014 /* Store the overall update type for use later in atomic check. */
11015 for_each_new_crtc_in_state (state, crtc, new_crtc_state, i) {
11016 struct dm_crtc_state *dm_new_crtc_state =
11017 to_dm_crtc_state(new_crtc_state);
11019 dm_new_crtc_state->update_type = lock_and_validation_needed ?
11024 /* Must be success */
11027 trace_amdgpu_dm_atomic_check_finish(state, ret);
11032 if (ret == -EDEADLK)
11033 DRM_DEBUG_DRIVER("Atomic check stopped to avoid deadlock.\n");
11034 else if (ret == -EINTR || ret == -EAGAIN || ret == -ERESTARTSYS)
11035 DRM_DEBUG_DRIVER("Atomic check stopped due to signal.\n");
11037 DRM_DEBUG_DRIVER("Atomic check failed with err: %d\n", ret);
11039 trace_amdgpu_dm_atomic_check_finish(state, ret);
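/*
 * Illustrative sketch only, not driver code: the "swap with the last element
 * and shrink" removal applied to state->private_objs in
 * amdgpu_dm_atomic_check() above.  Element order is not preserved, but the
 * removal is O(1) and leaves no hole in the array.
 */
#if 0	/* example only, never built */
static void example_swap_remove(int *array, int *count, int idx)
{
	int last = *count - 1;

	if (idx != last)
		array[idx] = array[last];	/* move the tail entry into the hole */

	array[last] = 0;			/* clear the now-stale tail slot */
	*count = last;
}
#endif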
11044 static bool is_dp_capable_without_timing_msa(struct dc *dc,
11045 struct amdgpu_dm_connector *amdgpu_dm_connector)
11048 bool capable = false;
11050 if (amdgpu_dm_connector->dc_link &&
11051 dm_helpers_dp_read_dpcd(
11053 amdgpu_dm_connector->dc_link,
11054 DP_DOWN_STREAM_PORT_COUNT,
11056 sizeof(dpcd_data))) {
11057 capable = (dpcd_data & DP_MSA_TIMING_PAR_IGNORED) ? true : false;
11063 static bool dm_edid_parser_send_cea(struct amdgpu_display_manager *dm,
11064 unsigned int offset,
11065 unsigned int total_length,
11067 unsigned int length,
11068 struct amdgpu_hdmi_vsdb_info *vsdb)
11071 union dmub_rb_cmd cmd;
11072 struct dmub_cmd_send_edid_cea *input;
11073 struct dmub_cmd_edid_cea_output *output;
11075 if (length > DMUB_EDID_CEA_DATA_CHUNK_BYTES)
11078 memset(&cmd, 0, sizeof(cmd));
11080 input = &cmd.edid_cea.data.input;
11082 cmd.edid_cea.header.type = DMUB_CMD__EDID_CEA;
11083 cmd.edid_cea.header.sub_type = 0;
11084 cmd.edid_cea.header.payload_bytes =
11085 sizeof(cmd.edid_cea) - sizeof(cmd.edid_cea.header);
11086 input->offset = offset;
11087 input->length = length;
11088 input->total_length = total_length;
11089 memcpy(input->payload, data, length);
11091 res = dc_dmub_srv_cmd_with_reply_data(dm->dc->ctx->dmub_srv, &cmd);
11093 DRM_ERROR("EDID CEA parser failed\n");
11097 output = &cmd.edid_cea.data.output;
11099 if (output->type == DMUB_CMD__EDID_CEA_ACK) {
11100 if (!output->ack.success) {
11101 DRM_ERROR("EDID CEA ack failed at offset %d\n",
11102 output->ack.offset);
11104 } else if (output->type == DMUB_CMD__EDID_CEA_AMD_VSDB) {
11105 if (!output->amd_vsdb.vsdb_found)
11108 vsdb->freesync_supported = output->amd_vsdb.freesync_supported;
11109 vsdb->amd_vsdb_version = output->amd_vsdb.amd_vsdb_version;
11110 vsdb->min_refresh_rate_hz = output->amd_vsdb.min_frame_rate;
11111 vsdb->max_refresh_rate_hz = output->amd_vsdb.max_frame_rate;
11113 DRM_WARN("Unknown EDID CEA parser results\n");
11120 static bool parse_edid_cea_dmcu(struct amdgpu_display_manager *dm,
11121 uint8_t *edid_ext, int len,
11122 struct amdgpu_hdmi_vsdb_info *vsdb_info)
11126 /* send extension block to DMCU for parsing */
11127 for (i = 0; i < len; i += 8) {
11131 /* send 8 bytes at a time */
11132 if (!dc_edid_parser_send_cea(dm->dc, i, len, &edid_ext[i], 8))
11136 /* EDID block fully sent; expect a result */
11137 int version, min_rate, max_rate;
11139 res = dc_edid_parser_recv_amd_vsdb(dm->dc, &version, &min_rate, &max_rate);
11141 /* amd vsdb found */
11142 vsdb_info->freesync_supported = 1;
11143 vsdb_info->amd_vsdb_version = version;
11144 vsdb_info->min_refresh_rate_hz = min_rate;
11145 vsdb_info->max_refresh_rate_hz = max_rate;
11153 res = dc_edid_parser_recv_cea_ack(dm->dc, &offset);
11161 static bool parse_edid_cea_dmub(struct amdgpu_display_manager *dm,
11162 uint8_t *edid_ext, int len,
11163 struct amdgpu_hdmi_vsdb_info *vsdb_info)
11167 /* send extension block to DMUB for parsing */
11168 for (i = 0; i < len; i += 8) {
11169 /* send 8 bytes at a time */
11170 if (!dm_edid_parser_send_cea(dm, i, len, &edid_ext[i], 8, vsdb_info))
11174 return vsdb_info->freesync_supported;
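/*
 * Illustrative sketch only, not driver code: the chunking loop used by
 * parse_edid_cea_dmcu()/parse_edid_cea_dmub() above.  The CEA extension block
 * is streamed to firmware 8 bytes at a time, with each chunk carrying its
 * offset and the total length so the firmware parser knows when the block is
 * complete.  The send callback below is hypothetical.
 */
#if 0	/* example only, never built */
static bool example_send_in_chunks(const unsigned char *data, unsigned int len,
				   bool (*send)(unsigned int offset,
						unsigned int total_len,
						const unsigned char *chunk,
						unsigned int chunk_len))
{
	unsigned int i;

	for (i = 0; i < len; i += 8) {
		unsigned int chunk_len = (len - i) < 8 ? (len - i) : 8;

		if (!send(i, len, &data[i], chunk_len))
			return false;
	}

	return true;
}
#endif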
11177 static bool parse_edid_cea(struct amdgpu_dm_connector *aconnector,
11178 uint8_t *edid_ext, int len,
11179 struct amdgpu_hdmi_vsdb_info *vsdb_info)
11181 struct amdgpu_device *adev = drm_to_adev(aconnector->base.dev);
11183 if (adev->dm.dmub_srv)
11184 return parse_edid_cea_dmub(&adev->dm, edid_ext, len, vsdb_info);
11186 return parse_edid_cea_dmcu(&adev->dm, edid_ext, len, vsdb_info);
11189 static int parse_hdmi_amd_vsdb(struct amdgpu_dm_connector *aconnector,
11190 struct edid *edid, struct amdgpu_hdmi_vsdb_info *vsdb_info)
11192 uint8_t *edid_ext = NULL;
11194 bool valid_vsdb_found = false;
11196 /*----- drm_find_cea_extension() -----*/
11197 /* No EDID or EDID extensions */
11198 if (edid == NULL || edid->extensions == 0)
11201 /* Find CEA extension */
11202 for (i = 0; i < edid->extensions; i++) {
11203 edid_ext = (uint8_t *)edid + EDID_LENGTH * (i + 1);
11204 if (edid_ext[0] == CEA_EXT)
11208 if (i == edid->extensions)
11211 /*----- cea_db_offsets() -----*/
11212 if (edid_ext[0] != CEA_EXT)
11215 valid_vsdb_found = parse_edid_cea(aconnector, edid_ext, EDID_LENGTH, vsdb_info);
11217 return valid_vsdb_found ? i : -ENODEV;
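/*
 * Illustrative sketch only, not driver code: the extension scan that
 * parse_hdmi_amd_vsdb() above mirrors from drm_find_cea_extension().  An EDID
 * is a 128-byte base block followed by edid->extensions extension blocks of
 * 128 bytes each; a CTA-861 (CEA) extension is identified by the tag byte
 * 0x02 at offset 0 of the extension block.
 */
#if 0	/* example only, never built */
#define EXAMPLE_EDID_BLOCK_LEN	128
#define EXAMPLE_CEA_EXT_TAG	0x02

/* Returns the 1-based index of the first CEA extension block, or 0 if none. */
static int example_find_cea_ext(const unsigned char *edid, int num_extensions)
{
	int i;

	for (i = 1; i <= num_extensions; i++) {
		const unsigned char *ext = edid + EXAMPLE_EDID_BLOCK_LEN * i;

		if (ext[0] == EXAMPLE_CEA_EXT_TAG)
			return i;
	}

	return 0;
}
#endif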
11220 void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,
11224 struct detailed_timing *timing;
11225 struct detailed_non_pixel *data;
11226 struct detailed_data_monitor_range *range;
11227 struct amdgpu_dm_connector *amdgpu_dm_connector =
11228 to_amdgpu_dm_connector(connector);
11229 struct dm_connector_state *dm_con_state = NULL;
11230 struct dc_sink *sink;
11232 struct drm_device *dev = connector->dev;
11233 struct amdgpu_device *adev = drm_to_adev(dev);
11234 bool freesync_capable = false;
11235 struct amdgpu_hdmi_vsdb_info vsdb_info = {0};
11237 if (!connector->state) {
11238 DRM_ERROR("%s - Connector has no state", __func__);
11242 sink = amdgpu_dm_connector->dc_sink ?
11243 amdgpu_dm_connector->dc_sink :
11244 amdgpu_dm_connector->dc_em_sink;
11246 if (!edid || !sink) {
11247 dm_con_state = to_dm_connector_state(connector->state);
11249 amdgpu_dm_connector->min_vfreq = 0;
11250 amdgpu_dm_connector->max_vfreq = 0;
11251 amdgpu_dm_connector->pixel_clock_mhz = 0;
11252 connector->display_info.monitor_range.min_vfreq = 0;
11253 connector->display_info.monitor_range.max_vfreq = 0;
11254 freesync_capable = false;
11259 dm_con_state = to_dm_connector_state(connector->state);
11261 if (!adev->dm.freesync_module)
11265 if (sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT
11266 || sink->sink_signal == SIGNAL_TYPE_EDP) {
11267 bool edid_check_required = false;
11270 edid_check_required = is_dp_capable_without_timing_msa(
11272 amdgpu_dm_connector);
11275 if (edid_check_required && (edid->version > 1 ||
11276 (edid->version == 1 && edid->revision > 1))) {
11277 for (i = 0; i < 4; i++) {
11279 timing = &edid->detailed_timings[i];
11280 data = &timing->data.other_data;
11281 range = &data->data.range;
11283 * Check if monitor has continuous frequency mode
11285 if (data->type != EDID_DETAIL_MONITOR_RANGE)
11288 /* Check for flag range limits only. If flag == 1 then
11289 * no additional timing information is provided.
11290 * Default GTF, GTF secondary curve and CVT are not supported. */
11293 if (range->flags != 1)
11296 amdgpu_dm_connector->min_vfreq = range->min_vfreq;
11297 amdgpu_dm_connector->max_vfreq = range->max_vfreq;
11298 amdgpu_dm_connector->pixel_clock_mhz =
11299 range->pixel_clock_mhz * 10;
11301 connector->display_info.monitor_range.min_vfreq = range->min_vfreq;
11302 connector->display_info.monitor_range.max_vfreq = range->max_vfreq;
11307 if (amdgpu_dm_connector->max_vfreq -
11308 amdgpu_dm_connector->min_vfreq > 10) {
11310 freesync_capable = true;
11313 } else if (edid && sink->sink_signal == SIGNAL_TYPE_HDMI_TYPE_A) {
11314 i = parse_hdmi_amd_vsdb(amdgpu_dm_connector, edid, &vsdb_info);
11315 if (i >= 0 && vsdb_info.freesync_supported) {
11316 timing = &edid->detailed_timings[i];
11317 data = &timing->data.other_data;
11319 amdgpu_dm_connector->min_vfreq = vsdb_info.min_refresh_rate_hz;
11320 amdgpu_dm_connector->max_vfreq = vsdb_info.max_refresh_rate_hz;
11321 if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10)
11322 freesync_capable = true;
11324 connector->display_info.monitor_range.min_vfreq = vsdb_info.min_refresh_rate_hz;
11325 connector->display_info.monitor_range.max_vfreq = vsdb_info.max_refresh_rate_hz;
11331 dm_con_state->freesync_capable = freesync_capable;
11333 if (connector->vrr_capable_property)
11334 drm_connector_set_vrr_capable_property(connector, freesync_capable);
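/*
 * Illustrative sketch only, not driver code: the refresh-range test applied
 * by amdgpu_dm_update_freesync_caps() above.  A sink is only reported as
 * FreeSync/VRR capable when its vertical refresh window spans more than
 * 10 Hz, whether that window came from the EDID monitor range descriptor
 * (DP/eDP) or from the AMD VSDB (HDMI).
 */
#if 0	/* example only, never built */
static bool example_freesync_capable(unsigned int min_vfreq, unsigned int max_vfreq)
{
	/* example_freesync_capable(48, 144) -> true  */
	/* example_freesync_capable(60, 60)  -> false */
	return (max_vfreq - min_vfreq) > 10;
}
#endif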
11338 void amdgpu_dm_trigger_timing_sync(struct drm_device *dev)
11340 struct amdgpu_device *adev = drm_to_adev(dev);
11341 struct dc *dc = adev->dm.dc;
11344 mutex_lock(&adev->dm.dc_lock);
11345 if (dc->current_state) {
11346 for (i = 0; i < dc->current_state->stream_count; ++i)
11347 dc->current_state->streams[i]
11348 ->triggered_crtc_reset.enabled =
11349 adev->dm.force_timing_sync;
11351 dm_enable_per_frame_crtc_master_sync(dc->current_state);
11352 dc_trigger_sync(dc, dc->current_state);
11354 mutex_unlock(&adev->dm.dc_lock);
11357 void dm_write_reg_func(const struct dc_context *ctx, uint32_t address,
11358 uint32_t value, const char *func_name)
11360 #ifdef DM_CHECK_ADDR_0
11361 if (address == 0) {
11362 DC_ERR("invalid register write. address = 0");
11366 cgs_write_register(ctx->cgs_device, address, value);
11367 trace_amdgpu_dc_wreg(&ctx->perf_trace->write_count, address, value);
11370 uint32_t dm_read_reg_func(const struct dc_context *ctx, uint32_t address,
11371 const char *func_name)
11374 #ifdef DM_CHECK_ADDR_0
11375 if (address == 0) {
11376 DC_ERR("invalid register read; address = 0\n");
11381 if (ctx->dmub_srv &&
11382 ctx->dmub_srv->reg_helper_offload.gather_in_progress &&
11383 !ctx->dmub_srv->reg_helper_offload.should_burst_write) {
11388 value = cgs_read_register(ctx->cgs_device, address);
11390 trace_amdgpu_dc_rreg(&ctx->perf_trace->read_count, address, value);
11395 int amdgpu_dm_set_dmub_async_sync_status(bool is_cmd_aux, struct dc_context *ctx,
11396 uint8_t status_type, uint32_t *operation_result)
11398 struct amdgpu_device *adev = ctx->driver_context;
11399 int return_status = -1;
11400 struct dmub_notification *p_notify = adev->dm.dmub_notify;
11403 if (status_type == DMUB_ASYNC_TO_SYNC_ACCESS_SUCCESS) {
11404 return_status = p_notify->aux_reply.length;
11405 *operation_result = p_notify->result;
11406 } else if (status_type == DMUB_ASYNC_TO_SYNC_ACCESS_TIMEOUT) {
11407 *operation_result = AUX_RET_ERROR_TIMEOUT;
11408 } else if (status_type == DMUB_ASYNC_TO_SYNC_ACCESS_FAIL) {
11409 *operation_result = AUX_RET_ERROR_ENGINE_ACQUIRE;
11411 *operation_result = AUX_RET_ERROR_UNKNOWN;
11414 if (status_type == DMUB_ASYNC_TO_SYNC_ACCESS_SUCCESS) {
11416 *operation_result = p_notify->sc_status;
11418 *operation_result = SET_CONFIG_UNKNOWN_ERROR;
11422 return return_status;
11425 int amdgpu_dm_process_dmub_aux_transfer_sync(bool is_cmd_aux, struct dc_context *ctx,
11426 unsigned int link_index, void *cmd_payload, void *operation_result)
11428 struct amdgpu_device *adev = ctx->driver_context;
11432 dc_process_dmub_aux_transfer_async(ctx->dc,
11433 link_index, (struct aux_payload *)cmd_payload);
11434 } else if (dc_process_dmub_set_config_async(ctx->dc, link_index,
11435 (struct set_config_cmd_payload *)cmd_payload,
11436 adev->dm.dmub_notify)) {
11437 return amdgpu_dm_set_dmub_async_sync_status(is_cmd_aux,
11438 ctx, DMUB_ASYNC_TO_SYNC_ACCESS_SUCCESS,
11439 (uint32_t *)operation_result);
11442 ret = wait_for_completion_timeout(&adev->dm.dmub_aux_transfer_done, 10 * HZ);
11444 DRM_ERROR("wait_for_completion_timeout timed out!\n");
11445 return amdgpu_dm_set_dmub_async_sync_status(is_cmd_aux,
11446 ctx, DMUB_ASYNC_TO_SYNC_ACCESS_TIMEOUT,
11447 (uint32_t *)operation_result);
11451 if (adev->dm.dmub_notify->result == AUX_RET_SUCCESS) {
11452 struct aux_payload *payload = (struct aux_payload *)cmd_payload;
11454 payload->reply[0] = adev->dm.dmub_notify->aux_reply.command;
11455 if (!payload->write && adev->dm.dmub_notify->aux_reply.length &&
11456 payload->reply[0] == AUX_TRANSACTION_REPLY_AUX_ACK) {
11457 memcpy(payload->data, adev->dm.dmub_notify->aux_reply.data,
11458 adev->dm.dmub_notify->aux_reply.length);
11463 return amdgpu_dm_set_dmub_async_sync_status(is_cmd_aux,
11464 ctx, DMUB_ASYNC_TO_SYNC_ACCESS_SUCCESS,
11465 (uint32_t *)operation_result);
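/*
 * Illustrative sketch only, not driver code: the async-to-sync pattern used
 * by amdgpu_dm_process_dmub_aux_transfer_sync() above.  The request is fired
 * off asynchronously, the caller blocks on a completion with a timeout, and
 * the timeout/failure cases are folded into the same status-reporting helper
 * as the success case.  All names below are hypothetical.
 */
#if 0	/* example only, never built */
enum example_status {
	EXAMPLE_OK,
	EXAMPLE_TIMEOUT,
	EXAMPLE_FAIL,
};

static enum example_status example_transfer_sync(bool (*fire_async)(void),
						 bool (*wait_done)(unsigned int timeout_ms),
						 bool (*reply_ok)(void))
{
	if (!fire_async())
		return EXAMPLE_FAIL;		/* request never left the host */

	if (!wait_done(10 * 1000))		/* 10 second timeout, as above */
		return EXAMPLE_TIMEOUT;

	return reply_ok() ? EXAMPLE_OK : EXAMPLE_FAIL;
}
#endif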