/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: AMD
 */
/* The caprices of the preprocessor require that this be declared right here */
#define CREATE_TRACE_POINTS
#include "dm_services_types.h"
#include "dc.h"
#include "dc_link_dp.h"
#include "link_enc_cfg.h"
#include "dc/inc/core_types.h"
#include "dal_asic_id.h"
#include "dmub/dmub_srv.h"
#include "dc/inc/hw/dmcu.h"
#include "dc/inc/hw/abm.h"
#include "dc/dc_dmub_srv.h"
#include "dc/dc_edid_parser.h"
#include "dc/dc_stat.h"
#include "amdgpu_dm_trace.h"

#include "vid.h"
#include "amdgpu.h"
#include "amdgpu_display.h"
#include "amdgpu_ucode.h"
#include "atom.h"
#include "amdgpu_dm.h"
#ifdef CONFIG_DRM_AMD_DC_HDCP
#include "amdgpu_dm_hdcp.h"
#include <drm/drm_hdcp.h>
#endif
#include "amdgpu_pm.h"
#include "amdgpu_atombios.h"

#include "amd_shared.h"
#include "amdgpu_dm_irq.h"
#include "dm_helpers.h"
#include "amdgpu_dm_mst_types.h"
#if defined(CONFIG_DEBUG_FS)
#include "amdgpu_dm_debugfs.h"
#endif
#include "amdgpu_dm_psr.h"

#include "ivsrcid/ivsrcid_vislands30.h"

#include "i2caux_interface.h"
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/types.h>
#include <linux/pm_runtime.h>
#include <linux/pci.h>
#include <linux/firmware.h>
#include <linux/component.h>

#include <drm/drm_atomic.h>
#include <drm/drm_atomic_uapi.h>
#include <drm/drm_atomic_helper.h>
#include <drm/dp/drm_dp_mst_helper.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_edid.h>
#include <drm/drm_vblank.h>
#include <drm/drm_audio_component.h>

#if defined(CONFIG_DRM_AMD_DC_DCN)
#include "ivsrcid/dcn/irqsrcs_dcn_1_0.h"

#include "dcn/dcn_1_0_offset.h"
#include "dcn/dcn_1_0_sh_mask.h"
#include "soc15_hw_ip.h"
#include "vega10_ip_offset.h"

#include "soc15_common.h"
#endif

#include "modules/inc/mod_freesync.h"
#include "modules/power/power_helpers.h"
#include "modules/inc/mod_info_packet.h"
#define FIRMWARE_RENOIR_DMUB "amdgpu/renoir_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_RENOIR_DMUB);
#define FIRMWARE_SIENNA_CICHLID_DMUB "amdgpu/sienna_cichlid_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_SIENNA_CICHLID_DMUB);
#define FIRMWARE_NAVY_FLOUNDER_DMUB "amdgpu/navy_flounder_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_NAVY_FLOUNDER_DMUB);
#define FIRMWARE_GREEN_SARDINE_DMUB "amdgpu/green_sardine_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_GREEN_SARDINE_DMUB);
#define FIRMWARE_VANGOGH_DMUB "amdgpu/vangogh_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_VANGOGH_DMUB);
#define FIRMWARE_DIMGREY_CAVEFISH_DMUB "amdgpu/dimgrey_cavefish_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_DIMGREY_CAVEFISH_DMUB);
#define FIRMWARE_BEIGE_GOBY_DMUB "amdgpu/beige_goby_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_BEIGE_GOBY_DMUB);
#define FIRMWARE_YELLOW_CARP_DMUB "amdgpu/yellow_carp_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_YELLOW_CARP_DMUB);

#define FIRMWARE_RAVEN_DMCU "amdgpu/raven_dmcu.bin"
MODULE_FIRMWARE(FIRMWARE_RAVEN_DMCU);

#define FIRMWARE_NAVI12_DMCU "amdgpu/navi12_dmcu.bin"
MODULE_FIRMWARE(FIRMWARE_NAVI12_DMCU);
/* Number of bytes in PSP header for firmware. */
#define PSP_HEADER_BYTES 0x100

/* Number of bytes in PSP footer for firmware. */
#define PSP_FOOTER_BYTES 0x100
/**
 * DOC: overview
 *
 * The AMDgpu display manager, **amdgpu_dm** (or even simpler,
 * **dm**) sits between DRM and DC. It acts as a liaison, converting DRM
 * requests into DC requests, and DC responses into DRM responses.
 *
 * The root control structure is &struct amdgpu_display_manager.
 */
/* basic init/fini API */
static int amdgpu_dm_init(struct amdgpu_device *adev);
static void amdgpu_dm_fini(struct amdgpu_device *adev);
static bool is_freesync_video_mode(const struct drm_display_mode *mode, struct amdgpu_dm_connector *aconnector);
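
/* Maps the DP dongle type reported in DPCD to the DRM subconnector type. */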
static enum drm_mode_subconnector get_subconnector_type(struct dc_link *link)
{
	switch (link->dpcd_caps.dongle_type) {
	case DISPLAY_DONGLE_NONE:
		return DRM_MODE_SUBCONNECTOR_Native;
	case DISPLAY_DONGLE_DP_VGA_CONVERTER:
		return DRM_MODE_SUBCONNECTOR_VGA;
	case DISPLAY_DONGLE_DP_DVI_CONVERTER:
	case DISPLAY_DONGLE_DP_DVI_DONGLE:
		return DRM_MODE_SUBCONNECTOR_DVID;
	case DISPLAY_DONGLE_DP_HDMI_CONVERTER:
	case DISPLAY_DONGLE_DP_HDMI_DONGLE:
		return DRM_MODE_SUBCONNECTOR_HDMIA;
	case DISPLAY_DONGLE_DP_HDMI_MISMATCHED_DONGLE:
	default:
		return DRM_MODE_SUBCONNECTOR_Unknown;
	}
}
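
/*
 * Keeps the connector's DP subconnector property in sync with the dongle
 * type of the currently attached sink (Unknown when nothing is attached).
 */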
static void update_subconnector_property(struct amdgpu_dm_connector *aconnector)
{
	struct dc_link *link = aconnector->dc_link;
	struct drm_connector *connector = &aconnector->base;
	enum drm_mode_subconnector subconnector = DRM_MODE_SUBCONNECTOR_Unknown;

	if (connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort)
		return;

	if (aconnector->dc_sink)
		subconnector = get_subconnector_type(link);

	drm_object_property_set_value(&connector->base,
			connector->dev->mode_config.dp_subconnector_property,
			subconnector);
}
/*
 * initializes drm_device display related structures, based on the information
 * provided by DAL. The drm structures are: drm_crtc, drm_connector,
 * drm_encoder, drm_mode_config
 *
 * Returns 0 on success
 */
static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev);
/* removes and deallocates the drm structures, created by the above function */
static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm);
static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
				struct drm_plane *plane,
				unsigned long possible_crtcs,
				const struct dc_plane_cap *plane_cap);
static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
			       struct drm_plane *plane,
			       uint32_t link_index);
static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
				    struct amdgpu_dm_connector *amdgpu_dm_connector,
				    uint32_t link_index,
				    struct amdgpu_encoder *amdgpu_encoder);
static int amdgpu_dm_encoder_init(struct drm_device *dev,
				  struct amdgpu_encoder *aencoder,
				  uint32_t link_index);

static int amdgpu_dm_connector_get_modes(struct drm_connector *connector);

static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state);

static int amdgpu_dm_atomic_check(struct drm_device *dev,
				  struct drm_atomic_state *state);

static void handle_cursor_update(struct drm_plane *plane,
				 struct drm_plane_state *old_plane_state);

static const struct drm_format_info *
amd_get_format_info(const struct drm_mode_fb_cmd2 *cmd);

static void handle_hpd_irq_helper(struct amdgpu_dm_connector *aconnector);
static void handle_hpd_rx_irq(void *param);

static bool
is_timing_unchanged_for_freesync(struct drm_crtc_state *old_crtc_state,
				 struct drm_crtc_state *new_crtc_state);
/**
 * dm_vblank_get_counter
 *
 * @brief
 * Get counter for number of vertical blanks
 *
 * @param
 * struct amdgpu_device *adev - [in] desired amdgpu device
 * int crtc - [in] which CRTC to get the counter from
 *
 * @return
 * Counter for vertical blanks
 */
static u32 dm_vblank_get_counter(struct amdgpu_device *adev, int crtc)
{
	if (crtc >= adev->mode_info.num_crtc)
		return 0;
	else {
		struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];

		if (acrtc->dm_irq_params.stream == NULL) {
			DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
				  crtc);
			return 0;
		}

		return dc_stream_get_vblank_counter(acrtc->dm_irq_params.stream);
	}
}
static int dm_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
				  u32 *vbl, u32 *position)
{
	uint32_t v_blank_start, v_blank_end, h_position, v_position;

	if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc))
		return -EINVAL;
	else {
		struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];

		if (acrtc->dm_irq_params.stream == NULL) {
			DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
				  crtc);
			return 0;
		}

		/*
		 * TODO rework base driver to use values directly.
		 * for now parse it back into reg-format
		 */
		dc_stream_get_scanoutpos(acrtc->dm_irq_params.stream,
					 &v_blank_start,
					 &v_blank_end,
					 &h_position,
					 &v_position);

		*position = v_position | (h_position << 16);
		*vbl = v_blank_start | (v_blank_end << 16);
	}

	return 0;
}
static bool dm_is_idle(void *handle)
{
	/* XXX todo */
	return true;
}

static int dm_wait_for_idle(void *handle)
{
	/* XXX todo */
	return 0;
}

static bool dm_check_soft_reset(void *handle)
{
	return false;
}

static int dm_soft_reset(void *handle)
{
	/* XXX todo */
	return 0;
}
static struct amdgpu_crtc *
get_crtc_by_otg_inst(struct amdgpu_device *adev,
		     int otg_inst)
{
	struct drm_device *dev = adev_to_drm(adev);
	struct drm_crtc *crtc;
	struct amdgpu_crtc *amdgpu_crtc;

	if (WARN_ON(otg_inst == -1))
		return adev->mode_info.crtcs[0];

	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
		amdgpu_crtc = to_amdgpu_crtc(crtc);

		if (amdgpu_crtc->otg_inst == otg_inst)
			return amdgpu_crtc;
	}

	return NULL;
}
static inline bool amdgpu_dm_vrr_active_irq(struct amdgpu_crtc *acrtc)
{
	return acrtc->dm_irq_params.freesync_config.state ==
		       VRR_STATE_ACTIVE_VARIABLE ||
	       acrtc->dm_irq_params.freesync_config.state ==
		       VRR_STATE_ACTIVE_FIXED;
}

static inline bool amdgpu_dm_vrr_active(struct dm_crtc_state *dm_state)
{
	return dm_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE ||
	       dm_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED;
}
static inline bool is_dc_timing_adjust_needed(struct dm_crtc_state *old_state,
					      struct dm_crtc_state *new_state)
{
	if (new_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED)
		return true;
	else if (amdgpu_dm_vrr_active(old_state) != amdgpu_dm_vrr_active(new_state))
		return true;
	else
		return false;
}
/**
 * dm_pflip_high_irq() - Handle pageflip interrupt
 * @interrupt_params: ignored
 *
 * Handles the pageflip interrupt by notifying all interested parties
 * that the pageflip has been completed.
 */
static void dm_pflip_high_irq(void *interrupt_params)
{
	struct amdgpu_crtc *amdgpu_crtc;
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	unsigned long flags;
	struct drm_pending_vblank_event *e;
	uint32_t vpos, hpos, v_blank_start, v_blank_end;
	bool vrr_active;

	amdgpu_crtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_PFLIP);

	/* IRQ could occur when in initial stage */
	/* TODO work and BO cleanup */
	if (amdgpu_crtc == NULL) {
		DC_LOG_PFLIP("CRTC is null, returning.\n");
		return;
	}

	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);

	if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED) {
		DC_LOG_PFLIP("amdgpu_crtc->pflip_status = %d != AMDGPU_FLIP_SUBMITTED(%d) on crtc:%d[%p]\n",
			     amdgpu_crtc->pflip_status,
			     AMDGPU_FLIP_SUBMITTED,
			     amdgpu_crtc->crtc_id,
			     amdgpu_crtc);
		spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
		return;
	}

	/* page flip completed. */
	e = amdgpu_crtc->event;
	amdgpu_crtc->event = NULL;

	WARN_ON(!e);

	vrr_active = amdgpu_dm_vrr_active_irq(amdgpu_crtc);

	/* Fixed refresh rate, or VRR scanout position outside front-porch? */
	if (!vrr_active ||
	    !dc_stream_get_scanoutpos(amdgpu_crtc->dm_irq_params.stream, &v_blank_start,
				      &v_blank_end, &hpos, &vpos) ||
	    (vpos < v_blank_start)) {
		/* Update to correct count and vblank timestamp if racing with
		 * vblank irq. This also updates to the correct vblank timestamp
		 * even in VRR mode, as scanout is past the front-porch atm.
		 */
		drm_crtc_accurate_vblank_count(&amdgpu_crtc->base);

		/* Wake up userspace by sending the pageflip event with proper
		 * count and timestamp of vblank of flip completion.
		 */
		if (e) {
			drm_crtc_send_vblank_event(&amdgpu_crtc->base, e);

			/* Event sent, so done with vblank for this flip */
			drm_crtc_vblank_put(&amdgpu_crtc->base);
		}
	} else if (e) {
		/* VRR active and inside front-porch: vblank count and
		 * timestamp for pageflip event will only be up to date after
		 * drm_crtc_handle_vblank() has been executed from late vblank
		 * irq handler after start of back-porch (vline 0). We queue the
		 * pageflip event for send-out by drm_crtc_handle_vblank() with
		 * updated timestamp and count, once it runs after us.
		 *
		 * We need to open-code this instead of using the helper
		 * drm_crtc_arm_vblank_event(), as that helper would
		 * call drm_crtc_accurate_vblank_count(), which we must
		 * not call in VRR mode while we are in front-porch!
		 */

		/* sequence will be replaced by real count during send-out. */
		e->sequence = drm_crtc_vblank_count(&amdgpu_crtc->base);
		e->pipe = amdgpu_crtc->crtc_id;

		list_add_tail(&e->base.link, &adev_to_drm(adev)->vblank_event_list);
		e = NULL;
	}

	/* Keep track of vblank of this flip for flip throttling. We use the
	 * cooked hw counter, as that one incremented at start of this vblank
	 * of pageflip completion, so last_flip_vblank is the forbidden count
	 * for queueing new pageflips if vsync + VRR is enabled.
	 */
	amdgpu_crtc->dm_irq_params.last_flip_vblank =
		amdgpu_get_vblank_counter_kms(&amdgpu_crtc->base);

	amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE;
	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);

	DC_LOG_PFLIP("crtc:%d[%p], pflip_stat:AMDGPU_FLIP_NONE, vrr[%d]-fp %d\n",
		     amdgpu_crtc->crtc_id, amdgpu_crtc,
		     vrr_active, (int) !e);
}
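
/**
 * dm_vupdate_high_irq() - Handles VUpdate interrupt
 * @interrupt_params: used for determining the CRTC instance
 *
 * Tracks the measured refresh rate and, in VRR mode, performs the core
 * vblank handling (plus BTR processing on pre-DCE12 ASICs) after the end
 * of front-porch, where vblank timestamps are valid.
 */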
static void dm_vupdate_high_irq(void *interrupt_params)
{
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	struct amdgpu_crtc *acrtc;
	struct drm_device *drm_dev;
	struct drm_vblank_crtc *vblank;
	ktime_t frame_duration_ns, previous_timestamp;
	unsigned long flags;
	int vrr_active;

	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VUPDATE);

	if (acrtc) {
		vrr_active = amdgpu_dm_vrr_active_irq(acrtc);
		drm_dev = acrtc->base.dev;
		vblank = &drm_dev->vblank[acrtc->base.index];
		previous_timestamp = atomic64_read(&irq_params->previous_timestamp);
		frame_duration_ns = vblank->time - previous_timestamp;

		if (frame_duration_ns > 0) {
			trace_amdgpu_refresh_rate_track(acrtc->base.index,
						frame_duration_ns,
						ktime_divns(NSEC_PER_SEC, frame_duration_ns));
			atomic64_set(&irq_params->previous_timestamp, vblank->time);
		}

		DC_LOG_VBLANK("crtc:%d, vupdate-vrr:%d\n",
			      acrtc->crtc_id,
			      vrr_active);

		/* Core vblank handling is done here after end of front-porch in
		 * vrr mode, as vblank timestamping will give valid results
		 * while now done after front-porch. This will also deliver
		 * page-flip completion events that have been queued to us
		 * if a pageflip happened inside front-porch.
		 */
		if (vrr_active) {
			drm_crtc_handle_vblank(&acrtc->base);

			/* BTR processing for pre-DCE12 ASICs */
			if (acrtc->dm_irq_params.stream &&
			    adev->family < AMDGPU_FAMILY_AI) {
				spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
				mod_freesync_handle_v_update(
						adev->dm.freesync_module,
						acrtc->dm_irq_params.stream,
						&acrtc->dm_irq_params.vrr_params);

				dc_stream_adjust_vmin_vmax(
						adev->dm.dc,
						acrtc->dm_irq_params.stream,
						&acrtc->dm_irq_params.vrr_params.adjust);
				spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
			}
		}
	}
}
/**
 * dm_crtc_high_irq() - Handles CRTC interrupt
 * @interrupt_params: used for determining the CRTC instance
 *
 * Handles the CRTC/VSYNC interrupt by notifying DRM's VBLANK
 * event handler.
 */
static void dm_crtc_high_irq(void *interrupt_params)
{
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	struct amdgpu_crtc *acrtc;
	unsigned long flags;
	int vrr_active;

	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VBLANK);
	if (!acrtc)
		return;

	vrr_active = amdgpu_dm_vrr_active_irq(acrtc);

	DC_LOG_VBLANK("crtc:%d, vupdate-vrr:%d, planes:%d\n", acrtc->crtc_id,
		      vrr_active, acrtc->dm_irq_params.active_planes);

	/*
	 * Core vblank handling at start of front-porch is only possible
	 * in non-vrr mode, as only there vblank timestamping will give
	 * valid results while done in front-porch. Otherwise defer it
	 * to dm_vupdate_high_irq after end of front-porch.
	 */
	if (!vrr_active)
		drm_crtc_handle_vblank(&acrtc->base);

	/*
	 * Following stuff must happen at start of vblank, for crc
	 * computation and below-the-range btr support in vrr mode.
	 */
	amdgpu_dm_crtc_handle_crc_irq(&acrtc->base);

	/* BTR updates need to happen before VUPDATE on Vega and above. */
	if (adev->family < AMDGPU_FAMILY_AI)
		return;

	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);

	if (acrtc->dm_irq_params.stream &&
	    acrtc->dm_irq_params.vrr_params.supported &&
	    acrtc->dm_irq_params.freesync_config.state ==
		    VRR_STATE_ACTIVE_VARIABLE) {
		mod_freesync_handle_v_update(adev->dm.freesync_module,
					     acrtc->dm_irq_params.stream,
					     &acrtc->dm_irq_params.vrr_params);

		dc_stream_adjust_vmin_vmax(adev->dm.dc, acrtc->dm_irq_params.stream,
					   &acrtc->dm_irq_params.vrr_params.adjust);
	}

	/*
	 * If there aren't any active_planes then DCH HUBP may be clock-gated.
	 * In that case, pageflip completion interrupts won't fire and pageflip
	 * completion events won't get delivered. Prevent this by sending
	 * pending pageflip events from here if a flip is still pending.
	 *
	 * If any planes are enabled, use dm_pflip_high_irq() instead, to
	 * avoid race conditions between flip programming and completion,
	 * which could cause too early flip completion events.
	 */
	if (adev->family >= AMDGPU_FAMILY_RV &&
	    acrtc->pflip_status == AMDGPU_FLIP_SUBMITTED &&
	    acrtc->dm_irq_params.active_planes == 0) {
		if (acrtc->event) {
			drm_crtc_send_vblank_event(&acrtc->base, acrtc->event);
			acrtc->event = NULL;
			drm_crtc_vblank_put(&acrtc->base);
		}
		acrtc->pflip_status = AMDGPU_FLIP_NONE;
	}

	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
}
#if defined(CONFIG_DRM_AMD_DC_DCN)
#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
/**
 * dm_dcn_vertical_interrupt0_high_irq() - Handles OTG Vertical interrupt0 for
 * DCN generation ASICs
 * @interrupt_params: interrupt parameters
 *
 * Used to set crc window/read out crc value at vertical line 0 position
 */
static void dm_dcn_vertical_interrupt0_high_irq(void *interrupt_params)
{
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	struct amdgpu_crtc *acrtc;

	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VLINE0);

	if (!acrtc)
		return;

	amdgpu_dm_crtc_handle_crc_window_irq(&acrtc->base);
}
#endif /* CONFIG_DRM_AMD_SECURE_DISPLAY */
/**
 * dmub_aux_setconfig_callback - Callback for AUX or SET_CONFIG command.
 * @adev: amdgpu_device pointer
 * @notify: dmub notification structure
 *
 * Dmub AUX or SET_CONFIG command completion processing callback.
 * Copies dmub notification to DM which is to be read by the AUX command
 * issuing thread, and also signals the event to wake up the thread.
 */
static void dmub_aux_setconfig_callback(struct amdgpu_device *adev,
					struct dmub_notification *notify)
{
	if (adev->dm.dmub_notify)
		memcpy(adev->dm.dmub_notify, notify, sizeof(struct dmub_notification));
	if (notify->type == DMUB_NOTIFICATION_AUX_REPLY)
		complete(&adev->dm.dmub_aux_transfer_done);
}
/**
 * dmub_hpd_callback - DMUB HPD interrupt processing callback.
 * @adev: amdgpu_device pointer
 * @notify: dmub notification structure
 *
 * Dmub HPD interrupt processing callback. Gets the display index through
 * the link index and calls the helper to do the processing.
 */
static void dmub_hpd_callback(struct amdgpu_device *adev,
			      struct dmub_notification *notify)
{
	struct amdgpu_dm_connector *aconnector;
	struct amdgpu_dm_connector *hpd_aconnector = NULL;
	struct drm_connector *connector;
	struct drm_connector_list_iter iter;
	struct dc_link *link;
	uint8_t link_index = 0;
	struct drm_device *dev;

	if (adev == NULL)
		return;

	if (notify == NULL) {
		DRM_ERROR("DMUB HPD callback notification was NULL");
		return;
	}

	if (notify->link_index > adev->dm.dc->link_count) {
		DRM_ERROR("DMUB HPD index (%u) is abnormal", notify->link_index);
		return;
	}

	link_index = notify->link_index;
	link = adev->dm.dc->links[link_index];
	dev = adev->dm.ddev;

	drm_connector_list_iter_begin(dev, &iter);
	drm_for_each_connector_iter(connector, &iter) {
		aconnector = to_amdgpu_dm_connector(connector);
		if (link && aconnector->dc_link == link) {
			DRM_INFO("DMUB HPD callback: link_index=%u\n", link_index);
			hpd_aconnector = aconnector;
			break;
		}
	}
	drm_connector_list_iter_end(&iter);

	if (hpd_aconnector) {
		if (notify->type == DMUB_NOTIFICATION_HPD)
			handle_hpd_irq_helper(hpd_aconnector);
		else if (notify->type == DMUB_NOTIFICATION_HPD_IRQ)
			handle_hpd_rx_irq(hpd_aconnector);
	}
}
/**
 * register_dmub_notify_callback - Sets callback for DMUB notify
 * @adev: amdgpu_device pointer
 * @type: Type of dmub notification
 * @callback: Dmub interrupt callback function
 * @dmub_int_thread_offload: offload indicator
 *
 * API to register a dmub callback handler for a dmub notification.
 * Also sets an indicator whether callback processing is to be offloaded
 * to the dmub interrupt handling thread.
 * Return: true if successfully registered, false if there is existing registration
 */
static bool register_dmub_notify_callback(struct amdgpu_device *adev,
					  enum dmub_notification_type type,
					  dmub_notify_interrupt_callback_t callback,
					  bool dmub_int_thread_offload)
{
	if (callback != NULL && type < ARRAY_SIZE(adev->dm.dmub_thread_offload)) {
		adev->dm.dmub_callback[type] = callback;
		adev->dm.dmub_thread_offload[type] = dmub_int_thread_offload;
	} else
		return false;

	return true;
}
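
/*
 * Work item that runs an offloaded DMUB notification callback in process
 * context and then frees the notification and the work item itself.
 */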
static void dm_handle_hpd_work(struct work_struct *work)
{
	struct dmub_hpd_work *dmub_hpd_wrk;

	dmub_hpd_wrk = container_of(work, struct dmub_hpd_work, handle_hpd_work);

	if (!dmub_hpd_wrk->dmub_notify) {
		DRM_ERROR("dmub_hpd_wrk dmub_notify is NULL");
		return;
	}

	if (dmub_hpd_wrk->dmub_notify->type < ARRAY_SIZE(dmub_hpd_wrk->adev->dm.dmub_callback)) {
		dmub_hpd_wrk->adev->dm.dmub_callback[dmub_hpd_wrk->dmub_notify->type](dmub_hpd_wrk->adev,
		dmub_hpd_wrk->dmub_notify);
	}

	kfree(dmub_hpd_wrk->dmub_notify);
	kfree(dmub_hpd_wrk);
}
#define DMUB_TRACE_MAX_READ 64
/**
 * dm_dmub_outbox1_low_irq() - Handles Outbox interrupt
 * @interrupt_params: used for determining the Outbox instance
 *
 * Handles the Outbox interrupt.
 */
static void dm_dmub_outbox1_low_irq(void *interrupt_params)
{
	struct dmub_notification notify;
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	struct amdgpu_display_manager *dm = &adev->dm;
	struct dmcub_trace_buf_entry entry = { 0 };
	uint32_t count = 0;
	struct dmub_hpd_work *dmub_hpd_wrk;
	struct dc_link *plink = NULL;

	if (dc_enable_dmub_notifications(adev->dm.dc) &&
	    irq_params->irq_src == DC_IRQ_SOURCE_DMCUB_OUTBOX) {

		do {
			dc_stat_get_dmub_notification(adev->dm.dc, &notify);
			if (notify.type > ARRAY_SIZE(dm->dmub_thread_offload)) {
				DRM_ERROR("DM: notify type %d invalid!", notify.type);
				continue;
			}
			if (!dm->dmub_callback[notify.type]) {
				DRM_DEBUG_DRIVER("DMUB notification skipped, no handler: type=%d\n", notify.type);
				continue;
			}
			if (dm->dmub_thread_offload[notify.type] == true) {
				dmub_hpd_wrk = kzalloc(sizeof(*dmub_hpd_wrk), GFP_ATOMIC);
				if (!dmub_hpd_wrk) {
					DRM_ERROR("Failed to allocate dmub_hpd_wrk");
					return;
				}
				dmub_hpd_wrk->dmub_notify = kzalloc(sizeof(struct dmub_notification), GFP_ATOMIC);
				if (!dmub_hpd_wrk->dmub_notify) {
					kfree(dmub_hpd_wrk);
					DRM_ERROR("Failed to allocate dmub_hpd_wrk->dmub_notify");
					return;
				}
				INIT_WORK(&dmub_hpd_wrk->handle_hpd_work, dm_handle_hpd_work);
				if (dmub_hpd_wrk->dmub_notify)
					memcpy(dmub_hpd_wrk->dmub_notify, &notify, sizeof(struct dmub_notification));
				dmub_hpd_wrk->adev = adev;
				if (notify.type == DMUB_NOTIFICATION_HPD) {
					plink = adev->dm.dc->links[notify.link_index];
					if (plink) {
						plink->hpd_status =
							notify.hpd_status == DP_HPD_PLUG;
					}
				}
				queue_work(adev->dm.delayed_hpd_wq, &dmub_hpd_wrk->handle_hpd_work);
			} else {
				dm->dmub_callback[notify.type](adev, &notify);
			}
		} while (notify.pending_notification);
	}

	do {
		if (dc_dmub_srv_get_dmub_outbox0_msg(dm->dc, &entry)) {
			trace_amdgpu_dmub_trace_high_irq(entry.trace_code, entry.tick_count,
							 entry.param0, entry.param1);

			DRM_DEBUG_DRIVER("trace_code:%u, tick_count:%u, param0:%u, param1:%u\n",
					 entry.trace_code, entry.tick_count, entry.param0, entry.param1);
		} else
			break;

		count++;

	} while (count <= DMUB_TRACE_MAX_READ);

	if (count > DMUB_TRACE_MAX_READ)
		DRM_DEBUG_DRIVER("Warning: count > DMUB_TRACE_MAX_READ");
}
#endif /* CONFIG_DRM_AMD_DC_DCN */
static int dm_set_clockgating_state(void *handle,
				    enum amd_clockgating_state state)
{
	return 0;
}

static int dm_set_powergating_state(void *handle,
				    enum amd_powergating_state state)
{
	return 0;
}

/* Prototypes of private functions */
static int dm_early_init(void *handle);
/* Allocate memory for FBC compressed data  */
static void amdgpu_dm_fbc_init(struct drm_connector *connector)
{
	struct drm_device *dev = connector->dev;
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct dm_compressor_info *compressor = &adev->dm.compressor;
	struct amdgpu_dm_connector *aconn = to_amdgpu_dm_connector(connector);
	struct drm_display_mode *mode;
	unsigned long max_size = 0;

	if (adev->dm.dc->fbc_compressor == NULL)
		return;

	if (aconn->dc_link->connector_signal != SIGNAL_TYPE_EDP)
		return;

	if (compressor->bo_ptr)
		return;

	list_for_each_entry(mode, &connector->modes, head) {
		if (max_size < mode->htotal * mode->vtotal)
			max_size = mode->htotal * mode->vtotal;
	}

	if (max_size) {
		int r = amdgpu_bo_create_kernel(adev, max_size * 4, PAGE_SIZE,
			    AMDGPU_GEM_DOMAIN_GTT, &compressor->bo_ptr,
			    &compressor->gpu_addr, &compressor->cpu_addr);

		if (r)
			DRM_ERROR("DM: Failed to initialize FBC\n");
		else {
			adev->dm.dc->ctx->fbc_gpu_addr = compressor->gpu_addr;
			DRM_INFO("DM: FBC alloc %lu\n", max_size*4);
		}
	}
}
static int amdgpu_dm_audio_component_get_eld(struct device *kdev, int port,
					     int pipe, bool *enabled,
					     unsigned char *buf, int max_bytes)
{
	struct drm_device *dev = dev_get_drvdata(kdev);
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct drm_connector *connector;
	struct drm_connector_list_iter conn_iter;
	struct amdgpu_dm_connector *aconnector;
	int ret = 0;

	*enabled = false;

	mutex_lock(&adev->dm.audio_lock);

	drm_connector_list_iter_begin(dev, &conn_iter);
	drm_for_each_connector_iter(connector, &conn_iter) {
		aconnector = to_amdgpu_dm_connector(connector);
		if (aconnector->audio_inst != port)
			continue;

		*enabled = true;
		ret = drm_eld_size(connector->eld);
		memcpy(buf, connector->eld, min(max_bytes, ret));

		break;
	}
	drm_connector_list_iter_end(&conn_iter);

	mutex_unlock(&adev->dm.audio_lock);

	DRM_DEBUG_KMS("Get ELD : idx=%d ret=%d en=%d\n", port, ret, *enabled);

	return ret;
}
static const struct drm_audio_component_ops amdgpu_dm_audio_component_ops = {
	.get_eld = amdgpu_dm_audio_component_get_eld,
};

static int amdgpu_dm_audio_component_bind(struct device *kdev,
					  struct device *hda_kdev, void *data)
{
	struct drm_device *dev = dev_get_drvdata(kdev);
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct drm_audio_component *acomp = data;

	acomp->ops = &amdgpu_dm_audio_component_ops;
	acomp->dev = kdev;
	adev->dm.audio_component = acomp;

	return 0;
}

static void amdgpu_dm_audio_component_unbind(struct device *kdev,
					     struct device *hda_kdev, void *data)
{
	struct drm_device *dev = dev_get_drvdata(kdev);
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct drm_audio_component *acomp = data;

	acomp->ops = NULL;
	acomp->dev = NULL;
	adev->dm.audio_component = NULL;
}

static const struct component_ops amdgpu_dm_audio_component_bind_ops = {
	.bind	= amdgpu_dm_audio_component_bind,
	.unbind	= amdgpu_dm_audio_component_unbind,
};
static int amdgpu_dm_audio_init(struct amdgpu_device *adev)
{
	int i, ret;

	if (!amdgpu_audio)
		return 0;

	adev->mode_info.audio.enabled = true;

	adev->mode_info.audio.num_pins = adev->dm.dc->res_pool->audio_count;

	for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
		adev->mode_info.audio.pin[i].channels = -1;
		adev->mode_info.audio.pin[i].rate = -1;
		adev->mode_info.audio.pin[i].bits_per_sample = -1;
		adev->mode_info.audio.pin[i].status_bits = 0;
		adev->mode_info.audio.pin[i].category_code = 0;
		adev->mode_info.audio.pin[i].connected = false;
		adev->mode_info.audio.pin[i].id =
			adev->dm.dc->res_pool->audios[i]->inst;
		adev->mode_info.audio.pin[i].offset = 0;
	}

	ret = component_add(adev->dev, &amdgpu_dm_audio_component_bind_ops);
	if (ret < 0)
		return ret;

	adev->dm.audio_registered = true;

	return 0;
}
static void amdgpu_dm_audio_fini(struct amdgpu_device *adev)
{
	if (!amdgpu_audio)
		return;

	if (!adev->mode_info.audio.enabled)
		return;

	if (adev->dm.audio_registered) {
		component_del(adev->dev, &amdgpu_dm_audio_component_bind_ops);
		adev->dm.audio_registered = false;
	}

	/* TODO: Disable audio? */

	adev->mode_info.audio.enabled = false;
}
static void amdgpu_dm_audio_eld_notify(struct amdgpu_device *adev, int pin)
{
	struct drm_audio_component *acomp = adev->dm.audio_component;

	if (acomp && acomp->audio_ops && acomp->audio_ops->pin_eld_notify) {
		DRM_DEBUG_KMS("Notify ELD: %d\n", pin);

		acomp->audio_ops->pin_eld_notify(acomp->audio_ops->audio_ptr,
						 pin, -1);
	}
}
static int dm_dmub_hw_init(struct amdgpu_device *adev)
{
	const struct dmcub_firmware_header_v1_0 *hdr;
	struct dmub_srv *dmub_srv = adev->dm.dmub_srv;
	struct dmub_srv_fb_info *fb_info = adev->dm.dmub_fb_info;
	const struct firmware *dmub_fw = adev->dm.dmub_fw;
	struct dmcu *dmcu = adev->dm.dc->res_pool->dmcu;
	struct abm *abm = adev->dm.dc->res_pool->abm;
	struct dmub_srv_hw_params hw_params;
	enum dmub_status status;
	const unsigned char *fw_inst_const, *fw_bss_data;
	uint32_t i, fw_inst_const_size, fw_bss_data_size;
	bool has_hw_support;
	struct dc *dc = adev->dm.dc;

	if (!dmub_srv)
		/* DMUB isn't supported on the ASIC. */
		return 0;

	if (!fb_info) {
		DRM_ERROR("No framebuffer info for DMUB service.\n");
		return -EINVAL;
	}

	if (!dmub_fw) {
		/* Firmware required for DMUB support. */
		DRM_ERROR("No firmware provided for DMUB.\n");
		return -EINVAL;
	}

	status = dmub_srv_has_hw_support(dmub_srv, &has_hw_support);
	if (status != DMUB_STATUS_OK) {
		DRM_ERROR("Error checking HW support for DMUB: %d\n", status);
		return -EINVAL;
	}

	if (!has_hw_support) {
		DRM_INFO("DMUB unsupported on ASIC\n");
		return 0;
	}

	/* Reset DMCUB if it was previously running - before we overwrite its memory. */
	status = dmub_srv_hw_reset(dmub_srv);
	if (status != DMUB_STATUS_OK)
		DRM_WARN("Error resetting DMUB HW: %d\n", status);

	hdr = (const struct dmcub_firmware_header_v1_0 *)dmub_fw->data;

	fw_inst_const = dmub_fw->data +
			le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
			PSP_HEADER_BYTES;

	fw_bss_data = dmub_fw->data +
		      le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
		      le32_to_cpu(hdr->inst_const_bytes);

	/* Copy firmware and bios info into FB memory. */
	fw_inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
			     PSP_HEADER_BYTES - PSP_FOOTER_BYTES;

	fw_bss_data_size = le32_to_cpu(hdr->bss_data_bytes);

	/* if adev->firmware.load_type == AMDGPU_FW_LOAD_PSP,
	 * amdgpu_ucode_init_single_fw will load dmub firmware
	 * fw_inst_const part to cw0; otherwise, the firmware back door load
	 * will be done by dm_dmub_hw_init
	 */
	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
		memcpy(fb_info->fb[DMUB_WINDOW_0_INST_CONST].cpu_addr, fw_inst_const,
		       fw_inst_const_size);
	}

	if (fw_bss_data_size)
		memcpy(fb_info->fb[DMUB_WINDOW_2_BSS_DATA].cpu_addr,
		       fw_bss_data, fw_bss_data_size);

	/* Copy firmware bios info into FB memory. */
	memcpy(fb_info->fb[DMUB_WINDOW_3_VBIOS].cpu_addr, adev->bios,
	       adev->bios_size);

	/* Reset regions that need to be reset. */
	memset(fb_info->fb[DMUB_WINDOW_4_MAILBOX].cpu_addr, 0,
	       fb_info->fb[DMUB_WINDOW_4_MAILBOX].size);

	memset(fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].cpu_addr, 0,
	       fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].size);

	memset(fb_info->fb[DMUB_WINDOW_6_FW_STATE].cpu_addr, 0,
	       fb_info->fb[DMUB_WINDOW_6_FW_STATE].size);

	/* Initialize hardware. */
	memset(&hw_params, 0, sizeof(hw_params));
	hw_params.fb_base = adev->gmc.fb_start;
	hw_params.fb_offset = adev->gmc.aper_base;

	/* backdoor load firmware and trigger dmub running */
	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
		hw_params.load_inst_const = true;

	if (dmcu)
		hw_params.psp_version = dmcu->psp_version;

	for (i = 0; i < fb_info->num_fb; ++i)
		hw_params.fb[i] = &fb_info->fb[i];

	switch (adev->asic_type) {
	case CHIP_YELLOW_CARP:
		if (dc->ctx->asic_id.hw_internal_rev != YELLOW_CARP_A0) {
			hw_params.dpia_supported = true;
#if defined(CONFIG_DRM_AMD_DC_DCN)
			hw_params.disable_dpia = dc->debug.dpia_debug.bits.disable_dpia;
#endif
		}
		break;
	default:
		break;
	}

	status = dmub_srv_hw_init(dmub_srv, &hw_params);
	if (status != DMUB_STATUS_OK) {
		DRM_ERROR("Error initializing DMUB HW: %d\n", status);
		return -EINVAL;
	}

	/* Wait for firmware load to finish. */
	status = dmub_srv_wait_for_auto_load(dmub_srv, 100000);
	if (status != DMUB_STATUS_OK)
		DRM_WARN("Wait for DMUB auto-load failed: %d\n", status);

	/* Init DMCU and ABM if available. */
	if (dmcu && abm) {
		dmcu->funcs->dmcu_init(dmcu);
		abm->dmcu_is_running = dmcu->funcs->is_dmcu_initialized(dmcu);
	}

	if (!adev->dm.dc->ctx->dmub_srv)
		adev->dm.dc->ctx->dmub_srv = dc_dmub_srv_create(adev->dm.dc, dmub_srv);
	if (!adev->dm.dc->ctx->dmub_srv) {
		DRM_ERROR("Couldn't allocate DC DMUB server!\n");
		return -ENOMEM;
	}

	DRM_INFO("DMUB hardware initialized: version=0x%08X\n",
		 adev->dm.dmcub_fw_version);

	return 0;
}
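
/*
 * Resume helper: if DMUB is still initialized, only wait for the firmware
 * auto-load to finish; otherwise redo the full dm_dmub_hw_init().
 */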
static void dm_dmub_hw_resume(struct amdgpu_device *adev)
{
	struct dmub_srv *dmub_srv = adev->dm.dmub_srv;
	enum dmub_status status;
	bool init;

	if (!dmub_srv)
		/* DMUB isn't supported on the ASIC. */
		return;

	status = dmub_srv_is_hw_init(dmub_srv, &init);
	if (status != DMUB_STATUS_OK)
		DRM_WARN("DMUB hardware init check failed: %d\n", status);

	if (status == DMUB_STATUS_OK && init) {
		/* Wait for firmware load to finish. */
		status = dmub_srv_wait_for_auto_load(dmub_srv, 100000);
		if (status != DMUB_STATUS_OK)
			DRM_WARN("Wait for DMUB auto-load failed: %d\n", status);
	} else {
		/* Perform the full hardware initialization. */
		dm_dmub_hw_init(adev);
	}
}
#if defined(CONFIG_DRM_AMD_DC_DCN)
static void mmhub_read_system_context(struct amdgpu_device *adev, struct dc_phy_addr_space_config *pa_config)
{
	uint64_t pt_base;
	uint32_t logical_addr_low;
	uint32_t logical_addr_high;
	uint32_t agp_base, agp_bot, agp_top;
	PHYSICAL_ADDRESS_LOC page_table_start, page_table_end, page_table_base;

	memset(pa_config, 0, sizeof(*pa_config));

	logical_addr_low = min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18;
	pt_base = amdgpu_gmc_pd_addr(adev->gart.bo);

	if (adev->apu_flags & AMD_APU_IS_RAVEN2)
		/*
		 * Raven2 has a HW issue that it is unable to use the vram which
		 * is out of MC_VM_SYSTEM_APERTURE_HIGH_ADDR. So here is the
		 * workaround that increase system aperture high address (add 1)
		 * to get rid of the VM fault and hardware hang.
		 */
		logical_addr_high = max((adev->gmc.fb_end >> 18) + 0x1, adev->gmc.agp_end >> 18);
	else
		logical_addr_high = max(adev->gmc.fb_end, adev->gmc.agp_end) >> 18;

	agp_base = 0;
	agp_bot = adev->gmc.agp_start >> 24;
	agp_top = adev->gmc.agp_end >> 24;

	page_table_start.high_part = (u32)(adev->gmc.gart_start >> 44) & 0xF;
	page_table_start.low_part = (u32)(adev->gmc.gart_start >> 12);
	page_table_end.high_part = (u32)(adev->gmc.gart_end >> 44) & 0xF;
	page_table_end.low_part = (u32)(adev->gmc.gart_end >> 12);
	page_table_base.high_part = upper_32_bits(pt_base) & 0xF;
	page_table_base.low_part = lower_32_bits(pt_base);
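
	/*
	 * Note: the shifts above follow the units the hardware works in:
	 * system aperture bounds are kept in 256KB units (>> 18), the AGP
	 * aperture in 16MB units (>> 24), and GART page table addresses in
	 * 4KB pages, split into low (>> 12) and high (>> 44) halves.
	 */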
	pa_config->system_aperture.start_addr = (uint64_t)logical_addr_low << 18;
	pa_config->system_aperture.end_addr = (uint64_t)logical_addr_high << 18;

	pa_config->system_aperture.agp_base = (uint64_t)agp_base << 24;
	pa_config->system_aperture.agp_bot = (uint64_t)agp_bot << 24;
	pa_config->system_aperture.agp_top = (uint64_t)agp_top << 24;

	pa_config->system_aperture.fb_base = adev->gmc.fb_start;
	pa_config->system_aperture.fb_offset = adev->gmc.aper_base;
	pa_config->system_aperture.fb_top = adev->gmc.fb_end;

	pa_config->gart_config.page_table_start_addr = page_table_start.quad_part << 12;
	pa_config->gart_config.page_table_end_addr = page_table_end.quad_part << 12;
	pa_config->gart_config.page_table_base_addr = page_table_base.quad_part;

	pa_config->is_hvm_enabled = 0;
}
#endif
#if defined(CONFIG_DRM_AMD_DC_DCN)
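/*
 * Deferred vblank enable/disable work: maintains the count of CRTCs with
 * vblank interrupts enabled, allows DC idle optimizations (MALL) only when
 * none are active, and enters or exits PSR to match the OS vblank
 * requirements.
 */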
static void vblank_control_worker(struct work_struct *work)
{
	struct vblank_control_work *vblank_work =
		container_of(work, struct vblank_control_work, work);
	struct amdgpu_display_manager *dm = vblank_work->dm;

	mutex_lock(&dm->dc_lock);

	if (vblank_work->enable)
		dm->active_vblank_irq_count++;
	else if (dm->active_vblank_irq_count)
		dm->active_vblank_irq_count--;

	dc_allow_idle_optimizations(dm->dc, dm->active_vblank_irq_count == 0);

	DRM_DEBUG_KMS("Allow idle optimizations (MALL): %d\n", dm->active_vblank_irq_count == 0);

	/* Control PSR based on vblank requirements from OS */
	if (vblank_work->stream && vblank_work->stream->link) {
		if (vblank_work->enable) {
			if (vblank_work->stream->link->psr_settings.psr_allow_active)
				amdgpu_dm_psr_disable(vblank_work->stream);
		} else if (vblank_work->stream->link->psr_settings.psr_feature_enabled &&
			   !vblank_work->stream->link->psr_settings.psr_allow_active &&
			   vblank_work->acrtc->dm_irq_params.allow_psr_entry) {
			amdgpu_dm_psr_enable(vblank_work->stream);
		}
	}

	mutex_unlock(&dm->dc_lock);

	dc_stream_release(vblank_work->stream);

	kfree(vblank_work);
}
#endif
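
/*
 * HPD RX offload worker: re-detects the sink outside of interrupt context
 * and handles DP automated-test requests and link-loss recovery.
 */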
static void dm_handle_hpd_rx_offload_work(struct work_struct *work)
{
	struct hpd_rx_irq_offload_work *offload_work;
	struct amdgpu_dm_connector *aconnector;
	struct dc_link *dc_link;
	struct amdgpu_device *adev;
	enum dc_connection_type new_connection_type = dc_connection_none;
	unsigned long flags;

	offload_work = container_of(work, struct hpd_rx_irq_offload_work, work);
	aconnector = offload_work->offload_wq->aconnector;

	if (!aconnector) {
		DRM_ERROR("Can't retrieve aconnector in hpd_rx_irq_offload_work");
		goto skip;
	}

	adev = drm_to_adev(aconnector->base.dev);
	dc_link = aconnector->dc_link;

	mutex_lock(&aconnector->hpd_lock);
	if (!dc_link_detect_sink(dc_link, &new_connection_type))
		DRM_ERROR("KMS: Failed to detect connector\n");
	mutex_unlock(&aconnector->hpd_lock);

	if (new_connection_type == dc_connection_none)
		goto skip;

	if (amdgpu_in_reset(adev))
		goto skip;

	mutex_lock(&adev->dm.dc_lock);
	if (offload_work->data.bytes.device_service_irq.bits.AUTOMATED_TEST)
		dc_link_dp_handle_automated_test(dc_link);
	else if ((dc_link->connector_signal != SIGNAL_TYPE_EDP) &&
		 hpd_rx_irq_check_link_loss_status(dc_link, &offload_work->data) &&
		 dc_link_dp_allow_hpd_rx_irq(dc_link)) {
		dc_link_dp_handle_link_loss(dc_link);
		spin_lock_irqsave(&offload_work->offload_wq->offload_lock, flags);
		offload_work->offload_wq->is_handling_link_loss = false;
		spin_unlock_irqrestore(&offload_work->offload_wq->offload_lock, flags);
	}
	mutex_unlock(&adev->dm.dc_lock);

skip:
	kfree(offload_work);
}
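
/*
 * Allocates one single-threaded HPD RX offload workqueue per link, so that
 * offloaded work for one link cannot stall work for another.
 */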
static struct hpd_rx_irq_offload_work_queue *hpd_rx_irq_create_workqueue(struct dc *dc)
{
	int max_caps = dc->caps.max_links;
	int i = 0;
	struct hpd_rx_irq_offload_work_queue *hpd_rx_offload_wq = NULL;

	hpd_rx_offload_wq = kcalloc(max_caps, sizeof(*hpd_rx_offload_wq), GFP_KERNEL);

	if (!hpd_rx_offload_wq)
		return NULL;

	for (i = 0; i < max_caps; i++) {
		hpd_rx_offload_wq[i].wq =
			create_singlethread_workqueue("amdgpu_dm_hpd_rx_offload_wq");

		if (hpd_rx_offload_wq[i].wq == NULL) {
			DRM_ERROR("create amdgpu_dm_hpd_rx_offload_wq fail!");
			return NULL;
		}

		spin_lock_init(&hpd_rx_offload_wq[i].offload_lock);
	}

	return hpd_rx_offload_wq;
}
struct amdgpu_stutter_quirk {
	u16 chip_vendor;
	u16 chip_device;
	u16 subsys_vendor;
	u16 subsys_device;
	u8 revision;
};

static const struct amdgpu_stutter_quirk amdgpu_stutter_quirk_list[] = {
	/* https://bugzilla.kernel.org/show_bug.cgi?id=214417 */
	{ 0x1002, 0x15dd, 0x1002, 0x15dd, 0xc8 },
	{ 0, 0, 0, 0, 0 },
};
static bool dm_should_disable_stutter(struct pci_dev *pdev)
{
	const struct amdgpu_stutter_quirk *p = amdgpu_stutter_quirk_list;

	while (p && p->chip_device != 0) {
		if (pdev->vendor == p->chip_vendor &&
		    pdev->device == p->chip_device &&
		    pdev->subsystem_vendor == p->subsys_vendor &&
		    pdev->subsystem_device == p->subsys_device &&
		    pdev->revision == p->revision) {
			return true;
		}
		++p;
	}
	return false;
}
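
/*
 * Core DM bring-up: creates the DC instance from the ASIC and feature-mask
 * configuration, initializes DMUB and the optional helper modules
 * (FreeSync, HDCP, color management), and registers the DRM-facing
 * display state for the device.
 */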
static int amdgpu_dm_init(struct amdgpu_device *adev)
{
	struct dc_init_data init_data;
#ifdef CONFIG_DRM_AMD_DC_HDCP
	struct dc_callback_init init_params;
#endif
	int r;

	adev->dm.ddev = adev_to_drm(adev);
	adev->dm.adev = adev;

	/* Zero all the fields */
	memset(&init_data, 0, sizeof(init_data));
#ifdef CONFIG_DRM_AMD_DC_HDCP
	memset(&init_params, 0, sizeof(init_params));
#endif

	mutex_init(&adev->dm.dc_lock);
	mutex_init(&adev->dm.audio_lock);
#if defined(CONFIG_DRM_AMD_DC_DCN)
	spin_lock_init(&adev->dm.vblank_lock);
#endif

	if (amdgpu_dm_irq_init(adev)) {
		DRM_ERROR("amdgpu: failed to initialize DM IRQ support.\n");
		goto error;
	}

	init_data.asic_id.chip_family = adev->family;

	init_data.asic_id.pci_revision_id = adev->pdev->revision;
	init_data.asic_id.hw_internal_rev = adev->external_rev_id;
	init_data.asic_id.chip_id = adev->pdev->device;

	init_data.asic_id.vram_width = adev->gmc.vram_width;
	/* TODO: initialize init_data.asic_id.vram_type here!!!! */
	init_data.asic_id.atombios_base_address =
		adev->mode_info.atom_context->bios;

	init_data.driver = adev;

	adev->dm.cgs_device = amdgpu_cgs_create_device(adev);

	if (!adev->dm.cgs_device) {
		DRM_ERROR("amdgpu: failed to create cgs device.\n");
		goto error;
	}

	init_data.cgs_device = adev->dm.cgs_device;

	init_data.dce_environment = DCE_ENV_PRODUCTION_DRV;
	switch (adev->asic_type) {
	case CHIP_CARRIZO:
	case CHIP_STONEY:
		init_data.flags.gpu_vm_support = true;
		break;
	default:
		switch (adev->ip_versions[DCE_HWIP][0]) {
		case IP_VERSION(2, 1, 0):
			init_data.flags.gpu_vm_support = true;
			switch (adev->dm.dmcub_fw_version) {
			case 0: /* development */
			case 0x1: /* linux-firmware.git hash 6d9f399 */
			case 0x01000000: /* linux-firmware.git hash 9a0b0f4 */
				init_data.flags.disable_dmcu = false;
				break;
			default:
				init_data.flags.disable_dmcu = true;
			}
			break;
		case IP_VERSION(1, 0, 0):
		case IP_VERSION(1, 0, 1):
		case IP_VERSION(3, 0, 1):
		case IP_VERSION(3, 1, 2):
		case IP_VERSION(3, 1, 3):
			init_data.flags.gpu_vm_support = true;
			break;
		case IP_VERSION(2, 0, 3):
			init_data.flags.disable_dmcu = true;
			break;
		default:
			break;
		}
		break;
	}
	if (amdgpu_dc_feature_mask & DC_FBC_MASK)
		init_data.flags.fbc_support = true;

	if (amdgpu_dc_feature_mask & DC_MULTI_MON_PP_MCLK_SWITCH_MASK)
		init_data.flags.multi_mon_pp_mclk_switch = true;

	if (amdgpu_dc_feature_mask & DC_DISABLE_FRACTIONAL_PWM_MASK)
		init_data.flags.disable_fractional_pwm = true;

	if (amdgpu_dc_feature_mask & DC_EDP_NO_POWER_SEQUENCING)
		init_data.flags.edp_no_power_sequencing = true;

#ifdef CONFIG_DRM_AMD_DC_DCN
	if (amdgpu_dc_feature_mask & DC_DISABLE_LTTPR_DP1_4A)
		init_data.flags.allow_lttpr_non_transparent_mode.bits.DP1_4A = true;
	if (amdgpu_dc_feature_mask & DC_DISABLE_LTTPR_DP2_0)
		init_data.flags.allow_lttpr_non_transparent_mode.bits.DP2_0 = true;
#endif

	init_data.flags.power_down_display_on_boot = true;

	if (check_seamless_boot_capability(adev)) {
		init_data.flags.power_down_display_on_boot = false;
		init_data.flags.allow_seamless_boot_optimization = true;
		DRM_INFO("Seamless boot condition check passed\n");
	}

	INIT_LIST_HEAD(&adev->dm.da_list);
	/* Display Core create. */
	adev->dm.dc = dc_create(&init_data);

	if (adev->dm.dc) {
		DRM_INFO("Display Core initialized with v%s!\n", DC_VER);
	} else {
		DRM_INFO("Display Core failed to initialize with v%s!\n", DC_VER);
		goto error;
	}
	if (amdgpu_dc_debug_mask & DC_DISABLE_PIPE_SPLIT) {
		adev->dm.dc->debug.force_single_disp_pipe_split = false;
		adev->dm.dc->debug.pipe_split_policy = MPC_SPLIT_AVOID;
	}

	if (adev->asic_type != CHIP_CARRIZO && adev->asic_type != CHIP_STONEY)
		adev->dm.dc->debug.disable_stutter = amdgpu_pp_feature_mask & PP_STUTTER_MODE ? false : true;
	if (dm_should_disable_stutter(adev->pdev))
		adev->dm.dc->debug.disable_stutter = true;

	if (amdgpu_dc_debug_mask & DC_DISABLE_STUTTER)
		adev->dm.dc->debug.disable_stutter = true;

	if (amdgpu_dc_debug_mask & DC_DISABLE_DSC) {
		adev->dm.dc->debug.disable_dsc = true;
		adev->dm.dc->debug.disable_dsc_edp = true;
	}

	if (amdgpu_dc_debug_mask & DC_DISABLE_CLOCK_GATING)
		adev->dm.dc->debug.disable_clock_gate = true;

	r = dm_dmub_hw_init(adev);
	if (r) {
		DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
		goto error;
	}

	dc_hardware_init(adev->dm.dc);
	adev->dm.hpd_rx_offload_wq = hpd_rx_irq_create_workqueue(adev->dm.dc);
	if (!adev->dm.hpd_rx_offload_wq) {
		DRM_ERROR("amdgpu: failed to create hpd rx offload workqueue.\n");
		goto error;
	}

#if defined(CONFIG_DRM_AMD_DC_DCN)
	if ((adev->flags & AMD_IS_APU) && (adev->asic_type >= CHIP_CARRIZO)) {
		struct dc_phy_addr_space_config pa_config;

		mmhub_read_system_context(adev, &pa_config);

		// Call the DC init_memory func
		dc_setup_system_context(adev->dm.dc, &pa_config);
	}
#endif

	adev->dm.freesync_module = mod_freesync_create(adev->dm.dc);
	if (!adev->dm.freesync_module) {
		DRM_ERROR(
		"amdgpu: failed to initialize freesync_module.\n");
	} else
		DRM_DEBUG_DRIVER("amdgpu: freesync_module init done %p.\n",
				adev->dm.freesync_module);

	amdgpu_dm_init_color_mod();

#if defined(CONFIG_DRM_AMD_DC_DCN)
	if (adev->dm.dc->caps.max_links > 0) {
		adev->dm.vblank_control_workqueue =
			create_singlethread_workqueue("dm_vblank_control_workqueue");
		if (!adev->dm.vblank_control_workqueue)
			DRM_ERROR("amdgpu: failed to initialize vblank_workqueue.\n");
	}
#endif

#ifdef CONFIG_DRM_AMD_DC_HDCP
	if (adev->dm.dc->caps.max_links > 0 && adev->family >= AMDGPU_FAMILY_RV) {
		adev->dm.hdcp_workqueue = hdcp_create_workqueue(adev, &init_params.cp_psp, adev->dm.dc);

		if (!adev->dm.hdcp_workqueue)
			DRM_ERROR("amdgpu: failed to initialize hdcp_workqueue.\n");
		else
			DRM_DEBUG_DRIVER("amdgpu: hdcp_workqueue init done %p.\n", adev->dm.hdcp_workqueue);
		dc_init_callbacks(adev->dm.dc, &init_params);
	}
#endif
#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
	adev->dm.crc_rd_wrk = amdgpu_dm_crtc_secure_display_create_work();
#endif
	if (dc_enable_dmub_notifications(adev->dm.dc)) {
		init_completion(&adev->dm.dmub_aux_transfer_done);
		adev->dm.dmub_notify = kzalloc(sizeof(struct dmub_notification), GFP_KERNEL);
		if (!adev->dm.dmub_notify) {
			DRM_INFO("amdgpu: fail to allocate adev->dm.dmub_notify");
			goto error;
		}

		adev->dm.delayed_hpd_wq = create_singlethread_workqueue("amdgpu_dm_hpd_wq");
		if (!adev->dm.delayed_hpd_wq) {
			DRM_ERROR("amdgpu: failed to create hpd offload workqueue.\n");
			goto error;
		}

		amdgpu_dm_outbox_init(adev);
#if defined(CONFIG_DRM_AMD_DC_DCN)
		if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_AUX_REPLY,
			dmub_aux_setconfig_callback, false)) {
			DRM_ERROR("amdgpu: fail to register dmub aux callback");
			goto error;
		}
		if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_HPD, dmub_hpd_callback, true)) {
			DRM_ERROR("amdgpu: fail to register dmub hpd callback");
			goto error;
		}
		if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_HPD_IRQ, dmub_hpd_callback, true)) {
			DRM_ERROR("amdgpu: fail to register dmub hpd callback");
			goto error;
		}
#endif /* CONFIG_DRM_AMD_DC_DCN */
	}

	if (amdgpu_dm_initialize_drm_device(adev)) {
		DRM_ERROR(
		"amdgpu: failed to initialize sw for display support.\n");
		goto error;
	}

	/* create fake encoders for MST */
	dm_dp_create_fake_mst_encoders(adev);

	/* TODO: Add_display_info? */

	/* TODO use dynamic cursor width */
	adev_to_drm(adev)->mode_config.cursor_width = adev->dm.dc->caps.max_cursor_size;
	adev_to_drm(adev)->mode_config.cursor_height = adev->dm.dc->caps.max_cursor_size;

	if (drm_vblank_init(adev_to_drm(adev), adev->dm.display_indexes_num)) {
		DRM_ERROR(
		"amdgpu: failed to initialize sw for display support.\n");
		goto error;
	}

	DRM_DEBUG_DRIVER("KMS initialized.\n");

	return 0;
error:
	amdgpu_dm_fini(adev);

	return -EINVAL;
}
static int amdgpu_dm_early_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	amdgpu_dm_audio_fini(adev);

	return 0;
}
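
/* Tears down everything amdgpu_dm_init() created, in roughly reverse order. */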
static void amdgpu_dm_fini(struct amdgpu_device *adev)
{
	int i;

#if defined(CONFIG_DRM_AMD_DC_DCN)
	if (adev->dm.vblank_control_workqueue) {
		destroy_workqueue(adev->dm.vblank_control_workqueue);
		adev->dm.vblank_control_workqueue = NULL;
	}
#endif

	for (i = 0; i < adev->dm.display_indexes_num; i++) {
		drm_encoder_cleanup(&adev->dm.mst_encoders[i].base);
	}

	amdgpu_dm_destroy_drm_device(&adev->dm);

#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
	if (adev->dm.crc_rd_wrk) {
		flush_work(&adev->dm.crc_rd_wrk->notify_ta_work);
		kfree(adev->dm.crc_rd_wrk);
		adev->dm.crc_rd_wrk = NULL;
	}
#endif
#ifdef CONFIG_DRM_AMD_DC_HDCP
	if (adev->dm.hdcp_workqueue) {
		hdcp_destroy(&adev->dev->kobj, adev->dm.hdcp_workqueue);
		adev->dm.hdcp_workqueue = NULL;
	}

	if (adev->dm.dc)
		dc_deinit_callbacks(adev->dm.dc);
#endif

	dc_dmub_srv_destroy(&adev->dm.dc->ctx->dmub_srv);

	if (dc_enable_dmub_notifications(adev->dm.dc)) {
		kfree(adev->dm.dmub_notify);
		adev->dm.dmub_notify = NULL;
		destroy_workqueue(adev->dm.delayed_hpd_wq);
		adev->dm.delayed_hpd_wq = NULL;
	}

	if (adev->dm.dmub_bo)
		amdgpu_bo_free_kernel(&adev->dm.dmub_bo,
				      &adev->dm.dmub_bo_gpu_addr,
				      &adev->dm.dmub_bo_cpu_addr);

	if (adev->dm.hpd_rx_offload_wq) {
		for (i = 0; i < adev->dm.dc->caps.max_links; i++) {
			if (adev->dm.hpd_rx_offload_wq[i].wq) {
				destroy_workqueue(adev->dm.hpd_rx_offload_wq[i].wq);
				adev->dm.hpd_rx_offload_wq[i].wq = NULL;
			}
		}

		kfree(adev->dm.hpd_rx_offload_wq);
		adev->dm.hpd_rx_offload_wq = NULL;
	}

	/* DC Destroy TODO: Replace destroy DAL */
	if (adev->dm.dc)
		dc_destroy(&adev->dm.dc);
	/*
	 * TODO: pageflip, vblank interrupt
	 *
	 * amdgpu_dm_irq_fini(adev);
	 */

	if (adev->dm.cgs_device) {
		amdgpu_cgs_destroy_device(adev->dm.cgs_device);
		adev->dm.cgs_device = NULL;
	}
	if (adev->dm.freesync_module) {
		mod_freesync_destroy(adev->dm.freesync_module);
		adev->dm.freesync_module = NULL;
	}

	mutex_destroy(&adev->dm.audio_lock);
	mutex_destroy(&adev->dm.dc_lock);

	return;
}
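
/*
 * Selects and requests the DMCU firmware for ASICs that use it. DMCU
 * firmware is optional, so a missing file is not treated as an error.
 */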
static int load_dmcu_fw(struct amdgpu_device *adev)
{
	const char *fw_name_dmcu = NULL;
	int r;
	const struct dmcu_firmware_header_v1_0 *hdr;

	switch (adev->asic_type) {
#if defined(CONFIG_DRM_AMD_DC_SI)
	case CHIP_TAHITI:
	case CHIP_PITCAIRN:
	case CHIP_VERDE:
	case CHIP_OLAND:
#endif
	case CHIP_BONAIRE:
	case CHIP_HAWAII:
	case CHIP_KAVERI:
	case CHIP_KABINI:
	case CHIP_MULLINS:
	case CHIP_TONGA:
	case CHIP_FIJI:
	case CHIP_CARRIZO:
	case CHIP_STONEY:
	case CHIP_POLARIS11:
	case CHIP_POLARIS10:
	case CHIP_POLARIS12:
	case CHIP_VEGAM:
	case CHIP_VEGA10:
	case CHIP_VEGA12:
	case CHIP_VEGA20:
		return 0;
	case CHIP_NAVI12:
		fw_name_dmcu = FIRMWARE_NAVI12_DMCU;
		break;
	case CHIP_RAVEN:
		if (ASICREV_IS_PICASSO(adev->external_rev_id))
			fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
		else if (ASICREV_IS_RAVEN2(adev->external_rev_id))
			fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
		else
			return 0;
		break;
	default:
		switch (adev->ip_versions[DCE_HWIP][0]) {
		case IP_VERSION(2, 0, 2):
		case IP_VERSION(2, 0, 3):
		case IP_VERSION(2, 0, 0):
		case IP_VERSION(2, 1, 0):
		case IP_VERSION(3, 0, 0):
		case IP_VERSION(3, 0, 2):
		case IP_VERSION(3, 0, 3):
		case IP_VERSION(3, 0, 1):
		case IP_VERSION(3, 1, 2):
		case IP_VERSION(3, 1, 3):
			return 0;
		default:
			break;
		}
		DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
		return -EINVAL;
	}

	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
		DRM_DEBUG_KMS("dm: DMCU firmware not supported on direct or SMU loading\n");
		return 0;
	}

	r = request_firmware_direct(&adev->dm.fw_dmcu, fw_name_dmcu, adev->dev);
	if (r == -ENOENT) {
		/* DMCU firmware is not necessary, so don't raise a fuss if it's missing */
		DRM_DEBUG_KMS("dm: DMCU firmware not found\n");
		adev->dm.fw_dmcu = NULL;
		return 0;
	}
	if (r) {
		dev_err(adev->dev, "amdgpu_dm: Can't load firmware \"%s\"\n",
			fw_name_dmcu);
		return r;
	}

	r = amdgpu_ucode_validate(adev->dm.fw_dmcu);
	if (r) {
		dev_err(adev->dev, "amdgpu_dm: Can't validate firmware \"%s\"\n",
			fw_name_dmcu);
		release_firmware(adev->dm.fw_dmcu);
		adev->dm.fw_dmcu = NULL;
		return r;
	}

	hdr = (const struct dmcu_firmware_header_v1_0 *)adev->dm.fw_dmcu->data;
	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].ucode_id = AMDGPU_UCODE_ID_DMCU_ERAM;
	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].fw = adev->dm.fw_dmcu;
	adev->firmware.fw_size +=
		ALIGN(le32_to_cpu(hdr->header.ucode_size_bytes) - le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);

	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].ucode_id = AMDGPU_UCODE_ID_DMCU_INTV;
	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].fw = adev->dm.fw_dmcu;
	adev->firmware.fw_size +=
		ALIGN(le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);

	adev->dm.dmcu_fw_version = le32_to_cpu(hdr->header.ucode_version);

	DRM_DEBUG_KMS("PSP loading DMCU firmware\n");

	return 0;
}
static uint32_t amdgpu_dm_dmub_reg_read(void *ctx, uint32_t address)
{
	struct amdgpu_device *adev = ctx;

	return dm_read_reg(adev->dm.dc->ctx, address);
}

static void amdgpu_dm_dmub_reg_write(void *ctx, uint32_t address,
				     uint32_t value)
{
	struct amdgpu_device *adev = ctx;

	return dm_write_reg(adev->dm.dc->ctx, address, value);
}
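
/*
 * DMUB software init: picks the firmware image for the ASIC, registers it
 * with the PSP loader when applicable, creates the DMUB service, and carves
 * its regions out of a dedicated VRAM allocation.
 */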
1875 static int dm_dmub_sw_init(struct amdgpu_device *adev)
1877 struct dmub_srv_create_params create_params;
1878 struct dmub_srv_region_params region_params;
1879 struct dmub_srv_region_info region_info;
1880 struct dmub_srv_fb_params fb_params;
1881 struct dmub_srv_fb_info *fb_info;
1882 struct dmub_srv *dmub_srv;
1883 const struct dmcub_firmware_header_v1_0 *hdr;
1884 const char *fw_name_dmub;
1885 enum dmub_asic dmub_asic;
1886 enum dmub_status status;
1889 switch (adev->ip_versions[DCE_HWIP][0]) {
1890 case IP_VERSION(2, 1, 0):
1891 dmub_asic = DMUB_ASIC_DCN21;
1892 fw_name_dmub = FIRMWARE_RENOIR_DMUB;
1893 if (ASICREV_IS_GREEN_SARDINE(adev->external_rev_id))
1894 fw_name_dmub = FIRMWARE_GREEN_SARDINE_DMUB;
1896 case IP_VERSION(3, 0, 0):
1897 if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(10, 3, 0)) {
1898 dmub_asic = DMUB_ASIC_DCN30;
1899 fw_name_dmub = FIRMWARE_SIENNA_CICHLID_DMUB;
1901 dmub_asic = DMUB_ASIC_DCN30;
1902 fw_name_dmub = FIRMWARE_NAVY_FLOUNDER_DMUB;
1905 case IP_VERSION(3, 0, 1):
1906 dmub_asic = DMUB_ASIC_DCN301;
1907 fw_name_dmub = FIRMWARE_VANGOGH_DMUB;
1909 case IP_VERSION(3, 0, 2):
1910 dmub_asic = DMUB_ASIC_DCN302;
1911 fw_name_dmub = FIRMWARE_DIMGREY_CAVEFISH_DMUB;
1913 case IP_VERSION(3, 0, 3):
1914 dmub_asic = DMUB_ASIC_DCN303;
1915 fw_name_dmub = FIRMWARE_BEIGE_GOBY_DMUB;
1917 case IP_VERSION(3, 1, 2):
1918 case IP_VERSION(3, 1, 3):
1919 dmub_asic = (adev->external_rev_id == YELLOW_CARP_B0) ? DMUB_ASIC_DCN31B : DMUB_ASIC_DCN31;
1920 fw_name_dmub = FIRMWARE_YELLOW_CARP_DMUB;
1924 /* ASIC doesn't support DMUB. */
1928 r = request_firmware_direct(&adev->dm.dmub_fw, fw_name_dmub, adev->dev);
1930 DRM_ERROR("DMUB firmware loading failed: %d\n", r);
1934 r = amdgpu_ucode_validate(adev->dm.dmub_fw);
1936 DRM_ERROR("Couldn't validate DMUB firmware: %d\n", r);
1940 hdr = (const struct dmcub_firmware_header_v1_0 *)adev->dm.dmub_fw->data;
1941 adev->dm.dmcub_fw_version = le32_to_cpu(hdr->header.ucode_version);
1943 if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
1944 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].ucode_id =
1945 AMDGPU_UCODE_ID_DMCUB;
1946 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].fw =
1948 adev->firmware.fw_size +=
1949 ALIGN(le32_to_cpu(hdr->inst_const_bytes), PAGE_SIZE);
1951 DRM_INFO("Loading DMUB firmware via PSP: version=0x%08X\n",
1952 adev->dm.dmcub_fw_version);
1956 adev->dm.dmub_srv = kzalloc(sizeof(*adev->dm.dmub_srv), GFP_KERNEL);
1957 dmub_srv = adev->dm.dmub_srv;
1960 DRM_ERROR("Failed to allocate DMUB service!\n");
1964 memset(&create_params, 0, sizeof(create_params));
1965 create_params.user_ctx = adev;
1966 create_params.funcs.reg_read = amdgpu_dm_dmub_reg_read;
1967 create_params.funcs.reg_write = amdgpu_dm_dmub_reg_write;
1968 create_params.asic = dmub_asic;
1970 /* Create the DMUB service. */
1971 status = dmub_srv_create(dmub_srv, &create_params);
1972 if (status != DMUB_STATUS_OK) {
1973 DRM_ERROR("Error creating DMUB service: %d\n", status);
1977 /* Calculate the size of all the regions for the DMUB service. */
1978 memset(®ion_params, 0, sizeof(region_params));
1980 region_params.inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
1981 PSP_HEADER_BYTES - PSP_FOOTER_BYTES;
1982 region_params.bss_data_size = le32_to_cpu(hdr->bss_data_bytes);
1983 region_params.vbios_size = adev->bios_size;
1984 region_params.fw_bss_data = region_params.bss_data_size ?
1985 adev->dm.dmub_fw->data +
1986 le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
1987 le32_to_cpu(hdr->inst_const_bytes) : NULL;
1988 region_params.fw_inst_const =
1989 adev->dm.dmub_fw->data +
1990 le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
1993 status = dmub_srv_calc_region_info(dmub_srv, &region_params,
1996 if (status != DMUB_STATUS_OK) {
1997 DRM_ERROR("Error calculating DMUB region info: %d\n", status);
2002 * Allocate a framebuffer based on the total size of all the regions.
2003 * TODO: Move this into GART.
2005 r = amdgpu_bo_create_kernel(adev, region_info.fb_size, PAGE_SIZE,
2006 AMDGPU_GEM_DOMAIN_VRAM, &adev->dm.dmub_bo,
2007 &adev->dm.dmub_bo_gpu_addr,
2008 &adev->dm.dmub_bo_cpu_addr);
2012 /* Rebase the regions on the framebuffer address. */
2013 memset(&fb_params, 0, sizeof(fb_params));
2014 fb_params.cpu_addr = adev->dm.dmub_bo_cpu_addr;
2015 fb_params.gpu_addr = adev->dm.dmub_bo_gpu_addr;
2016 fb_params.region_info = &region_info;
2018 adev->dm.dmub_fb_info =
2019 kzalloc(sizeof(*adev->dm.dmub_fb_info), GFP_KERNEL);
2020 fb_info = adev->dm.dmub_fb_info;
2024 "Failed to allocate framebuffer info for DMUB service!\n");
2028 status = dmub_srv_calc_fb_info(dmub_srv, &fb_params, fb_info);
2029 if (status != DMUB_STATUS_OK) {
2030 DRM_ERROR("Error calculating DMUB FB info: %d\n", status);
2037 static int dm_sw_init(void *handle)
2039 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2042 r = dm_dmub_sw_init(adev);
2046 return load_dmcu_fw(adev);
2049 static int dm_sw_fini(void *handle)
2051 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2053 kfree(adev->dm.dmub_fb_info);
2054 adev->dm.dmub_fb_info = NULL;
2056 if (adev->dm.dmub_srv) {
2057 dmub_srv_destroy(adev->dm.dmub_srv);
2058 adev->dm.dmub_srv = NULL;
2061 release_firmware(adev->dm.dmub_fw);
2062 adev->dm.dmub_fw = NULL;
2064 release_firmware(adev->dm.fw_dmcu);
2065 adev->dm.fw_dmcu = NULL;
2070 static int detect_mst_link_for_all_connectors(struct drm_device *dev)
2072 struct amdgpu_dm_connector *aconnector;
2073 struct drm_connector *connector;
2074 struct drm_connector_list_iter iter;
2077 drm_connector_list_iter_begin(dev, &iter);
2078 drm_for_each_connector_iter(connector, &iter) {
2079 aconnector = to_amdgpu_dm_connector(connector);
2080 if (aconnector->dc_link->type == dc_connection_mst_branch &&
2081 aconnector->mst_mgr.aux) {
2082 DRM_DEBUG_DRIVER("DM_MST: starting TM on aconnector: %p [id: %d]\n",
2084 aconnector->base.base.id);
2086 ret = drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, true);
2088 DRM_ERROR("DM_MST: Failed to start MST\n");
2089 aconnector->dc_link->type =
2090 dc_connection_single;
2095 drm_connector_list_iter_end(&iter);
2100 static int dm_late_init(void *handle)
2102 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2104 struct dmcu_iram_parameters params;
2105 unsigned int linear_lut[16];
2107 struct dmcu *dmcu = NULL;
2109 dmcu = adev->dm.dc->res_pool->dmcu;
2111 for (i = 0; i < 16; i++)
2112 linear_lut[i] = 0xFFFF * i / 15;
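/* Illustrative arithmetic (added note, not from the original source): the
 * loop above yields 16 evenly spaced 16-bit values, e.g. linear_lut[0] =
 * 0x0000, linear_lut[1] = 0xFFFF * 1 / 15 = 0x1111, ..., linear_lut[15] =
 * 0xFFFF, i.e. an identity ramp used as a pass-through backlight LUT.
 */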
2115 params.backlight_ramping_override = false;
2116 params.backlight_ramping_start = 0xCCCC;
2117 params.backlight_ramping_reduction = 0xCCCCCCCC;
2118 params.backlight_lut_array_size = 16;
2119 params.backlight_lut_array = linear_lut;
2121 /* Min backlight level after ABM reduction; don't allow below 1%:
2122 * 0xFFFF * 0.01 = 0x28F
2124 params.min_abm_backlight = 0x28F;
2125 /* In the case where ABM is implemented on the DMCUB,
2126 * the DMCU object will be NULL.
2127 * ABM 2.4 and up are implemented on the DMCUB.
2130 if (!dmcu_load_iram(dmcu, params))
2132 } else if (adev->dm.dc->ctx->dmub_srv) {
2133 struct dc_link *edp_links[MAX_NUM_EDP];
2136 get_edp_links(adev->dm.dc, edp_links, &edp_num);
2137 for (i = 0; i < edp_num; i++) {
2138 if (!dmub_init_abm_config(adev->dm.dc->res_pool, params, i))
2143 return detect_mst_link_for_all_connectors(adev_to_drm(adev));
2146 static void s3_handle_mst(struct drm_device *dev, bool suspend)
2148 struct amdgpu_dm_connector *aconnector;
2149 struct drm_connector *connector;
2150 struct drm_connector_list_iter iter;
2151 struct drm_dp_mst_topology_mgr *mgr;
2153 bool need_hotplug = false;
2155 drm_connector_list_iter_begin(dev, &iter);
2156 drm_for_each_connector_iter(connector, &iter) {
2157 aconnector = to_amdgpu_dm_connector(connector);
2158 if (aconnector->dc_link->type != dc_connection_mst_branch ||
2159 aconnector->mst_port)
2162 mgr = &aconnector->mst_mgr;
2165 drm_dp_mst_topology_mgr_suspend(mgr);
2167 ret = drm_dp_mst_topology_mgr_resume(mgr, true);
2169 drm_dp_mst_topology_mgr_set_mst(mgr, false);
2170 need_hotplug = true;
2174 drm_connector_list_iter_end(&iter);
2177 drm_kms_helper_hotplug_event(dev);
2180 static int amdgpu_dm_smu_write_watermarks_table(struct amdgpu_device *adev)
2182 struct smu_context *smu = &adev->smu;
2185 if (!is_support_sw_smu(adev))
2188 /* This interface is for dGPU Navi1x. The Linux dc-pplib interface depends
2189 * on the Windows driver dc implementation.
2190 * For Navi1x, clock settings of dcn watermarks are fixed. The settings
2191 * should be passed to smu during boot up and resume from s3.
2192 * boot up: dc calculates dcn watermark clock settings within dc_create,
2193 * dcn20_resource_construct
2194 * and then calls the pplib functions below to pass the settings to smu:
2195 * smu_set_watermarks_for_clock_ranges
2196 * smu_set_watermarks_table
2197 * navi10_set_watermarks_table
2198 * smu_write_watermarks_table
2200 * For Renoir, clock settings of dcn watermarks are also fixed values.
2201 * dc has implemented a different flow for the Windows driver:
2202 * dc_hardware_init / dc_set_power_state
2207 * smu_set_watermarks_for_clock_ranges
2208 * renoir_set_watermarks_table
2209 * smu_write_watermarks_table
2212 * dc_hardware_init -> amdgpu_dm_init
2213 * dc_set_power_state --> dm_resume
2215 * Therefore, this function applies to navi10/12/14 but not Renoir.
2218 switch (adev->ip_versions[DCE_HWIP][0]) {
2219 case IP_VERSION(2, 0, 2):
2220 case IP_VERSION(2, 0, 0):
2226 ret = smu_write_watermarks_table(smu);
2228 DRM_ERROR("Failed to update WMTABLE!\n");
2236 * dm_hw_init() - Initialize DC device
2237 * @handle: The base driver device containing the amdgpu_dm device.
2239 * Initialize the &struct amdgpu_display_manager device. This involves calling
2240 * the initializers of each DM component, then populating the struct with them.
2242 * Although the function implies hardware initialization, both hardware and
2243 * software are initialized here. Splitting them out to their relevant init
2244 * hooks is a future TODO item.
2246 * Some notable things that are initialized here:
2248 * - Display Core, both software and hardware
2249 * - DC modules that we need (freesync and color management)
2250 * - DRM software states
2251 * - Interrupt sources and handlers
2253 * - Debug FS entries, if enabled
2255 static int dm_hw_init(void *handle)
2257 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2258 /* Create DAL display manager */
2259 amdgpu_dm_init(adev);
2260 amdgpu_dm_hpd_init(adev);
2266 * dm_hw_fini() - Teardown DC device
2267 * @handle: The base driver device containing the amdgpu_dm device.
2269 * Teardown components within &struct amdgpu_display_manager that require
2270 * cleanup. This involves cleaning up the DRM device, DC, and any modules that
2271 * were loaded. Also flush IRQ workqueues and disable them.
2273 static int dm_hw_fini(void *handle)
2275 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2277 amdgpu_dm_hpd_fini(adev);
2279 amdgpu_dm_irq_fini(adev);
2280 amdgpu_dm_fini(adev);
2285 static int dm_enable_vblank(struct drm_crtc *crtc);
2286 static void dm_disable_vblank(struct drm_crtc *crtc);
2288 static void dm_gpureset_toggle_interrupts(struct amdgpu_device *adev,
2289 struct dc_state *state, bool enable)
2291 enum dc_irq_source irq_source;
2292 struct amdgpu_crtc *acrtc;
2296 for (i = 0; i < state->stream_count; i++) {
2297 acrtc = get_crtc_by_otg_inst(
2298 adev, state->stream_status[i].primary_otg_inst);
2300 if (acrtc && state->stream_status[i].plane_count != 0) {
2301 irq_source = IRQ_TYPE_PFLIP + acrtc->otg_inst;
2302 rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
2303 DRM_DEBUG_VBL("crtc %d - pflip irq %sabling: r=%d\n",
2304 acrtc->crtc_id, enable ? "en" : "dis", rc);
2306 DRM_WARN("Failed to %s pflip interrupts\n",
2307 enable ? "enable" : "disable");
2310 rc = dm_enable_vblank(&acrtc->base);
2312 DRM_WARN("Failed to enable vblank interrupts\n");
2314 dm_disable_vblank(&acrtc->base);
2322 static enum dc_status amdgpu_dm_commit_zero_streams(struct dc *dc)
2324 struct dc_state *context = NULL;
2325 enum dc_status res = DC_ERROR_UNEXPECTED;
2327 struct dc_stream_state *del_streams[MAX_PIPES];
2328 int del_streams_count = 0;
2330 memset(del_streams, 0, sizeof(del_streams));
2332 context = dc_create_state(dc);
2333 if (context == NULL)
2334 goto context_alloc_fail;
2336 dc_resource_state_copy_construct_current(dc, context);
2338 /* First remove from context all streams */
2339 for (i = 0; i < context->stream_count; i++) {
2340 struct dc_stream_state *stream = context->streams[i];
2342 del_streams[del_streams_count++] = stream;
2345 /* Remove all planes for removed streams and then remove the streams */
2346 for (i = 0; i < del_streams_count; i++) {
2347 if (!dc_rem_all_planes_for_stream(dc, del_streams[i], context)) {
2348 res = DC_FAIL_DETACH_SURFACES;
2352 res = dc_remove_stream_from_ctx(dc, context, del_streams[i]);
2357 res = dc_commit_state(dc, context);
2360 dc_release_state(context);
2366 static void hpd_rx_irq_work_suspend(struct amdgpu_display_manager *dm)
2370 if (dm->hpd_rx_offload_wq) {
2371 for (i = 0; i < dm->dc->caps.max_links; i++)
2372 flush_workqueue(dm->hpd_rx_offload_wq[i].wq);
2376 static int dm_suspend(void *handle)
2378 struct amdgpu_device *adev = handle;
2379 struct amdgpu_display_manager *dm = &adev->dm;
2382 if (amdgpu_in_reset(adev)) {
2383 mutex_lock(&dm->dc_lock);
2385 #if defined(CONFIG_DRM_AMD_DC_DCN)
2386 dc_allow_idle_optimizations(adev->dm.dc, false);
2389 dm->cached_dc_state = dc_copy_state(dm->dc->current_state);
2391 dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, false);
2393 amdgpu_dm_commit_zero_streams(dm->dc);
2395 amdgpu_dm_irq_suspend(adev);
2397 hpd_rx_irq_work_suspend(dm);
2402 WARN_ON(adev->dm.cached_state);
2403 adev->dm.cached_state = drm_atomic_helper_suspend(adev_to_drm(adev));
2405 s3_handle_mst(adev_to_drm(adev), true);
2407 amdgpu_dm_irq_suspend(adev);
2409 hpd_rx_irq_work_suspend(dm);
2411 dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D3);
2416 static struct amdgpu_dm_connector *
2417 amdgpu_dm_find_first_crtc_matching_connector(struct drm_atomic_state *state,
2418 struct drm_crtc *crtc)
2421 struct drm_connector_state *new_con_state;
2422 struct drm_connector *connector;
2423 struct drm_crtc *crtc_from_state;
2425 for_each_new_connector_in_state(state, connector, new_con_state, i) {
2426 crtc_from_state = new_con_state->crtc;
2428 if (crtc_from_state == crtc)
2429 return to_amdgpu_dm_connector(connector);
2435 static void emulated_link_detect(struct dc_link *link)
2437 struct dc_sink_init_data sink_init_data = { 0 };
2438 struct display_sink_capability sink_caps = { 0 };
2439 enum dc_edid_status edid_status;
2440 struct dc_context *dc_ctx = link->ctx;
2441 struct dc_sink *sink = NULL;
2442 struct dc_sink *prev_sink = NULL;
2444 link->type = dc_connection_none;
2445 prev_sink = link->local_sink;
2448 dc_sink_release(prev_sink);
2450 switch (link->connector_signal) {
2451 case SIGNAL_TYPE_HDMI_TYPE_A: {
2452 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2453 sink_caps.signal = SIGNAL_TYPE_HDMI_TYPE_A;
2457 case SIGNAL_TYPE_DVI_SINGLE_LINK: {
2458 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2459 sink_caps.signal = SIGNAL_TYPE_DVI_SINGLE_LINK;
2463 case SIGNAL_TYPE_DVI_DUAL_LINK: {
2464 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2465 sink_caps.signal = SIGNAL_TYPE_DVI_DUAL_LINK;
2469 case SIGNAL_TYPE_LVDS: {
2470 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2471 sink_caps.signal = SIGNAL_TYPE_LVDS;
2475 case SIGNAL_TYPE_EDP: {
2476 sink_caps.transaction_type =
2477 DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
2478 sink_caps.signal = SIGNAL_TYPE_EDP;
2482 case SIGNAL_TYPE_DISPLAY_PORT: {
2483 sink_caps.transaction_type =
2484 DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
2485 sink_caps.signal = SIGNAL_TYPE_VIRTUAL;
2490 DC_ERROR("Invalid connector type! signal:%d\n",
2491 link->connector_signal);
2495 sink_init_data.link = link;
2496 sink_init_data.sink_signal = sink_caps.signal;
2498 sink = dc_sink_create(&sink_init_data);
2500 DC_ERROR("Failed to create sink!\n");
2504 /* dc_sink_create returns a new reference */
2505 link->local_sink = sink;
2507 edid_status = dm_helpers_read_local_edid(
2512 if (edid_status != EDID_OK)
2513 DC_ERROR("Failed to read EDID");
2517 static void dm_gpureset_commit_state(struct dc_state *dc_state,
2518 struct amdgpu_display_manager *dm)
2521 struct dc_surface_update surface_updates[MAX_SURFACES];
2522 struct dc_plane_info plane_infos[MAX_SURFACES];
2523 struct dc_scaling_info scaling_infos[MAX_SURFACES];
2524 struct dc_flip_addrs flip_addrs[MAX_SURFACES];
2525 struct dc_stream_update stream_update;
2529 bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
2532 dm_error("Failed to allocate update bundle\n");
2536 for (k = 0; k < dc_state->stream_count; k++) {
2537 bundle->stream_update.stream = dc_state->streams[k];
2539 for (m = 0; m < dc_state->stream_status->plane_count; m++) {
2540 bundle->surface_updates[m].surface =
2541 dc_state->stream_status->plane_states[m];
2542 bundle->surface_updates[m].surface->force_full_update =
2545 dc_commit_updates_for_stream(
2546 dm->dc, bundle->surface_updates,
2547 dc_state->stream_status->plane_count,
2548 dc_state->streams[k], &bundle->stream_update, dc_state);
2557 static void dm_set_dpms_off(struct dc_link *link, struct dm_crtc_state *acrtc_state)
2559 struct dc_stream_state *stream_state;
2560 struct amdgpu_dm_connector *aconnector = link->priv;
2561 struct amdgpu_device *adev = drm_to_adev(aconnector->base.dev);
2562 struct dc_stream_update stream_update;
2563 bool dpms_off = true;
2565 memset(&stream_update, 0, sizeof(stream_update));
2566 stream_update.dpms_off = &dpms_off;
2568 mutex_lock(&adev->dm.dc_lock);
2569 stream_state = dc_stream_find_from_link(link);
2571 if (stream_state == NULL) {
2572 DRM_DEBUG_DRIVER("Error finding stream state associated with link!\n");
2573 mutex_unlock(&adev->dm.dc_lock);
2577 stream_update.stream = stream_state;
2578 acrtc_state->force_dpms_off = true;
2579 dc_commit_updates_for_stream(stream_state->ctx->dc, NULL, 0,
2580 stream_state, &stream_update,
2581 stream_state->ctx->dc->current_state);
2582 mutex_unlock(&adev->dm.dc_lock);
2585 static int dm_resume(void *handle)
2587 struct amdgpu_device *adev = handle;
2588 struct drm_device *ddev = adev_to_drm(adev);
2589 struct amdgpu_display_manager *dm = &adev->dm;
2590 struct amdgpu_dm_connector *aconnector;
2591 struct drm_connector *connector;
2592 struct drm_connector_list_iter iter;
2593 struct drm_crtc *crtc;
2594 struct drm_crtc_state *new_crtc_state;
2595 struct dm_crtc_state *dm_new_crtc_state;
2596 struct drm_plane *plane;
2597 struct drm_plane_state *new_plane_state;
2598 struct dm_plane_state *dm_new_plane_state;
2599 struct dm_atomic_state *dm_state = to_dm_atomic_state(dm->atomic_obj.state);
2600 enum dc_connection_type new_connection_type = dc_connection_none;
2601 struct dc_state *dc_state;
2604 if (amdgpu_in_reset(adev)) {
2605 dc_state = dm->cached_dc_state;
2608 * The dc->current_state is backed up into dm->cached_dc_state
2609 * before we commit 0 streams.
2611 * DC will clear link encoder assignments on the real state
2612 * but the changes won't propagate over to the copy we made
2613 * before the 0 streams commit.
2615 * DC expects that link encoder assignments are *not* valid
2616 * when committing a state, so as a workaround it needs to be
2619 link_enc_cfg_init(dm->dc, dc_state);
2621 if (dc_enable_dmub_notifications(adev->dm.dc))
2622 amdgpu_dm_outbox_init(adev);
2624 r = dm_dmub_hw_init(adev);
2626 DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
2628 dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
2631 amdgpu_dm_irq_resume_early(adev);
2633 for (i = 0; i < dc_state->stream_count; i++) {
2634 dc_state->streams[i]->mode_changed = true;
2635 for (j = 0; j < dc_state->stream_status[i].plane_count; j++) {
2636 dc_state->stream_status[i].plane_states[j]->update_flags.raw
2641 WARN_ON(!dc_commit_state(dm->dc, dc_state));
2643 dm_gpureset_commit_state(dm->cached_dc_state, dm);
2645 dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, true);
2647 dc_release_state(dm->cached_dc_state);
2648 dm->cached_dc_state = NULL;
2650 amdgpu_dm_irq_resume_late(adev);
2652 mutex_unlock(&dm->dc_lock);
2656 /* Recreate dc_state - DC invalidates it when setting power state to S3. */
2657 dc_release_state(dm_state->context);
2658 dm_state->context = dc_create_state(dm->dc);
2659 /* TODO: Remove dc_state->dccg, use dc->dccg directly. */
2660 dc_resource_state_construct(dm->dc, dm_state->context);
2662 /* Re-enable outbox interrupts for DPIA. */
2663 if (dc_enable_dmub_notifications(adev->dm.dc))
2664 amdgpu_dm_outbox_init(adev);
2666 /* Before powering on DC we need to re-initialize DMUB. */
2667 dm_dmub_hw_resume(adev);
2669 /* power on hardware */
2670 dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
2672 /* program HPD filter */
2676 * Early enable HPD Rx IRQ; this should be done before setting the mode,
2677 * as short pulse interrupts are used for MST
2679 amdgpu_dm_irq_resume_early(adev);
2681 /* On resume we need to rewrite the MSTM control bits to enable MST */
2682 s3_handle_mst(ddev, false);
2685 drm_connector_list_iter_begin(ddev, &iter);
2686 drm_for_each_connector_iter(connector, &iter) {
2687 aconnector = to_amdgpu_dm_connector(connector);
2690 * this is the case when traversing through already created
2691 * MST connectors; they should be skipped
2693 if (aconnector->mst_port)
2696 mutex_lock(&aconnector->hpd_lock);
2697 if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
2698 DRM_ERROR("KMS: Failed to detect connector\n");
2700 if (aconnector->base.force && new_connection_type == dc_connection_none)
2701 emulated_link_detect(aconnector->dc_link);
2703 dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD);
2705 if (aconnector->fake_enable && aconnector->dc_link->local_sink)
2706 aconnector->fake_enable = false;
2708 if (aconnector->dc_sink)
2709 dc_sink_release(aconnector->dc_sink);
2710 aconnector->dc_sink = NULL;
2711 amdgpu_dm_update_connector_after_detect(aconnector);
2712 mutex_unlock(&aconnector->hpd_lock);
2714 drm_connector_list_iter_end(&iter);
2716 /* Force mode set in atomic commit */
2717 for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i)
2718 new_crtc_state->active_changed = true;
2721 * atomic_check is expected to create the dc states. We need to release
2722 * them here, since they were duplicated as part of the suspend procedure.
2725 for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i) {
2726 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
2727 if (dm_new_crtc_state->stream) {
2728 WARN_ON(kref_read(&dm_new_crtc_state->stream->refcount) > 1);
2729 dc_stream_release(dm_new_crtc_state->stream);
2730 dm_new_crtc_state->stream = NULL;
2734 for_each_new_plane_in_state(dm->cached_state, plane, new_plane_state, i) {
2735 dm_new_plane_state = to_dm_plane_state(new_plane_state);
2736 if (dm_new_plane_state->dc_state) {
2737 WARN_ON(kref_read(&dm_new_plane_state->dc_state->refcount) > 1);
2738 dc_plane_state_release(dm_new_plane_state->dc_state);
2739 dm_new_plane_state->dc_state = NULL;
2743 drm_atomic_helper_resume(ddev, dm->cached_state);
2745 dm->cached_state = NULL;
2747 amdgpu_dm_irq_resume_late(adev);
2749 amdgpu_dm_smu_write_watermarks_table(adev);
2757 * DM (and consequently DC) is registered in the amdgpu base driver as an IP
2758 * block. When CONFIG_DRM_AMD_DC is enabled, the DM device IP block is added to
2759 * the base driver's device list to be initialized and torn down accordingly.
2761 * The functions to do so are provided as hooks in &struct amd_ip_funcs.
2764 static const struct amd_ip_funcs amdgpu_dm_funcs = {
2766 .early_init = dm_early_init,
2767 .late_init = dm_late_init,
2768 .sw_init = dm_sw_init,
2769 .sw_fini = dm_sw_fini,
2770 .early_fini = amdgpu_dm_early_fini,
2771 .hw_init = dm_hw_init,
2772 .hw_fini = dm_hw_fini,
2773 .suspend = dm_suspend,
2774 .resume = dm_resume,
2775 .is_idle = dm_is_idle,
2776 .wait_for_idle = dm_wait_for_idle,
2777 .check_soft_reset = dm_check_soft_reset,
2778 .soft_reset = dm_soft_reset,
2779 .set_clockgating_state = dm_set_clockgating_state,
2780 .set_powergating_state = dm_set_powergating_state,
2783 const struct amdgpu_ip_block_version dm_ip_block =
2785 .type = AMD_IP_BLOCK_TYPE_DCE,
2789 .funcs = &amdgpu_dm_funcs,
2799 static const struct drm_mode_config_funcs amdgpu_dm_mode_funcs = {
2800 .fb_create = amdgpu_display_user_framebuffer_create,
2801 .get_format_info = amd_get_format_info,
2802 .output_poll_changed = drm_fb_helper_output_poll_changed,
2803 .atomic_check = amdgpu_dm_atomic_check,
2804 .atomic_commit = drm_atomic_helper_commit,
2807 static struct drm_mode_config_helper_funcs amdgpu_dm_mode_config_helperfuncs = {
2808 .atomic_commit_tail = amdgpu_dm_atomic_commit_tail
2811 static void update_connector_ext_caps(struct amdgpu_dm_connector *aconnector)
2813 u32 max_cll, min_cll, max, min, q, r;
2814 struct amdgpu_dm_backlight_caps *caps;
2815 struct amdgpu_display_manager *dm;
2816 struct drm_connector *conn_base;
2817 struct amdgpu_device *adev;
2818 struct dc_link *link = NULL;
2819 static const u8 pre_computed_values[] = {
2820 50, 51, 52, 53, 55, 56, 57, 58, 59, 61, 62, 63, 65, 66, 68, 69,
2821 71, 72, 74, 75, 77, 79, 81, 82, 84, 86, 88, 90, 92, 94, 96, 98};
2824 if (!aconnector || !aconnector->dc_link)
2827 link = aconnector->dc_link;
2828 if (link->connector_signal != SIGNAL_TYPE_EDP)
2831 conn_base = &aconnector->base;
2832 adev = drm_to_adev(conn_base->dev);
2834 for (i = 0; i < dm->num_of_edps; i++) {
2835 if (link == dm->backlight_link[i])
2838 if (i >= dm->num_of_edps)
2840 caps = &dm->backlight_caps[i];
2841 caps->ext_caps = &aconnector->dc_link->dpcd_sink_ext_caps;
2842 caps->aux_support = false;
2843 max_cll = conn_base->hdr_sink_metadata.hdmi_type1.max_cll;
2844 min_cll = conn_base->hdr_sink_metadata.hdmi_type1.min_cll;
2846 if (caps->ext_caps->bits.oled == 1 /*||
2847 caps->ext_caps->bits.sdr_aux_backlight_control == 1 ||
2848 caps->ext_caps->bits.hdr_aux_backlight_control == 1*/)
2849 caps->aux_support = true;
2851 if (amdgpu_backlight == 0)
2852 caps->aux_support = false;
2853 else if (amdgpu_backlight == 1)
2854 caps->aux_support = true;
2856 /* From the specification (CTA-861-G), the maximum luminance is
2857 * calculated as:
2858 * Luminance = 50 * 2**(CV/32)
2859 * where CV is a one-byte value.
2860 * Calculating this expression directly would need floating-point
2861 * precision; to avoid that complexity, we take advantage of the fact
2862 * that CV is divided by a constant. From Euclid's division algorithm,
2863 * we know that CV can be written as CV = 32*q + r. Substituting CV in
2864 * the luminance expression gives 50*(2**q)*(2**(r/32)), so we only
2865 * need to pre-compute the values of 50*2**(r/32). The pre-computed
2866 * values were generated with the following Ruby line:
2867 * (0...32).each {|cv| puts (50*2**(cv/32.0)).round}
2868 * The results of the above expression can be verified against
2869 * pre_computed_values.
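 * As a worked example (added for illustration): max_cll = 65 gives
 * q = 65 / 32 = 2 and r = 65 % 32 = 1, so
 * max = (1 << 2) * pre_computed_values[1] = 4 * 51 = 204 nits,
 * which agrees with the exact 50 * 2**(65/32.0) = 204.4.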
2873 max = (1 << q) * pre_computed_values[r];
2875 // min luminance: maxLum * (CV/255)^2 / 100
2876 q = DIV_ROUND_CLOSEST(min_cll, 255);
2877 min = max * DIV_ROUND_CLOSEST((q * q), 100);
2879 caps->aux_max_input_signal = max;
2880 caps->aux_min_input_signal = min;
2883 void amdgpu_dm_update_connector_after_detect(
2884 struct amdgpu_dm_connector *aconnector)
2886 struct drm_connector *connector = &aconnector->base;
2887 struct drm_device *dev = connector->dev;
2888 struct dc_sink *sink;
2890 /* MST handled by drm_mst framework */
2891 if (aconnector->mst_mgr.mst_state == true)
2894 sink = aconnector->dc_link->local_sink;
2896 dc_sink_retain(sink);
2899 * Edid mgmt connector gets its first update only in the mode_valid hook;
2900 * then the connector sink is set to either a fake or a physical sink,
2901 * depending on link status. Skip if already done during boot.
2903 if (aconnector->base.force != DRM_FORCE_UNSPECIFIED
2904 && aconnector->dc_em_sink) {
2907 * For S3 resume with headless, use the emulated sink (dc_em_sink) to fake
2908 * the stream, because on resume connector->sink is set to NULL
2910 mutex_lock(&dev->mode_config.mutex);
2913 if (aconnector->dc_sink) {
2914 amdgpu_dm_update_freesync_caps(connector, NULL);
2916 * The retain and release below are used to
2917 * bump up the refcount for the sink because the link doesn't point
2918 * to it anymore after disconnect, so on the next crtc-to-connector
2919 * reshuffle by UMD we would otherwise get an unwanted dc_sink release
2921 dc_sink_release(aconnector->dc_sink);
2923 aconnector->dc_sink = sink;
2924 dc_sink_retain(aconnector->dc_sink);
2925 amdgpu_dm_update_freesync_caps(connector,
2928 amdgpu_dm_update_freesync_caps(connector, NULL);
2929 if (!aconnector->dc_sink) {
2930 aconnector->dc_sink = aconnector->dc_em_sink;
2931 dc_sink_retain(aconnector->dc_sink);
2935 mutex_unlock(&dev->mode_config.mutex);
2938 dc_sink_release(sink);
2943 * TODO: temporary guard until a proper fix is found
2944 * if this sink is an MST sink, we should not do anything
2946 if (sink && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
2947 dc_sink_release(sink);
2951 if (aconnector->dc_sink == sink) {
2953 * We got a DP short pulse (Link Loss, DP CTS, etc...).
2956 DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: dc_sink didn't change.\n",
2957 aconnector->connector_id);
2959 dc_sink_release(sink);
2963 DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: Old sink=%p New sink=%p\n",
2964 aconnector->connector_id, aconnector->dc_sink, sink);
2966 mutex_lock(&dev->mode_config.mutex);
2969 * 1. Update status of the drm connector
2970 * 2. Send an event and let userspace tell us what to do
2974 * TODO: check if we still need the S3 mode update workaround.
2975 * If yes, put it here.
2977 if (aconnector->dc_sink) {
2978 amdgpu_dm_update_freesync_caps(connector, NULL);
2979 dc_sink_release(aconnector->dc_sink);
2982 aconnector->dc_sink = sink;
2983 dc_sink_retain(aconnector->dc_sink);
2984 if (sink->dc_edid.length == 0) {
2985 aconnector->edid = NULL;
2986 if (aconnector->dc_link->aux_mode) {
2987 drm_dp_cec_unset_edid(
2988 &aconnector->dm_dp_aux.aux);
2992 (struct edid *)sink->dc_edid.raw_edid;
2994 if (aconnector->dc_link->aux_mode)
2995 drm_dp_cec_set_edid(&aconnector->dm_dp_aux.aux,
2999 drm_connector_update_edid_property(connector, aconnector->edid);
3000 amdgpu_dm_update_freesync_caps(connector, aconnector->edid);
3001 update_connector_ext_caps(aconnector);
3003 drm_dp_cec_unset_edid(&aconnector->dm_dp_aux.aux);
3004 amdgpu_dm_update_freesync_caps(connector, NULL);
3005 drm_connector_update_edid_property(connector, NULL);
3006 aconnector->num_modes = 0;
3007 dc_sink_release(aconnector->dc_sink);
3008 aconnector->dc_sink = NULL;
3009 aconnector->edid = NULL;
3010 #ifdef CONFIG_DRM_AMD_DC_HDCP
3011 /* Set CP to DESIRED if it was ENABLED, so we can re-enable it again on hotplug */
3012 if (connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
3013 connector->state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
3017 mutex_unlock(&dev->mode_config.mutex);
3019 update_subconnector_property(aconnector);
3022 dc_sink_release(sink);
3025 static void handle_hpd_irq_helper(struct amdgpu_dm_connector *aconnector)
3027 struct drm_connector *connector = &aconnector->base;
3028 struct drm_device *dev = connector->dev;
3029 enum dc_connection_type new_connection_type = dc_connection_none;
3030 struct amdgpu_device *adev = drm_to_adev(dev);
3031 struct dm_connector_state *dm_con_state = to_dm_connector_state(connector->state);
3032 struct dm_crtc_state *dm_crtc_state = NULL;
3034 if (adev->dm.disable_hpd_irq)
3037 if (dm_con_state->base.state && dm_con_state->base.crtc)
3038 dm_crtc_state = to_dm_crtc_state(drm_atomic_get_crtc_state(
3039 dm_con_state->base.state,
3040 dm_con_state->base.crtc));
3042 * In case of failure or MST there is no need to update the connector
3043 * status or notify the OS, since (in the MST case) MST does this in its own context.
3045 mutex_lock(&aconnector->hpd_lock);
3047 #ifdef CONFIG_DRM_AMD_DC_HDCP
3048 if (adev->dm.hdcp_workqueue) {
3049 hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
3050 dm_con_state->update_hdcp = true;
3053 if (aconnector->fake_enable)
3054 aconnector->fake_enable = false;
3056 if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
3057 DRM_ERROR("KMS: Failed to detect connector\n");
3059 if (aconnector->base.force && new_connection_type == dc_connection_none) {
3060 emulated_link_detect(aconnector->dc_link);
3062 drm_modeset_lock_all(dev);
3063 dm_restore_drm_connector_state(dev, connector);
3064 drm_modeset_unlock_all(dev);
3066 if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
3067 drm_kms_helper_connector_hotplug_event(connector);
3069 } else if (dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD)) {
3070 if (new_connection_type == dc_connection_none &&
3071 aconnector->dc_link->type == dc_connection_none &&
3073 dm_set_dpms_off(aconnector->dc_link, dm_crtc_state);
3075 amdgpu_dm_update_connector_after_detect(aconnector);
3077 drm_modeset_lock_all(dev);
3078 dm_restore_drm_connector_state(dev, connector);
3079 drm_modeset_unlock_all(dev);
3081 if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
3082 drm_kms_helper_connector_hotplug_event(connector);
3084 mutex_unlock(&aconnector->hpd_lock);
3088 static void handle_hpd_irq(void *param)
3090 struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
3092 handle_hpd_irq_helper(aconnector);
3096 static void dm_handle_mst_sideband_msg(struct amdgpu_dm_connector *aconnector)
3098 uint8_t esi[DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI] = { 0 };
3100 bool new_irq_handled = false;
3102 int dpcd_bytes_to_read;
3104 const int max_process_count = 30;
3105 int process_count = 0;
3107 const struct dc_link_status *link_status = dc_link_get_status(aconnector->dc_link);
3109 if (link_status->dpcd_caps->dpcd_rev.raw < 0x12) {
3110 dpcd_bytes_to_read = DP_LANE0_1_STATUS - DP_SINK_COUNT;
3111 /* DPCD 0x200 - 0x201 for downstream IRQ */
3112 dpcd_addr = DP_SINK_COUNT;
3114 dpcd_bytes_to_read = DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI;
3115 /* DPCD 0x2002 - 0x2005 for downstream IRQ */
3116 dpcd_addr = DP_SINK_COUNT_ESI;
3119 dret = drm_dp_dpcd_read(
3120 &aconnector->dm_dp_aux.aux,
3123 dpcd_bytes_to_read);
3125 while (dret == dpcd_bytes_to_read &&
3126 process_count < max_process_count) {
3132 DRM_DEBUG_DRIVER("ESI %02x %02x %02x\n", esi[0], esi[1], esi[2]);
3133 /* handle HPD short pulse irq */
3134 if (aconnector->mst_mgr.mst_state)
3136 &aconnector->mst_mgr,
3140 if (new_irq_handled) {
3141 /* ACK at DPCD to notify downstream */
3142 const int ack_dpcd_bytes_to_write =
3143 dpcd_bytes_to_read - 1;
3145 for (retry = 0; retry < 3; retry++) {
3148 wret = drm_dp_dpcd_write(
3149 &aconnector->dm_dp_aux.aux,
3152 ack_dpcd_bytes_to_write);
3153 if (wret == ack_dpcd_bytes_to_write)
3157 /* check if there is new irq to be handled */
3158 dret = drm_dp_dpcd_read(
3159 &aconnector->dm_dp_aux.aux,
3162 dpcd_bytes_to_read);
3164 new_irq_handled = false;
3170 if (process_count == max_process_count)
3171 DRM_DEBUG_DRIVER("Loop exceeded max iterations\n");
3174 static void schedule_hpd_rx_offload_work(struct hpd_rx_irq_offload_work_queue *offload_wq,
3175 union hpd_irq_data hpd_irq_data)
3177 struct hpd_rx_irq_offload_work *offload_work =
3178 kzalloc(sizeof(*offload_work), GFP_KERNEL);
3180 if (!offload_work) {
3181 DRM_ERROR("Failed to allocate hpd_rx_irq_offload_work.\n");
3185 INIT_WORK(&offload_work->work, dm_handle_hpd_rx_offload_work);
3186 offload_work->data = hpd_irq_data;
3187 offload_work->offload_wq = offload_wq;
3189 queue_work(offload_wq->wq, &offload_work->work);
3190 DRM_DEBUG_KMS("queue work to handle hpd_rx offload work");
3193 static void handle_hpd_rx_irq(void *param)
3195 struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
3196 struct drm_connector *connector = &aconnector->base;
3197 struct drm_device *dev = connector->dev;
3198 struct dc_link *dc_link = aconnector->dc_link;
3199 bool is_mst_root_connector = aconnector->mst_mgr.mst_state;
3200 bool result = false;
3201 enum dc_connection_type new_connection_type = dc_connection_none;
3202 struct amdgpu_device *adev = drm_to_adev(dev);
3203 union hpd_irq_data hpd_irq_data;
3204 bool link_loss = false;
3205 bool has_left_work = false;
3206 int idx = aconnector->base.index;
3207 struct hpd_rx_irq_offload_work_queue *offload_wq = &adev->dm.hpd_rx_offload_wq[idx];
3209 memset(&hpd_irq_data, 0, sizeof(hpd_irq_data));
3211 if (adev->dm.disable_hpd_irq)
3215 * TODO: Temporarily add a mutex so the hpd interrupt does not have a gpio
3216 * conflict; after the i2c helper is implemented, this mutex should be retired.
3219 mutex_lock(&aconnector->hpd_lock);
3221 result = dc_link_handle_hpd_rx_irq(dc_link, &hpd_irq_data,
3222 &link_loss, true, &has_left_work);
3227 if (hpd_irq_data.bytes.device_service_irq.bits.AUTOMATED_TEST) {
3228 schedule_hpd_rx_offload_work(offload_wq, hpd_irq_data);
3232 if (dc_link_dp_allow_hpd_rx_irq(dc_link)) {
3233 if (hpd_irq_data.bytes.device_service_irq.bits.UP_REQ_MSG_RDY ||
3234 hpd_irq_data.bytes.device_service_irq.bits.DOWN_REP_MSG_RDY) {
3235 dm_handle_mst_sideband_msg(aconnector);
3242 spin_lock(&offload_wq->offload_lock);
3243 skip = offload_wq->is_handling_link_loss;
3246 offload_wq->is_handling_link_loss = true;
3248 spin_unlock(&offload_wq->offload_lock);
3251 schedule_hpd_rx_offload_work(offload_wq, hpd_irq_data);
3258 if (result && !is_mst_root_connector) {
3259 /* Downstream Port status changed. */
3260 if (!dc_link_detect_sink(dc_link, &new_connection_type))
3261 DRM_ERROR("KMS: Failed to detect connector\n");
3263 if (aconnector->base.force && new_connection_type == dc_connection_none) {
3264 emulated_link_detect(dc_link);
3266 if (aconnector->fake_enable)
3267 aconnector->fake_enable = false;
3269 amdgpu_dm_update_connector_after_detect(aconnector);
3272 drm_modeset_lock_all(dev);
3273 dm_restore_drm_connector_state(dev, connector);
3274 drm_modeset_unlock_all(dev);
3276 drm_kms_helper_connector_hotplug_event(connector);
3277 } else if (dc_link_detect(dc_link, DETECT_REASON_HPDRX)) {
3279 if (aconnector->fake_enable)
3280 aconnector->fake_enable = false;
3282 amdgpu_dm_update_connector_after_detect(aconnector);
3285 drm_modeset_lock_all(dev);
3286 dm_restore_drm_connector_state(dev, connector);
3287 drm_modeset_unlock_all(dev);
3289 drm_kms_helper_connector_hotplug_event(connector);
3292 #ifdef CONFIG_DRM_AMD_DC_HDCP
3293 if (hpd_irq_data.bytes.device_service_irq.bits.CP_IRQ) {
3294 if (adev->dm.hdcp_workqueue)
3295 hdcp_handle_cpirq(adev->dm.hdcp_workqueue, aconnector->base.index);
3299 if (dc_link->type != dc_connection_mst_branch)
3300 drm_dp_cec_irq(&aconnector->dm_dp_aux.aux);
3302 mutex_unlock(&aconnector->hpd_lock);
3305 static void register_hpd_handlers(struct amdgpu_device *adev)
3307 struct drm_device *dev = adev_to_drm(adev);
3308 struct drm_connector *connector;
3309 struct amdgpu_dm_connector *aconnector;
3310 const struct dc_link *dc_link;
3311 struct dc_interrupt_params int_params = {0};
3313 int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3314 int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3316 list_for_each_entry(connector,
3317 &dev->mode_config.connector_list, head) {
3319 aconnector = to_amdgpu_dm_connector(connector);
3320 dc_link = aconnector->dc_link;
3322 if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd) {
3323 int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
3324 int_params.irq_source = dc_link->irq_source_hpd;
3326 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3328 (void *) aconnector);
3331 if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd_rx) {
3333 /* Also register for DP short pulse (hpd_rx). */
3334 int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
3335 int_params.irq_source = dc_link->irq_source_hpd_rx;
3337 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3339 (void *) aconnector);
3341 if (adev->dm.hpd_rx_offload_wq)
3342 adev->dm.hpd_rx_offload_wq[connector->index].aconnector =
3348 #if defined(CONFIG_DRM_AMD_DC_SI)
3349 /* Register IRQ sources and initialize IRQ callbacks */
3350 static int dce60_register_irq_handlers(struct amdgpu_device *adev)
3352 struct dc *dc = adev->dm.dc;
3353 struct common_irq_params *c_irq_params;
3354 struct dc_interrupt_params int_params = {0};
3357 unsigned client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
3359 int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3360 int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3363 * Actions of amdgpu_irq_add_id():
3364 * 1. Register a set() function with base driver.
3365 * Base driver will call set() function to enable/disable an
3366 * interrupt in DC hardware.
3367 * 2. Register amdgpu_dm_irq_handler().
3368 * Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
3369 * coming from DC hardware.
3370 * amdgpu_dm_irq_handler() will re-direct the interrupt to DC
3371 * for acknowledging and handling. */
3373 /* Use VBLANK interrupt */
3374 for (i = 0; i < adev->mode_info.num_crtc; i++) {
3375 r = amdgpu_irq_add_id(adev, client_id, i + 1, &adev->crtc_irq);
3377 DRM_ERROR("Failed to add crtc irq id!\n");
3381 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3382 int_params.irq_source =
3383 dc_interrupt_to_irq_source(dc, i + 1, 0);
3385 c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
3387 c_irq_params->adev = adev;
3388 c_irq_params->irq_src = int_params.irq_source;
3390 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3391 dm_crtc_high_irq, c_irq_params);
3394 /* Use GRPH_PFLIP interrupt */
3395 for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
3396 i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
3397 r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
3399 DRM_ERROR("Failed to add page flip irq id!\n");
3403 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3404 int_params.irq_source =
3405 dc_interrupt_to_irq_source(dc, i, 0);
3407 c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
3409 c_irq_params->adev = adev;
3410 c_irq_params->irq_src = int_params.irq_source;
3412 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3413 dm_pflip_high_irq, c_irq_params);
3418 r = amdgpu_irq_add_id(adev, client_id,
3419 VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
3421 DRM_ERROR("Failed to add hpd irq id!\n");
3425 register_hpd_handlers(adev);
3431 /* Register IRQ sources and initialize IRQ callbacks */
3432 static int dce110_register_irq_handlers(struct amdgpu_device *adev)
3434 struct dc *dc = adev->dm.dc;
3435 struct common_irq_params *c_irq_params;
3436 struct dc_interrupt_params int_params = {0};
3439 unsigned client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
3441 if (adev->family >= AMDGPU_FAMILY_AI)
3442 client_id = SOC15_IH_CLIENTID_DCE;
3444 int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3445 int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3448 * Actions of amdgpu_irq_add_id():
3449 * 1. Register a set() function with base driver.
3450 * Base driver will call set() function to enable/disable an
3451 * interrupt in DC hardware.
3452 * 2. Register amdgpu_dm_irq_handler().
3453 * Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
3454 * coming from DC hardware.
3455 * amdgpu_dm_irq_handler() will re-direct the interrupt to DC
3456 * for acknowledging and handling. */
3458 /* Use VBLANK interrupt */
3459 for (i = VISLANDS30_IV_SRCID_D1_VERTICAL_INTERRUPT0; i <= VISLANDS30_IV_SRCID_D6_VERTICAL_INTERRUPT0; i++) {
3460 r = amdgpu_irq_add_id(adev, client_id, i, &adev->crtc_irq);
3462 DRM_ERROR("Failed to add crtc irq id!\n");
3466 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3467 int_params.irq_source =
3468 dc_interrupt_to_irq_source(dc, i, 0);
3470 c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
3472 c_irq_params->adev = adev;
3473 c_irq_params->irq_src = int_params.irq_source;
3475 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3476 dm_crtc_high_irq, c_irq_params);
3479 /* Use VUPDATE interrupt */
3480 for (i = VISLANDS30_IV_SRCID_D1_V_UPDATE_INT; i <= VISLANDS30_IV_SRCID_D6_V_UPDATE_INT; i += 2) {
3481 r = amdgpu_irq_add_id(adev, client_id, i, &adev->vupdate_irq);
3483 DRM_ERROR("Failed to add vupdate irq id!\n");
3487 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3488 int_params.irq_source =
3489 dc_interrupt_to_irq_source(dc, i, 0);
3491 c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
3493 c_irq_params->adev = adev;
3494 c_irq_params->irq_src = int_params.irq_source;
3496 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3497 dm_vupdate_high_irq, c_irq_params);
3500 /* Use GRPH_PFLIP interrupt */
3501 for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
3502 i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
3503 r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
3505 DRM_ERROR("Failed to add page flip irq id!\n");
3509 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3510 int_params.irq_source =
3511 dc_interrupt_to_irq_source(dc, i, 0);
3513 c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
3515 c_irq_params->adev = adev;
3516 c_irq_params->irq_src = int_params.irq_source;
3518 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3519 dm_pflip_high_irq, c_irq_params);
3524 r = amdgpu_irq_add_id(adev, client_id,
3525 VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
3527 DRM_ERROR("Failed to add hpd irq id!\n");
3531 register_hpd_handlers(adev);
3536 #if defined(CONFIG_DRM_AMD_DC_DCN)
3537 /* Register IRQ sources and initialize IRQ callbacks */
3538 static int dcn10_register_irq_handlers(struct amdgpu_device *adev)
3540 struct dc *dc = adev->dm.dc;
3541 struct common_irq_params *c_irq_params;
3542 struct dc_interrupt_params int_params = {0};
3545 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
3546 static const unsigned int vrtl_int_srcid[] = {
3547 DCN_1_0__SRCID__OTG1_VERTICAL_INTERRUPT0_CONTROL,
3548 DCN_1_0__SRCID__OTG2_VERTICAL_INTERRUPT0_CONTROL,
3549 DCN_1_0__SRCID__OTG3_VERTICAL_INTERRUPT0_CONTROL,
3550 DCN_1_0__SRCID__OTG4_VERTICAL_INTERRUPT0_CONTROL,
3551 DCN_1_0__SRCID__OTG5_VERTICAL_INTERRUPT0_CONTROL,
3552 DCN_1_0__SRCID__OTG6_VERTICAL_INTERRUPT0_CONTROL
3556 int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3557 int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3560 * Actions of amdgpu_irq_add_id():
3561 * 1. Register a set() function with base driver.
3562 * Base driver will call set() function to enable/disable an
3563 * interrupt in DC hardware.
3564 * 2. Register amdgpu_dm_irq_handler().
3565 * Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
3566 * coming from DC hardware.
3567 * amdgpu_dm_irq_handler() will re-direct the interrupt to DC
3568 * for acknowledging and handling.
3571 /* Use VSTARTUP interrupt */
3572 for (i = DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP;
3573 i <= DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP + adev->mode_info.num_crtc - 1;
3575 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->crtc_irq);
3578 DRM_ERROR("Failed to add crtc irq id!\n");
3582 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3583 int_params.irq_source =
3584 dc_interrupt_to_irq_source(dc, i, 0);
3586 c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
3588 c_irq_params->adev = adev;
3589 c_irq_params->irq_src = int_params.irq_source;
3591 amdgpu_dm_irq_register_interrupt(
3592 adev, &int_params, dm_crtc_high_irq, c_irq_params);
3595 /* Use otg vertical line interrupt */
3596 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
3597 for (i = 0; i <= adev->mode_info.num_crtc - 1; i++) {
3598 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE,
3599 vrtl_int_srcid[i], &adev->vline0_irq);
3602 DRM_ERROR("Failed to add vline0 irq id!\n");
3606 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3607 int_params.irq_source =
3608 dc_interrupt_to_irq_source(dc, vrtl_int_srcid[i], 0);
3610 if (int_params.irq_source == DC_IRQ_SOURCE_INVALID) {
3611 DRM_ERROR("Failed to register vline0 irq %d!\n", vrtl_int_srcid[i]);
3615 c_irq_params = &adev->dm.vline0_params[int_params.irq_source
3616 - DC_IRQ_SOURCE_DC1_VLINE0];
3618 c_irq_params->adev = adev;
3619 c_irq_params->irq_src = int_params.irq_source;
3621 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3622 dm_dcn_vertical_interrupt0_high_irq, c_irq_params);
3626 /* Use VUPDATE_NO_LOCK interrupt on DCN, which seems to correspond to
3627 * the regular VUPDATE interrupt on DCE. We want DC_IRQ_SOURCE_VUPDATEx
3628 * to trigger at end of each vblank, regardless of state of the lock,
3629 * matching DCE behaviour.
3631 for (i = DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT;
3632 i <= DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT + adev->mode_info.num_crtc - 1;
3634 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->vupdate_irq);
3637 DRM_ERROR("Failed to add vupdate irq id!\n");
3641 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3642 int_params.irq_source =
3643 dc_interrupt_to_irq_source(dc, i, 0);
3645 c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
3647 c_irq_params->adev = adev;
3648 c_irq_params->irq_src = int_params.irq_source;
3650 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3651 dm_vupdate_high_irq, c_irq_params);
3654 /* Use GRPH_PFLIP interrupt */
3655 for (i = DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT;
3656 i <= DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT + adev->mode_info.num_crtc - 1;
3658 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->pageflip_irq);
3660 DRM_ERROR("Failed to add page flip irq id!\n");
3664 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3665 int_params.irq_source =
3666 dc_interrupt_to_irq_source(dc, i, 0);
3668 c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
3670 c_irq_params->adev = adev;
3671 c_irq_params->irq_src = int_params.irq_source;
3673 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3674 dm_pflip_high_irq, c_irq_params);
3679 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DC_HPD1_INT,
3682 DRM_ERROR("Failed to add hpd irq id!\n");
3686 register_hpd_handlers(adev);
3690 /* Register Outbox IRQ sources and initialize IRQ callbacks */
3691 static int register_outbox_irq_handlers(struct amdgpu_device *adev)
3693 struct dc *dc = adev->dm.dc;
3694 struct common_irq_params *c_irq_params;
3695 struct dc_interrupt_params int_params = {0};
3698 int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3699 int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3701 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DMCUB_OUTBOX_LOW_PRIORITY_READY_INT,
3702 &adev->dmub_outbox_irq);
3704 DRM_ERROR("Failed to add outbox irq id!\n");
3708 if (dc->ctx->dmub_srv) {
3709 i = DCN_1_0__SRCID__DMCUB_OUTBOX_LOW_PRIORITY_READY_INT;
3710 int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
3711 int_params.irq_source =
3712 dc_interrupt_to_irq_source(dc, i, 0);
3714 c_irq_params = &adev->dm.dmub_outbox_params[0];
3716 c_irq_params->adev = adev;
3717 c_irq_params->irq_src = int_params.irq_source;
3719 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3720 dm_dmub_outbox1_low_irq, c_irq_params);
3728 * Acquires the lock for the atomic state object and returns
3729 * the new atomic state.
3731 * This should only be called during atomic check.
3733 static int dm_atomic_get_state(struct drm_atomic_state *state,
3734 struct dm_atomic_state **dm_state)
3736 struct drm_device *dev = state->dev;
3737 struct amdgpu_device *adev = drm_to_adev(dev);
3738 struct amdgpu_display_manager *dm = &adev->dm;
3739 struct drm_private_state *priv_state;
3744 priv_state = drm_atomic_get_private_obj_state(state, &dm->atomic_obj);
3745 if (IS_ERR(priv_state))
3746 return PTR_ERR(priv_state);
3748 *dm_state = to_dm_atomic_state(priv_state);
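/* A minimal usage sketch (illustrative, not a call site from this file):
 * inside an atomic check callback one would typically do
 *
 *	struct dm_atomic_state *dm_state;
 *	int ret = dm_atomic_get_state(state, &dm_state);
 *
 *	if (ret)
 *		return ret;
 *
 * after which dm_state->context holds the DC state for this commit.
 */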
3753 static struct dm_atomic_state *
3754 dm_atomic_get_new_state(struct drm_atomic_state *state)
3756 struct drm_device *dev = state->dev;
3757 struct amdgpu_device *adev = drm_to_adev(dev);
3758 struct amdgpu_display_manager *dm = &adev->dm;
3759 struct drm_private_obj *obj;
3760 struct drm_private_state *new_obj_state;
3763 for_each_new_private_obj_in_state(state, obj, new_obj_state, i) {
3764 if (obj->funcs == dm->atomic_obj.funcs)
3765 return to_dm_atomic_state(new_obj_state);
3771 static struct drm_private_state *
3772 dm_atomic_duplicate_state(struct drm_private_obj *obj)
3774 struct dm_atomic_state *old_state, *new_state;
3776 new_state = kzalloc(sizeof(*new_state), GFP_KERNEL);
3780 __drm_atomic_helper_private_obj_duplicate_state(obj, &new_state->base);
3782 old_state = to_dm_atomic_state(obj->state);
3784 if (old_state && old_state->context)
3785 new_state->context = dc_copy_state(old_state->context);
3787 if (!new_state->context) {
3792 return &new_state->base;
3795 static void dm_atomic_destroy_state(struct drm_private_obj *obj,
3796 struct drm_private_state *state)
3798 struct dm_atomic_state *dm_state = to_dm_atomic_state(state);
3800 if (dm_state && dm_state->context)
3801 dc_release_state(dm_state->context);
3806 static struct drm_private_state_funcs dm_atomic_state_funcs = {
3807 .atomic_duplicate_state = dm_atomic_duplicate_state,
3808 .atomic_destroy_state = dm_atomic_destroy_state,
3811 static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev)
3813 struct dm_atomic_state *state;
3816 adev->mode_info.mode_config_initialized = true;
3818 adev_to_drm(adev)->mode_config.funcs = (void *)&amdgpu_dm_mode_funcs;
3819 adev_to_drm(adev)->mode_config.helper_private = &amdgpu_dm_mode_config_helperfuncs;
3821 adev_to_drm(adev)->mode_config.max_width = 16384;
3822 adev_to_drm(adev)->mode_config.max_height = 16384;
3824 adev_to_drm(adev)->mode_config.preferred_depth = 24;
3825 adev_to_drm(adev)->mode_config.prefer_shadow = 1;
3826 /* indicates support for immediate flip */
3827 adev_to_drm(adev)->mode_config.async_page_flip = true;
3829 adev_to_drm(adev)->mode_config.fb_base = adev->gmc.aper_base;
3831 state = kzalloc(sizeof(*state), GFP_KERNEL);
3835 state->context = dc_create_state(adev->dm.dc);
3836 if (!state->context) {
3841 dc_resource_state_copy_construct_current(adev->dm.dc, state->context);
3843 drm_atomic_private_obj_init(adev_to_drm(adev),
3844 &adev->dm.atomic_obj,
3846 &dm_atomic_state_funcs);
3848 r = amdgpu_display_modeset_create_props(adev);
3850 dc_release_state(state->context);
3855 r = amdgpu_dm_audio_init(adev);
3857 dc_release_state(state->context);
3865 #define AMDGPU_DM_DEFAULT_MIN_BACKLIGHT 12
3866 #define AMDGPU_DM_DEFAULT_MAX_BACKLIGHT 255
3867 #define AUX_BL_DEFAULT_TRANSITION_TIME_MS 50
3869 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
3870 defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
3872 static void amdgpu_dm_update_backlight_caps(struct amdgpu_display_manager *dm,
3875 #if defined(CONFIG_ACPI)
3876 struct amdgpu_dm_backlight_caps caps;
3878 memset(&caps, 0, sizeof(caps));
3880 if (dm->backlight_caps[bl_idx].caps_valid)
3883 amdgpu_acpi_get_backlight_caps(&caps);
3884 if (caps.caps_valid) {
3885 dm->backlight_caps[bl_idx].caps_valid = true;
3886 if (caps.aux_support)
3888 dm->backlight_caps[bl_idx].min_input_signal = caps.min_input_signal;
3889 dm->backlight_caps[bl_idx].max_input_signal = caps.max_input_signal;
3891 dm->backlight_caps[bl_idx].min_input_signal =
3892 AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
3893 dm->backlight_caps[bl_idx].max_input_signal =
3894 AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
3897 if (dm->backlight_caps[bl_idx].aux_support)
3900 dm->backlight_caps[bl_idx].min_input_signal = AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
3901 dm->backlight_caps[bl_idx].max_input_signal = AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
3905 static int get_brightness_range(const struct amdgpu_dm_backlight_caps *caps,
3906 unsigned *min, unsigned *max)
3911 if (caps->aux_support) {
3912 // Firmware limits are in nits, DC API wants millinits.
3913 *max = 1000 * caps->aux_max_input_signal;
3914 *min = 1000 * caps->aux_min_input_signal;
3916 // Firmware limits are 8-bit, PWM control is 16-bit.
3917 *max = 0x101 * caps->max_input_signal;
3918 *min = 0x101 * caps->min_input_signal;
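/* Added note: the 0x101 factor maps the 8-bit firmware range onto the full
 * 16-bit PWM range exactly, since 0x101 * 0xFF = 0xFFFF.
 */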
3923 static u32 convert_brightness_from_user(const struct amdgpu_dm_backlight_caps *caps,
3924 uint32_t brightness)
3928 if (!get_brightness_range(caps, &min, &max))
3931 // Rescale 0..255 to min..max
3932 return min + DIV_ROUND_CLOSEST((max - min) * brightness,
3933 AMDGPU_MAX_BL_LEVEL);
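/* A sketch of the arithmetic (assuming the default caps above, i.e.
 * min_input_signal = 12 and max_input_signal = 255): min = 0x101 * 12 = 3084
 * and max = 0x101 * 255 = 65535, so a user brightness of 128 maps to
 * 3084 + DIV_ROUND_CLOSEST(62451 * 128, 255) = 3084 + 31348 = 34432.
 */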
3936 static u32 convert_brightness_to_user(const struct amdgpu_dm_backlight_caps *caps,
3937 uint32_t brightness)
3941 if (!get_brightness_range(caps, &min, &max))
3944 if (brightness < min)
3946 // Rescale min..max to 0..255
3947 return DIV_ROUND_CLOSEST(AMDGPU_MAX_BL_LEVEL * (brightness - min),
3951 static int amdgpu_dm_backlight_set_level(struct amdgpu_display_manager *dm,
3953 u32 user_brightness)
3955 struct amdgpu_dm_backlight_caps caps;
3956 struct dc_link *link;
3960 amdgpu_dm_update_backlight_caps(dm, bl_idx);
3961 caps = dm->backlight_caps[bl_idx];
3963 dm->brightness[bl_idx] = user_brightness;
3964 /* update scratch register */
3966 amdgpu_atombios_scratch_regs_set_backlight_level(dm->adev, dm->brightness[bl_idx]);
3967 brightness = convert_brightness_from_user(&caps, dm->brightness[bl_idx]);
3968 link = (struct dc_link *)dm->backlight_link[bl_idx];
3970 /* Change brightness based on AUX property */
3971 if (caps.aux_support) {
3972 rc = dc_link_set_backlight_level_nits(link, true, brightness,
3973 AUX_BL_DEFAULT_TRANSITION_TIME_MS);
3975 DRM_DEBUG("DM: Failed to update backlight via AUX on eDP[%d]\n", bl_idx);
3977 rc = dc_link_set_backlight_level(link, brightness, 0);
3979 DRM_DEBUG("DM: Failed to update backlight on eDP[%d]\n", bl_idx);
3985 static int amdgpu_dm_backlight_update_status(struct backlight_device *bd)
3987 struct amdgpu_display_manager *dm = bl_get_data(bd);
3990 for (i = 0; i < dm->num_of_edps; i++) {
3991 if (bd == dm->backlight_dev[i])
3994 if (i >= AMDGPU_DM_MAX_NUM_EDP)
3996 amdgpu_dm_backlight_set_level(dm, i, bd->props.brightness);
4001 static u32 amdgpu_dm_backlight_get_level(struct amdgpu_display_manager *dm,
4004 struct amdgpu_dm_backlight_caps caps;
4005 struct dc_link *link = (struct dc_link *)dm->backlight_link[bl_idx];
4007 amdgpu_dm_update_backlight_caps(dm, bl_idx);
4008 caps = dm->backlight_caps[bl_idx];
4010 if (caps.aux_support) {
4014 rc = dc_link_get_backlight_level_nits(link, &avg, &peak);
4016 return dm->brightness[bl_idx];
4017 return convert_brightness_to_user(&caps, avg);
4019 int ret = dc_link_get_backlight_level(link);
4021 if (ret == DC_ERROR_UNEXPECTED)
4022 return dm->brightness[bl_idx];
4023 return convert_brightness_to_user(&caps, ret);
4027 static int amdgpu_dm_backlight_get_brightness(struct backlight_device *bd)
4029 struct amdgpu_display_manager *dm = bl_get_data(bd);
4032 for (i = 0; i < dm->num_of_edps; i++) {
4033 if (bd == dm->backlight_dev[i])
4036 if (i >= AMDGPU_DM_MAX_NUM_EDP)
4038 return amdgpu_dm_backlight_get_level(dm, i);
4041 static const struct backlight_ops amdgpu_dm_backlight_ops = {
4042 .options = BL_CORE_SUSPENDRESUME,
4043 .get_brightness = amdgpu_dm_backlight_get_brightness,
4044 .update_status = amdgpu_dm_backlight_update_status,
4048 amdgpu_dm_register_backlight_device(struct amdgpu_display_manager *dm)
4051 struct backlight_properties props = { 0 };
4053 amdgpu_dm_update_backlight_caps(dm, dm->num_of_edps);
4054 dm->brightness[dm->num_of_edps] = AMDGPU_MAX_BL_LEVEL;
4056 props.max_brightness = AMDGPU_MAX_BL_LEVEL;
4057 props.brightness = AMDGPU_MAX_BL_LEVEL;
4058 props.type = BACKLIGHT_RAW;
4060 snprintf(bl_name, sizeof(bl_name), "amdgpu_bl%d",
4061 adev_to_drm(dm->adev)->primary->index + dm->num_of_edps);
4063 dm->backlight_dev[dm->num_of_edps] = backlight_device_register(bl_name,
4064 adev_to_drm(dm->adev)->dev,
4066 &amdgpu_dm_backlight_ops,
4069 if (IS_ERR(dm->backlight_dev[dm->num_of_edps]))
4070 DRM_ERROR("DM: Backlight registration failed!\n");
4072 DRM_DEBUG_DRIVER("DM: Registered Backlight device: %s\n", bl_name);
static int initialize_plane(struct amdgpu_display_manager *dm,
			    struct amdgpu_mode_info *mode_info, int plane_id,
			    enum drm_plane_type plane_type,
			    const struct dc_plane_cap *plane_cap)
{
	struct drm_plane *plane;
	unsigned long possible_crtcs;
	int ret = 0;

	plane = kzalloc(sizeof(struct drm_plane), GFP_KERNEL);
	if (!plane) {
		DRM_ERROR("KMS: Failed to allocate plane\n");
		return -ENOMEM;
	}
	plane->type = plane_type;

	/*
	 * HACK: IGT tests expect that the primary plane for a CRTC
	 * can only have one possible CRTC. Only expose support for
	 * any CRTC if they're not going to be used as a primary plane
	 * for a CRTC - like overlay or underlay planes.
	 */
	possible_crtcs = 1 << plane_id;
	if (plane_id >= dm->dc->caps.max_streams)
		possible_crtcs = 0xff;
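	/*
	 * Worked example (illustrative): with plane_id == 2 the mask is
	 * 1 << 2 == 0b100, binding the plane to CRTC 2 only; a plane_id
	 * at or above max_streams (i.e. an overlay) gets 0xff and may be
	 * assigned to any of the first eight CRTCs.
	 */
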
	ret = amdgpu_dm_plane_init(dm, plane, possible_crtcs, plane_cap);

	if (ret) {
		DRM_ERROR("KMS: Failed to initialize plane\n");
		kfree(plane);
		return ret;
	}

	if (mode_info)
		mode_info->planes[plane_id] = plane;

	return ret;
}
static void register_backlight_device(struct amdgpu_display_manager *dm,
				      struct dc_link *link)
{
#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
	defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)

	if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
	    link->type != dc_connection_none) {
		/*
		 * Even if registration failed, we should continue with
		 * DM initialization because not having a backlight control
		 * is better than a black screen.
		 */
		if (!dm->backlight_dev[dm->num_of_edps])
			amdgpu_dm_register_backlight_device(dm);

		if (dm->backlight_dev[dm->num_of_edps]) {
			dm->backlight_link[dm->num_of_edps] = link;
			dm->num_of_edps++;
		}
	}
#endif
}
/*
 * In this architecture, the association
 * connector -> encoder -> crtc
 * is not really required. The crtc and connector will hold the
 * display_index as an abstraction to use with DAL component
 *
 * Returns 0 on success
 */
static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
{
	struct amdgpu_display_manager *dm = &adev->dm;
	int32_t i;
	struct amdgpu_dm_connector *aconnector = NULL;
	struct amdgpu_encoder *aencoder = NULL;
	struct amdgpu_mode_info *mode_info = &adev->mode_info;
	uint32_t link_cnt;
	int32_t primary_planes;
	enum dc_connection_type new_connection_type = dc_connection_none;
	const struct dc_plane_cap *plane;
	bool psr_feature_enabled = false;

	dm->display_indexes_num = dm->dc->caps.max_streams;
	/* Update the actual used number of crtc */
	adev->mode_info.num_crtc = adev->dm.display_indexes_num;

	link_cnt = dm->dc->caps.max_links;
	if (amdgpu_dm_mode_config_init(dm->adev)) {
		DRM_ERROR("DM: Failed to initialize mode config\n");
		return -EINVAL;
	}

	/* There is one primary plane per CRTC */
	primary_planes = dm->dc->caps.max_streams;
	ASSERT(primary_planes <= AMDGPU_MAX_PLANES);
	/*
	 * Initialize primary planes, implicit planes for legacy IOCTLS.
	 * Order is reversed to match iteration order in atomic check.
	 */
	for (i = (primary_planes - 1); i >= 0; i--) {
		plane = &dm->dc->caps.planes[i];

		if (initialize_plane(dm, mode_info, i,
				     DRM_PLANE_TYPE_PRIMARY, plane)) {
			DRM_ERROR("KMS: Failed to initialize primary plane\n");
			goto fail;
		}
	}
	/*
	 * Initialize overlay planes, index starting after primary planes.
	 * These planes have a higher DRM index than the primary planes since
	 * they should be considered as having a higher z-order.
	 * Order is reversed to match iteration order in atomic check.
	 *
	 * Only support DCN for now, and only expose one so we don't encourage
	 * userspace to use up all the pipes.
	 */
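	/*
	 * Illustrative layout: with max_streams == 4, primaries occupy DRM
	 * plane indexes 0..3 and the single exposed overlay is created at
	 * primary_planes + i, which also lands it in the possible_crtcs ==
	 * 0xff branch of initialize_plane() above.
	 */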
	for (i = 0; i < dm->dc->caps.max_planes; ++i) {
		struct dc_plane_cap *plane = &dm->dc->caps.planes[i];

		if (plane->type != DC_PLANE_TYPE_DCN_UNIVERSAL)
			continue;

		if (!plane->blends_with_above || !plane->blends_with_below)
			continue;

		if (!plane->pixel_format_support.argb8888)
			continue;

		if (initialize_plane(dm, NULL, primary_planes + i,
				     DRM_PLANE_TYPE_OVERLAY, plane)) {
			DRM_ERROR("KMS: Failed to initialize overlay plane\n");
			goto fail;
		}

		/* Only create one overlay plane. */
		break;
	}

	for (i = 0; i < dm->dc->caps.max_streams; i++)
		if (amdgpu_dm_crtc_init(dm, mode_info->planes[i], i)) {
			DRM_ERROR("KMS: Failed to initialize crtc\n");
			goto fail;
		}
#if defined(CONFIG_DRM_AMD_DC_DCN)
	/* Use Outbox interrupt */
	switch (adev->ip_versions[DCE_HWIP][0]) {
	case IP_VERSION(3, 0, 0):
	case IP_VERSION(3, 1, 2):
	case IP_VERSION(3, 1, 3):
	case IP_VERSION(2, 1, 0):
		if (register_outbox_irq_handlers(dm->adev)) {
			DRM_ERROR("DM: Failed to initialize IRQ\n");
			goto fail;
		}
		break;
	default:
		DRM_DEBUG_KMS("Unsupported DCN IP version for outbox: 0x%X\n",
			      adev->ip_versions[DCE_HWIP][0]);
	}
	/* Determine whether to enable PSR support by default. */
	if (!(amdgpu_dc_debug_mask & DC_DISABLE_PSR)) {
		switch (adev->ip_versions[DCE_HWIP][0]) {
		case IP_VERSION(3, 1, 2):
		case IP_VERSION(3, 1, 3):
			psr_feature_enabled = true;
			break;
		default:
			psr_feature_enabled = amdgpu_dc_feature_mask & DC_PSR_MASK;
			break;
		}
	}
#endif
	/* loops over all connectors on the board */
	for (i = 0; i < link_cnt; i++) {
		struct dc_link *link = NULL;

		if (i > AMDGPU_DM_MAX_DISPLAY_INDEX) {
			DRM_ERROR(
				"KMS: Cannot support more than %d display indexes\n",
					AMDGPU_DM_MAX_DISPLAY_INDEX);
			continue;
		}

		aconnector = kzalloc(sizeof(*aconnector), GFP_KERNEL);
		if (!aconnector)
			goto fail;

		aencoder = kzalloc(sizeof(*aencoder), GFP_KERNEL);
		if (!aencoder)
			goto fail;

		if (amdgpu_dm_encoder_init(dm->ddev, aencoder, i)) {
			DRM_ERROR("KMS: Failed to initialize encoder\n");
			goto fail;
		}

		if (amdgpu_dm_connector_init(dm, aconnector, i, aencoder)) {
			DRM_ERROR("KMS: Failed to initialize connector\n");
			goto fail;
		}

		link = dc_get_link_at_index(dm->dc, i);

		if (!dc_link_detect_sink(link, &new_connection_type))
			DRM_ERROR("KMS: Failed to detect connector\n");

		if (aconnector->base.force && new_connection_type == dc_connection_none) {
			emulated_link_detect(link);
			amdgpu_dm_update_connector_after_detect(aconnector);

		} else if (dc_link_detect(link, DETECT_REASON_BOOT)) {
			amdgpu_dm_update_connector_after_detect(aconnector);
			register_backlight_device(dm, link);
			if (dm->num_of_edps)
				update_connector_ext_caps(aconnector);
			if (psr_feature_enabled)
				amdgpu_dm_set_psr_caps(link);
		}
	}
	/*
	 * Disable vblank IRQs aggressively for power-saving.
	 *
	 * TODO: Fix vblank control helpers to delay PSR entry to allow this when PSR
	 * is also supported.
	 */
	adev_to_drm(adev)->vblank_disable_immediate = !psr_feature_enabled;
	/* Software is initialized. Now we can register interrupt handlers. */
	switch (adev->asic_type) {
#if defined(CONFIG_DRM_AMD_DC_SI)
	case CHIP_TAHITI:
	case CHIP_PITCAIRN:
	case CHIP_VERDE:
	case CHIP_OLAND:
		if (dce60_register_irq_handlers(dm->adev)) {
			DRM_ERROR("DM: Failed to initialize IRQ\n");
			goto fail;
		}
		break;
#endif
	case CHIP_BONAIRE:
	case CHIP_HAWAII:
	case CHIP_KAVERI:
	case CHIP_KABINI:
	case CHIP_MULLINS:
	case CHIP_TONGA:
	case CHIP_FIJI:
	case CHIP_CARRIZO:
	case CHIP_STONEY:
	case CHIP_POLARIS11:
	case CHIP_POLARIS10:
	case CHIP_POLARIS12:
	case CHIP_VEGAM:
	case CHIP_VEGA10:
	case CHIP_VEGA12:
	case CHIP_VEGA20:
		if (dce110_register_irq_handlers(dm->adev)) {
			DRM_ERROR("DM: Failed to initialize IRQ\n");
			goto fail;
		}
		break;
	default:
#if defined(CONFIG_DRM_AMD_DC_DCN)
		switch (adev->ip_versions[DCE_HWIP][0]) {
		case IP_VERSION(1, 0, 0):
		case IP_VERSION(1, 0, 1):
		case IP_VERSION(2, 0, 2):
		case IP_VERSION(2, 0, 3):
		case IP_VERSION(2, 0, 0):
		case IP_VERSION(2, 1, 0):
		case IP_VERSION(3, 0, 0):
		case IP_VERSION(3, 0, 2):
		case IP_VERSION(3, 0, 3):
		case IP_VERSION(3, 0, 1):
		case IP_VERSION(3, 1, 2):
		case IP_VERSION(3, 1, 3):
			if (dcn10_register_irq_handlers(dm->adev)) {
				DRM_ERROR("DM: Failed to initialize IRQ\n");
				goto fail;
			}
			break;
		default:
			DRM_ERROR("Unsupported DCE IP versions: 0x%X\n",
					adev->ip_versions[DCE_HWIP][0]);
			goto fail;
		}
#endif
		break;
	}

	return 0;

fail:
	kfree(aencoder);
	kfree(aconnector);

	return -EINVAL;
}
static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm)
{
	drm_atomic_private_obj_fini(&dm->atomic_obj);
}
/******************************************************************************
 * amdgpu_display_funcs functions
 *****************************************************************************/

/**
 * dm_bandwidth_update - program display watermarks
 *
 * @adev: amdgpu_device pointer
 *
 * Calculate and program the display watermarks and line buffer allocation.
 */
static void dm_bandwidth_update(struct amdgpu_device *adev)
{
	/* TODO: implement later */
}
static const struct amdgpu_display_funcs dm_display_funcs = {
	.bandwidth_update = dm_bandwidth_update, /* called unconditionally */
	.vblank_get_counter = dm_vblank_get_counter,/* called unconditionally */
	.backlight_set_level = NULL, /* never called for DC */
	.backlight_get_level = NULL, /* never called for DC */
	.hpd_sense = NULL,/* called unconditionally */
	.hpd_set_polarity = NULL, /* called unconditionally */
	.hpd_get_gpio_reg = NULL, /* VBIOS parsing. DAL does it. */
	.page_flip_get_scanoutpos =
		dm_crtc_get_scanoutpos,/* called unconditionally */
	.add_encoder = NULL, /* VBIOS parsing. DAL does it. */
	.add_connector = NULL, /* VBIOS parsing. DAL does it. */
};
#if defined(CONFIG_DEBUG_KERNEL_DC)

static ssize_t s3_debug_store(struct device *device,
			      struct device_attribute *attr,
			      const char *buf,
			      size_t count)
{
	int ret;
	int s3_state;
	struct drm_device *drm_dev = dev_get_drvdata(device);
	struct amdgpu_device *adev = drm_to_adev(drm_dev);

	ret = kstrtoint(buf, 0, &s3_state);

	if (ret == 0) {
		if (s3_state) {
			dm_resume(adev);
			drm_kms_helper_hotplug_event(adev_to_drm(adev));
		} else
			dm_suspend(adev);
	}

	return ret == 0 ? count : 0;
}

DEVICE_ATTR_WO(s3_debug);

#endif
static int dm_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	switch (adev->asic_type) {
#if defined(CONFIG_DRM_AMD_DC_SI)
	case CHIP_TAHITI:
	case CHIP_PITCAIRN:
	case CHIP_VERDE:
		adev->mode_info.num_crtc = 6;
		adev->mode_info.num_hpd = 6;
		adev->mode_info.num_dig = 6;
		break;
	case CHIP_OLAND:
		adev->mode_info.num_crtc = 2;
		adev->mode_info.num_hpd = 2;
		adev->mode_info.num_dig = 2;
		break;
#endif
	case CHIP_BONAIRE:
	case CHIP_HAWAII:
		adev->mode_info.num_crtc = 6;
		adev->mode_info.num_hpd = 6;
		adev->mode_info.num_dig = 6;
		break;
	case CHIP_KAVERI:
		adev->mode_info.num_crtc = 4;
		adev->mode_info.num_hpd = 6;
		adev->mode_info.num_dig = 7;
		break;
	case CHIP_KABINI:
	case CHIP_MULLINS:
		adev->mode_info.num_crtc = 2;
		adev->mode_info.num_hpd = 6;
		adev->mode_info.num_dig = 6;
		break;
	case CHIP_FIJI:
	case CHIP_TONGA:
		adev->mode_info.num_crtc = 6;
		adev->mode_info.num_hpd = 6;
		adev->mode_info.num_dig = 7;
		break;
	case CHIP_CARRIZO:
		adev->mode_info.num_crtc = 3;
		adev->mode_info.num_hpd = 6;
		adev->mode_info.num_dig = 9;
		break;
	case CHIP_STONEY:
		adev->mode_info.num_crtc = 2;
		adev->mode_info.num_hpd = 6;
		adev->mode_info.num_dig = 9;
		break;
	case CHIP_POLARIS11:
	case CHIP_POLARIS12:
		adev->mode_info.num_crtc = 5;
		adev->mode_info.num_hpd = 5;
		adev->mode_info.num_dig = 5;
		break;
	case CHIP_POLARIS10:
	case CHIP_VEGAM:
		adev->mode_info.num_crtc = 6;
		adev->mode_info.num_hpd = 6;
		adev->mode_info.num_dig = 6;
		break;
	case CHIP_VEGA10:
	case CHIP_VEGA12:
	case CHIP_VEGA20:
		adev->mode_info.num_crtc = 6;
		adev->mode_info.num_hpd = 6;
		adev->mode_info.num_dig = 6;
		break;
	default:
#if defined(CONFIG_DRM_AMD_DC_DCN)
		switch (adev->ip_versions[DCE_HWIP][0]) {
		case IP_VERSION(2, 0, 2):
		case IP_VERSION(3, 0, 0):
			adev->mode_info.num_crtc = 6;
			adev->mode_info.num_hpd = 6;
			adev->mode_info.num_dig = 6;
			break;
		case IP_VERSION(2, 0, 0):
		case IP_VERSION(3, 0, 2):
			adev->mode_info.num_crtc = 5;
			adev->mode_info.num_hpd = 5;
			adev->mode_info.num_dig = 5;
			break;
		case IP_VERSION(2, 0, 3):
		case IP_VERSION(3, 0, 3):
			adev->mode_info.num_crtc = 2;
			adev->mode_info.num_hpd = 2;
			adev->mode_info.num_dig = 2;
			break;
		case IP_VERSION(1, 0, 0):
		case IP_VERSION(1, 0, 1):
		case IP_VERSION(3, 0, 1):
		case IP_VERSION(2, 1, 0):
		case IP_VERSION(3, 1, 2):
		case IP_VERSION(3, 1, 3):
			adev->mode_info.num_crtc = 4;
			adev->mode_info.num_hpd = 4;
			adev->mode_info.num_dig = 4;
			break;
		default:
			DRM_ERROR("Unsupported DCE IP versions: 0x%x\n",
					adev->ip_versions[DCE_HWIP][0]);
			return -EINVAL;
		}
#endif
		break;
	}

	amdgpu_dm_set_irq_funcs(adev);

	if (adev->mode_info.funcs == NULL)
		adev->mode_info.funcs = &dm_display_funcs;

	/*
	 * Note: Do NOT change adev->audio_endpt_rreg and
	 * adev->audio_endpt_wreg because they are initialised in
	 * amdgpu_device_init()
	 */
#if defined(CONFIG_DEBUG_KERNEL_DC)
	device_create_file(
		adev_to_drm(adev)->dev,
		&dev_attr_s3_debug);
#endif

	return 0;
}
static bool modeset_required(struct drm_crtc_state *crtc_state,
			     struct dc_stream_state *new_stream,
			     struct dc_stream_state *old_stream)
{
	return crtc_state->active && drm_atomic_crtc_needs_modeset(crtc_state);
}

static bool modereset_required(struct drm_crtc_state *crtc_state)
{
	return !crtc_state->active && drm_atomic_crtc_needs_modeset(crtc_state);
}

static void amdgpu_dm_encoder_destroy(struct drm_encoder *encoder)
{
	drm_encoder_cleanup(encoder);
	kfree(encoder);
}

static const struct drm_encoder_funcs amdgpu_dm_encoder_funcs = {
	.destroy = amdgpu_dm_encoder_destroy,
};
static void get_min_max_dc_plane_scaling(struct drm_device *dev,
					 struct drm_framebuffer *fb,
					 int *min_downscale, int *max_upscale)
{
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct dc *dc = adev->dm.dc;
	/* Caps for all supported planes are the same on DCE and DCN 1 - 3 */
	struct dc_plane_cap *plane_cap = &dc->caps.planes[0];

	switch (fb->format->format) {
	case DRM_FORMAT_P010:
	case DRM_FORMAT_NV12:
	case DRM_FORMAT_NV21:
		*max_upscale = plane_cap->max_upscale_factor.nv12;
		*min_downscale = plane_cap->max_downscale_factor.nv12;
		break;

	case DRM_FORMAT_XRGB16161616F:
	case DRM_FORMAT_ARGB16161616F:
	case DRM_FORMAT_XBGR16161616F:
	case DRM_FORMAT_ABGR16161616F:
		*max_upscale = plane_cap->max_upscale_factor.fp16;
		*min_downscale = plane_cap->max_downscale_factor.fp16;
		break;

	default:
		*max_upscale = plane_cap->max_upscale_factor.argb8888;
		*min_downscale = plane_cap->max_downscale_factor.argb8888;
		break;
	}

	/*
	 * A factor of 1 in the plane_cap means to not allow scaling, ie. use a
	 * scaling factor of 1.0 == 1000 units.
	 */
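	/*
	 * Illustrative numbers: a max_upscale of 16000 permits up to 16x
	 * upscaling and a min_downscale of 250 permits shrinking to 1/4
	 * size, since both are in 1/1000ths of the 1.0 scaling factor.
	 */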
	if (*max_upscale == 1)
		*max_upscale = 1000;

	if (*min_downscale == 1)
		*min_downscale = 1000;
}
static int fill_dc_scaling_info(struct amdgpu_device *adev,
				const struct drm_plane_state *state,
				struct dc_scaling_info *scaling_info)
{
	int scale_w, scale_h, min_downscale, max_upscale;

	memset(scaling_info, 0, sizeof(*scaling_info));

	/* Source is fixed 16.16 but we ignore mantissa for now... */
	scaling_info->src_rect.x = state->src_x >> 16;
	scaling_info->src_rect.y = state->src_y >> 16;

	/*
	 * For reasons we don't (yet) fully understand a non-zero
	 * src_y coordinate into an NV12 buffer can cause a
	 * system hang on DCN1x.
	 * To avoid hangs (and maybe be overly cautious)
	 * let's reject both non-zero src_x and src_y.
	 *
	 * We currently know of only one use-case to reproduce a
	 * scenario with non-zero src_x and src_y for NV12, which
	 * is to gesture the YouTube Android app into full screen
	 * on ChromeOS.
	 */
	if (((adev->ip_versions[DCE_HWIP][0] == IP_VERSION(1, 0, 0)) ||
	    (adev->ip_versions[DCE_HWIP][0] == IP_VERSION(1, 0, 1))) &&
	    (state->fb && state->fb->format->format == DRM_FORMAT_NV12 &&
	    (scaling_info->src_rect.x != 0 || scaling_info->src_rect.y != 0)))
		return -EINVAL;

	scaling_info->src_rect.width = state->src_w >> 16;
	if (scaling_info->src_rect.width == 0)
		return -EINVAL;

	scaling_info->src_rect.height = state->src_h >> 16;
	if (scaling_info->src_rect.height == 0)
		return -EINVAL;

	scaling_info->dst_rect.x = state->crtc_x;
	scaling_info->dst_rect.y = state->crtc_y;

	if (state->crtc_w == 0)
		return -EINVAL;

	scaling_info->dst_rect.width = state->crtc_w;

	if (state->crtc_h == 0)
		return -EINVAL;

	scaling_info->dst_rect.height = state->crtc_h;

	/* DRM doesn't specify clipping on destination output. */
	scaling_info->clip_rect = scaling_info->dst_rect;

	/* Validate scaling per-format with DC plane caps */
	if (state->plane && state->plane->dev && state->fb) {
		get_min_max_dc_plane_scaling(state->plane->dev, state->fb,
					     &min_downscale, &max_upscale);
	} else {
		min_downscale = 250;
		max_upscale = 16000;
	}
	scale_w = scaling_info->dst_rect.width * 1000 /
		  scaling_info->src_rect.width;

	if (scale_w < min_downscale || scale_w > max_upscale)
		return -EINVAL;

	scale_h = scaling_info->dst_rect.height * 1000 /
		  scaling_info->src_rect.height;

	if (scale_h < min_downscale || scale_h > max_upscale)
		return -EINVAL;
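	/*
	 * Worked example (illustrative): a 1920-wide source scanned out at
	 * 960 pixels gives scale_w = 960 * 1000 / 1920 = 500, which passes
	 * only if the format's min_downscale is 500 (i.e. 1/2) or smaller.
	 */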
	/*
	 * The "scaling_quality" can be ignored for now, quality = 0 has DC
	 * assume reasonable defaults based on the format.
	 */

	return 0;
}
static void
fill_gfx8_tiling_info_from_flags(union dc_tiling_info *tiling_info,
				 uint64_t tiling_flags)
{
	/* Fill GFX8 params */
	if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == DC_ARRAY_2D_TILED_THIN1) {
		unsigned int bankw, bankh, mtaspect, tile_split, num_banks;

		bankw = AMDGPU_TILING_GET(tiling_flags, BANK_WIDTH);
		bankh = AMDGPU_TILING_GET(tiling_flags, BANK_HEIGHT);
		mtaspect = AMDGPU_TILING_GET(tiling_flags, MACRO_TILE_ASPECT);
		tile_split = AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT);
		num_banks = AMDGPU_TILING_GET(tiling_flags, NUM_BANKS);

		/* XXX fix me for VI */
		tiling_info->gfx8.num_banks = num_banks;
		tiling_info->gfx8.array_mode =
				DC_ARRAY_2D_TILED_THIN1;
		tiling_info->gfx8.tile_split = tile_split;
		tiling_info->gfx8.bank_width = bankw;
		tiling_info->gfx8.bank_height = bankh;
		tiling_info->gfx8.tile_aspect = mtaspect;
		tiling_info->gfx8.tile_mode =
				DC_ADDR_SURF_MICRO_TILING_DISPLAY;
	} else if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE)
			== DC_ARRAY_1D_TILED_THIN1) {
		tiling_info->gfx8.array_mode = DC_ARRAY_1D_TILED_THIN1;
	}

	tiling_info->gfx8.pipe_config =
			AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG);
}
static void
fill_gfx9_tiling_info_from_device(const struct amdgpu_device *adev,
				  union dc_tiling_info *tiling_info)
{
	tiling_info->gfx9.num_pipes =
		adev->gfx.config.gb_addr_config_fields.num_pipes;
	tiling_info->gfx9.num_banks =
		adev->gfx.config.gb_addr_config_fields.num_banks;
	tiling_info->gfx9.pipe_interleave =
		adev->gfx.config.gb_addr_config_fields.pipe_interleave_size;
	tiling_info->gfx9.num_shader_engines =
		adev->gfx.config.gb_addr_config_fields.num_se;
	tiling_info->gfx9.max_compressed_frags =
		adev->gfx.config.gb_addr_config_fields.max_compress_frags;
	tiling_info->gfx9.num_rb_per_se =
		adev->gfx.config.gb_addr_config_fields.num_rb_per_se;
	tiling_info->gfx9.shaderEnable = 1;
	if (adev->ip_versions[GC_HWIP][0] >= IP_VERSION(10, 3, 0))
		tiling_info->gfx9.num_pkrs = adev->gfx.config.gb_addr_config_fields.num_pkrs;
}
static int
validate_dcc(struct amdgpu_device *adev,
	     const enum surface_pixel_format format,
	     const enum dc_rotation_angle rotation,
	     const union dc_tiling_info *tiling_info,
	     const struct dc_plane_dcc_param *dcc,
	     const struct dc_plane_address *address,
	     const struct plane_size *plane_size)
{
	struct dc *dc = adev->dm.dc;
	struct dc_dcc_surface_param input;
	struct dc_surface_dcc_cap output;

	memset(&input, 0, sizeof(input));
	memset(&output, 0, sizeof(output));

	if (!dcc->enable)
		return 0;

	if (format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN ||
	    !dc->cap_funcs.get_dcc_compression_cap)
		return -EINVAL;

	input.format = format;
	input.surface_size.width = plane_size->surface_size.width;
	input.surface_size.height = plane_size->surface_size.height;
	input.swizzle_mode = tiling_info->gfx9.swizzle;

	if (rotation == ROTATION_ANGLE_0 || rotation == ROTATION_ANGLE_180)
		input.scan = SCAN_DIRECTION_HORIZONTAL;
	else if (rotation == ROTATION_ANGLE_90 || rotation == ROTATION_ANGLE_270)
		input.scan = SCAN_DIRECTION_VERTICAL;

	if (!dc->cap_funcs.get_dcc_compression_cap(dc, &input, &output))
		return -EINVAL;

	if (!output.capable)
		return -EINVAL;

	if (dcc->independent_64b_blks == 0 &&
	    output.grph.rgb.independent_64b_blks != 0)
		return -EINVAL;

	return 0;
}
static bool
modifier_has_dcc(uint64_t modifier)
{
	return IS_AMD_FMT_MOD(modifier) && AMD_FMT_MOD_GET(DCC, modifier);
}

static unsigned int
modifier_gfx9_swizzle_mode(uint64_t modifier)
{
	if (modifier == DRM_FORMAT_MOD_LINEAR)
		return 0;

	return AMD_FMT_MOD_GET(TILE, modifier);
}

static const struct drm_format_info *
amd_get_format_info(const struct drm_mode_fb_cmd2 *cmd)
{
	return amdgpu_lookup_format_info(cmd->pixel_format, cmd->modifier[0]);
}
static void
fill_gfx9_tiling_info_from_modifier(const struct amdgpu_device *adev,
				    union dc_tiling_info *tiling_info,
				    uint64_t modifier)
{
	unsigned int mod_bank_xor_bits = AMD_FMT_MOD_GET(BANK_XOR_BITS, modifier);
	unsigned int mod_pipe_xor_bits = AMD_FMT_MOD_GET(PIPE_XOR_BITS, modifier);
	unsigned int pkrs_log2 = AMD_FMT_MOD_GET(PACKERS, modifier);
	unsigned int pipes_log2 = min(4u, mod_pipe_xor_bits);

	fill_gfx9_tiling_info_from_device(adev, tiling_info);

	if (!IS_AMD_FMT_MOD(modifier))
		return;

	tiling_info->gfx9.num_pipes = 1u << pipes_log2;
	tiling_info->gfx9.num_shader_engines = 1u << (mod_pipe_xor_bits - pipes_log2);

	if (adev->family >= AMDGPU_FAMILY_NV) {
		tiling_info->gfx9.num_pkrs = 1u << pkrs_log2;
	} else {
		tiling_info->gfx9.num_banks = 1u << mod_bank_xor_bits;

		/* for DCC we know it isn't rb aligned, so rb_per_se doesn't matter. */
	}
}

enum dm_micro_swizzle {
	MICRO_SWIZZLE_Z = 0,
	MICRO_SWIZZLE_S = 1,
	MICRO_SWIZZLE_D = 2,
	MICRO_SWIZZLE_R = 3
};
static bool dm_plane_format_mod_supported(struct drm_plane *plane,
					  uint32_t format,
					  uint64_t modifier)
{
	struct amdgpu_device *adev = drm_to_adev(plane->dev);
	const struct drm_format_info *info = drm_format_info(format);
	int i;

	enum dm_micro_swizzle microtile = modifier_gfx9_swizzle_mode(modifier) & 3;

	if (!info)
		return false;

	/*
	 * We always have to allow these modifiers:
	 * 1. Core DRM checks for LINEAR support if userspace does not provide modifiers.
	 * 2. Not passing any modifiers is the same as explicitly passing INVALID.
	 */
	if (modifier == DRM_FORMAT_MOD_LINEAR ||
	    modifier == DRM_FORMAT_MOD_INVALID) {
		return true;
	}

	/* Check that the modifier is on the list of the plane's supported modifiers. */
	for (i = 0; i < plane->modifier_count; i++) {
		if (modifier == plane->modifiers[i])
			break;
	}
	if (i == plane->modifier_count)
		return false;

	/*
	 * For D swizzle the canonical modifier depends on the bpp, so check
	 * it here.
	 */
	if (AMD_FMT_MOD_GET(TILE_VERSION, modifier) == AMD_FMT_MOD_TILE_VER_GFX9 &&
	    adev->family >= AMDGPU_FAMILY_NV) {
		if (microtile == MICRO_SWIZZLE_D && info->cpp[0] == 4)
			return false;
	}

	if (adev->family >= AMDGPU_FAMILY_RV && microtile == MICRO_SWIZZLE_D &&
	    info->cpp[0] < 8)
		return false;

	if (modifier_has_dcc(modifier)) {
		/* Per radeonsi comments 16/64 bpp are more complicated. */
		if (info->cpp[0] != 4)
			return false;
		/*
		 * We support multi-planar formats, but not when combined with
		 * additional DCC metadata planes.
		 */
		if (info->num_planes > 1)
			return false;
	}

	return true;
}
static void
add_modifier(uint64_t **mods, uint64_t *size, uint64_t *cap, uint64_t mod)
{
	if (!*mods)
		return;

	if (*cap - *size < 1) {
		uint64_t new_cap = *cap * 2;
		uint64_t *new_mods = kmalloc(new_cap * sizeof(uint64_t), GFP_KERNEL);

		if (!new_mods) {
			kfree(*mods);
			*mods = NULL;
			return;
		}

		memcpy(new_mods, *mods, sizeof(uint64_t) * *size);
		kfree(*mods);
		*mods = new_mods;
		*cap = new_cap;
	}

	(*mods)[*size] = mod;
	*size += 1;
}
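/*
 * Usage note (illustrative): callers seed the array via kmalloc() with a
 * starting capacity (get_plane_modifiers() below uses 128 entries) and
 * add_modifier() doubles that capacity whenever the next append would
 * overflow, keeping repeated appends amortized O(1).
 */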
static void
add_gfx9_modifiers(const struct amdgpu_device *adev,
		   uint64_t **mods, uint64_t *size, uint64_t *capacity)
{
	int pipes = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
	int pipe_xor_bits = min(8, pipes +
				ilog2(adev->gfx.config.gb_addr_config_fields.num_se));
	int bank_xor_bits = min(8 - pipe_xor_bits,
				ilog2(adev->gfx.config.gb_addr_config_fields.num_banks));
	int rb = ilog2(adev->gfx.config.gb_addr_config_fields.num_se) +
		 ilog2(adev->gfx.config.gb_addr_config_fields.num_rb_per_se);

	if (adev->family == AMDGPU_FAMILY_RV) {
		/* Raven2 and later */
		bool has_constant_encode = adev->asic_type > CHIP_RAVEN || adev->external_rev_id >= 0x81;

		/*
		 * No _D DCC swizzles yet because we only allow 32bpp, which
		 * doesn't support _D on DCN
		 */

		if (has_constant_encode) {
			add_modifier(mods, size, capacity, AMD_FMT_MOD |
				     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
				     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
				     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
				     AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
				     AMD_FMT_MOD_SET(DCC, 1) |
				     AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
				     AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
				     AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1));
		}

		add_modifier(mods, size, capacity, AMD_FMT_MOD |
			     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
			     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
			     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
			     AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
			     AMD_FMT_MOD_SET(DCC, 1) |
			     AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
			     AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
			     AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 0));

		if (has_constant_encode) {
			add_modifier(mods, size, capacity, AMD_FMT_MOD |
				     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
				     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
				     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
				     AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
				     AMD_FMT_MOD_SET(DCC, 1) |
				     AMD_FMT_MOD_SET(DCC_RETILE, 1) |
				     AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
				     AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
				     AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
				     AMD_FMT_MOD_SET(RB, rb) |
				     AMD_FMT_MOD_SET(PIPE, pipes));
		}

		add_modifier(mods, size, capacity, AMD_FMT_MOD |
			     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
			     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
			     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
			     AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
			     AMD_FMT_MOD_SET(DCC, 1) |
			     AMD_FMT_MOD_SET(DCC_RETILE, 1) |
			     AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
			     AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
			     AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 0) |
			     AMD_FMT_MOD_SET(RB, rb) |
			     AMD_FMT_MOD_SET(PIPE, pipes));
	}

	/*
	 * Only supported for 64bpp on Raven, will be filtered on format in
	 * dm_plane_format_mod_supported.
	 */
	add_modifier(mods, size, capacity, AMD_FMT_MOD |
		     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D_X) |
		     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
		     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
		     AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits));

	if (adev->family == AMDGPU_FAMILY_RV) {
		add_modifier(mods, size, capacity, AMD_FMT_MOD |
			     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
			     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
			     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
			     AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits));
	}

	/*
	 * Only supported for 64bpp on Raven, will be filtered on format in
	 * dm_plane_format_mod_supported.
	 */
	add_modifier(mods, size, capacity, AMD_FMT_MOD |
		     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
		     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));

	if (adev->family == AMDGPU_FAMILY_RV) {
		add_modifier(mods, size, capacity, AMD_FMT_MOD |
			     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
			     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
	}
}
static void
add_gfx10_1_modifiers(const struct amdgpu_device *adev,
		      uint64_t **mods, uint64_t *size, uint64_t *capacity)
{
	int pipe_xor_bits = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);

	add_modifier(mods, size, capacity, AMD_FMT_MOD |
		     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
		     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
		     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
		     AMD_FMT_MOD_SET(DCC, 1) |
		     AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
		     AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
		     AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));

	add_modifier(mods, size, capacity, AMD_FMT_MOD |
		     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
		     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
		     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
		     AMD_FMT_MOD_SET(DCC, 1) |
		     AMD_FMT_MOD_SET(DCC_RETILE, 1) |
		     AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
		     AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
		     AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));

	add_modifier(mods, size, capacity, AMD_FMT_MOD |
		     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
		     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
		     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits));

	add_modifier(mods, size, capacity, AMD_FMT_MOD |
		     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
		     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
		     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits));

	/* Only supported for 64bpp, will be filtered in dm_plane_format_mod_supported */
	add_modifier(mods, size, capacity, AMD_FMT_MOD |
		     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
		     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));

	add_modifier(mods, size, capacity, AMD_FMT_MOD |
		     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
		     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
}
static void
add_gfx10_3_modifiers(const struct amdgpu_device *adev,
		      uint64_t **mods, uint64_t *size, uint64_t *capacity)
{
	int pipe_xor_bits = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
	int pkrs = ilog2(adev->gfx.config.gb_addr_config_fields.num_pkrs);

	add_modifier(mods, size, capacity, AMD_FMT_MOD |
		     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
		     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
		     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
		     AMD_FMT_MOD_SET(PACKERS, pkrs) |
		     AMD_FMT_MOD_SET(DCC, 1) |
		     AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
		     AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
		     AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
		     AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));

	add_modifier(mods, size, capacity, AMD_FMT_MOD |
		     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
		     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
		     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
		     AMD_FMT_MOD_SET(PACKERS, pkrs) |
		     AMD_FMT_MOD_SET(DCC, 1) |
		     AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
		     AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
		     AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_128B));

	add_modifier(mods, size, capacity, AMD_FMT_MOD |
		     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
		     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
		     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
		     AMD_FMT_MOD_SET(PACKERS, pkrs) |
		     AMD_FMT_MOD_SET(DCC, 1) |
		     AMD_FMT_MOD_SET(DCC_RETILE, 1) |
		     AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
		     AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
		     AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
		     AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));

	add_modifier(mods, size, capacity, AMD_FMT_MOD |
		     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
		     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
		     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
		     AMD_FMT_MOD_SET(PACKERS, pkrs) |
		     AMD_FMT_MOD_SET(DCC, 1) |
		     AMD_FMT_MOD_SET(DCC_RETILE, 1) |
		     AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
		     AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
		     AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_128B));

	add_modifier(mods, size, capacity, AMD_FMT_MOD |
		     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
		     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
		     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
		     AMD_FMT_MOD_SET(PACKERS, pkrs));

	add_modifier(mods, size, capacity, AMD_FMT_MOD |
		     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
		     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
		     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
		     AMD_FMT_MOD_SET(PACKERS, pkrs));

	/* Only supported for 64bpp, will be filtered in dm_plane_format_mod_supported */
	add_modifier(mods, size, capacity, AMD_FMT_MOD |
		     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
		     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));

	add_modifier(mods, size, capacity, AMD_FMT_MOD |
		     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
		     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
}
static int
get_plane_modifiers(const struct amdgpu_device *adev, unsigned int plane_type, uint64_t **mods)
{
	uint64_t size = 0, capacity = 128;
	*mods = NULL;

	/* We have not hooked up any pre-GFX9 modifiers. */
	if (adev->family < AMDGPU_FAMILY_AI)
		return 0;

	*mods = kmalloc(capacity * sizeof(uint64_t), GFP_KERNEL);

	if (plane_type == DRM_PLANE_TYPE_CURSOR) {
		add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_LINEAR);
		add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_INVALID);
		return *mods ? 0 : -ENOMEM;
	}

	switch (adev->family) {
	case AMDGPU_FAMILY_AI:
	case AMDGPU_FAMILY_RV:
		add_gfx9_modifiers(adev, mods, &size, &capacity);
		break;
	case AMDGPU_FAMILY_NV:
	case AMDGPU_FAMILY_VGH:
	case AMDGPU_FAMILY_YC:
		if (adev->ip_versions[GC_HWIP][0] >= IP_VERSION(10, 3, 0))
			add_gfx10_3_modifiers(adev, mods, &size, &capacity);
		else
			add_gfx10_1_modifiers(adev, mods, &size, &capacity);
		break;
	}

	add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_LINEAR);

	/* INVALID marks the end of the list. */
	add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_INVALID);
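	/*
	 * Illustrative result: the exported array always ends with
	 * DRM_FORMAT_MOD_LINEAR followed by DRM_FORMAT_MOD_INVALID, the
	 * sentinel DRM core uses to find the end of a modifier list.
	 */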
	if (!*mods)
		return -ENOMEM;

	return 0;
}

static int
fill_gfx9_plane_attributes_from_modifiers(struct amdgpu_device *adev,
					  const struct amdgpu_framebuffer *afb,
					  const enum surface_pixel_format format,
					  const enum dc_rotation_angle rotation,
					  const struct plane_size *plane_size,
					  union dc_tiling_info *tiling_info,
					  struct dc_plane_dcc_param *dcc,
					  struct dc_plane_address *address,
					  const bool force_disable_dcc)
{
	const uint64_t modifier = afb->base.modifier;
	int ret = 0;

	fill_gfx9_tiling_info_from_modifier(adev, tiling_info, modifier);
	tiling_info->gfx9.swizzle = modifier_gfx9_swizzle_mode(modifier);

	if (modifier_has_dcc(modifier) && !force_disable_dcc) {
		uint64_t dcc_address = afb->address + afb->base.offsets[1];
		bool independent_64b_blks = AMD_FMT_MOD_GET(DCC_INDEPENDENT_64B, modifier);
		bool independent_128b_blks = AMD_FMT_MOD_GET(DCC_INDEPENDENT_128B, modifier);

		dcc->enable = 1;
		dcc->meta_pitch = afb->base.pitches[1];
		dcc->independent_64b_blks = independent_64b_blks;
		if (AMD_FMT_MOD_GET(TILE_VERSION, modifier) == AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) {
			if (independent_64b_blks && independent_128b_blks)
				dcc->dcc_ind_blk = hubp_ind_block_64b_no_128bcl;
			else if (independent_128b_blks)
				dcc->dcc_ind_blk = hubp_ind_block_128b;
			else if (independent_64b_blks && !independent_128b_blks)
				dcc->dcc_ind_blk = hubp_ind_block_64b;
			else
				dcc->dcc_ind_blk = hubp_ind_block_unconstrained;
		} else {
			if (independent_64b_blks)
				dcc->dcc_ind_blk = hubp_ind_block_64b;
			else
				dcc->dcc_ind_blk = hubp_ind_block_unconstrained;
		}

		address->grph.meta_addr.low_part = lower_32_bits(dcc_address);
		address->grph.meta_addr.high_part = upper_32_bits(dcc_address);
	}

	ret = validate_dcc(adev, format, rotation, tiling_info, dcc, address, plane_size);
	if (ret)
		drm_dbg_kms(adev_to_drm(adev), "validate_dcc: returned error: %d\n", ret);

	return ret;
}
static int
fill_plane_buffer_attributes(struct amdgpu_device *adev,
			     const struct amdgpu_framebuffer *afb,
			     const enum surface_pixel_format format,
			     const enum dc_rotation_angle rotation,
			     const uint64_t tiling_flags,
			     union dc_tiling_info *tiling_info,
			     struct plane_size *plane_size,
			     struct dc_plane_dcc_param *dcc,
			     struct dc_plane_address *address,
			     bool tmz_surface,
			     bool force_disable_dcc)
{
	const struct drm_framebuffer *fb = &afb->base;
	int ret;

	memset(tiling_info, 0, sizeof(*tiling_info));
	memset(plane_size, 0, sizeof(*plane_size));
	memset(dcc, 0, sizeof(*dcc));
	memset(address, 0, sizeof(*address));

	address->tmz_surface = tmz_surface;

	if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) {
		uint64_t addr = afb->address + fb->offsets[0];

		plane_size->surface_size.x = 0;
		plane_size->surface_size.y = 0;
		plane_size->surface_size.width = fb->width;
		plane_size->surface_size.height = fb->height;
		plane_size->surface_pitch =
			fb->pitches[0] / fb->format->cpp[0];

		address->type = PLN_ADDR_TYPE_GRAPHICS;
		address->grph.addr.low_part = lower_32_bits(addr);
		address->grph.addr.high_part = upper_32_bits(addr);
	} else if (format < SURFACE_PIXEL_FORMAT_INVALID) {
		uint64_t luma_addr = afb->address + fb->offsets[0];
		uint64_t chroma_addr = afb->address + fb->offsets[1];

		plane_size->surface_size.x = 0;
		plane_size->surface_size.y = 0;
		plane_size->surface_size.width = fb->width;
		plane_size->surface_size.height = fb->height;
		plane_size->surface_pitch =
			fb->pitches[0] / fb->format->cpp[0];

		plane_size->chroma_size.x = 0;
		plane_size->chroma_size.y = 0;
		/* TODO: set these based on surface format */
		plane_size->chroma_size.width = fb->width / 2;
		plane_size->chroma_size.height = fb->height / 2;

		plane_size->chroma_pitch =
			fb->pitches[1] / fb->format->cpp[1];

		address->type = PLN_ADDR_TYPE_VIDEO_PROGRESSIVE;
		address->video_progressive.luma_addr.low_part =
			lower_32_bits(luma_addr);
		address->video_progressive.luma_addr.high_part =
			upper_32_bits(luma_addr);
		address->video_progressive.chroma_addr.low_part =
			lower_32_bits(chroma_addr);
		address->video_progressive.chroma_addr.high_part =
			upper_32_bits(chroma_addr);
	}

	if (adev->family >= AMDGPU_FAMILY_AI) {
		ret = fill_gfx9_plane_attributes_from_modifiers(adev, afb, format,
								rotation, plane_size,
								tiling_info, dcc,
								address,
								force_disable_dcc);
		if (ret)
			return ret;
	} else {
		fill_gfx8_tiling_info_from_flags(tiling_info, tiling_flags);
	}

	return 0;
}
static void
fill_blending_from_plane_state(const struct drm_plane_state *plane_state,
			       bool *per_pixel_alpha, bool *global_alpha,
			       int *global_alpha_value)
{
	*per_pixel_alpha = false;
	*global_alpha = false;
	*global_alpha_value = 0xff;

	if (plane_state->plane->type != DRM_PLANE_TYPE_OVERLAY)
		return;

	if (plane_state->pixel_blend_mode == DRM_MODE_BLEND_PREMULTI) {
		static const uint32_t alpha_formats[] = {
			DRM_FORMAT_ARGB8888,
			DRM_FORMAT_RGBA8888,
			DRM_FORMAT_ABGR8888,
		};
		uint32_t format = plane_state->fb->format->format;
		unsigned int i;

		for (i = 0; i < ARRAY_SIZE(alpha_formats); ++i) {
			if (format == alpha_formats[i]) {
				*per_pixel_alpha = true;
				break;
			}
		}
	}

	if (plane_state->alpha < 0xffff) {
		*global_alpha = true;
		*global_alpha_value = plane_state->alpha >> 8;
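		/*
		 * e.g. a DRM plane alpha of 0x8080 (16-bit) becomes a DC
		 * global_alpha_value of 0x80 (~50% opacity) after the >> 8.
		 */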
	}
}

static int
fill_plane_color_attributes(const struct drm_plane_state *plane_state,
			    const enum surface_pixel_format format,
			    enum dc_color_space *color_space)
{
	bool full_range;

	*color_space = COLOR_SPACE_SRGB;

	/* DRM color properties only affect non-RGB formats. */
	if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN)
		return 0;

	full_range = (plane_state->color_range == DRM_COLOR_YCBCR_FULL_RANGE);

	switch (plane_state->color_encoding) {
	case DRM_COLOR_YCBCR_BT601:
		if (full_range)
			*color_space = COLOR_SPACE_YCBCR601;
		else
			*color_space = COLOR_SPACE_YCBCR601_LIMITED;
		break;

	case DRM_COLOR_YCBCR_BT709:
		if (full_range)
			*color_space = COLOR_SPACE_YCBCR709;
		else
			*color_space = COLOR_SPACE_YCBCR709_LIMITED;
		break;

	case DRM_COLOR_YCBCR_BT2020:
		if (full_range)
			*color_space = COLOR_SPACE_2020_YCBCR;
		else
			return -EINVAL;
		break;

	default:
		return -EINVAL;
	}

	return 0;
}
static int
fill_dc_plane_info_and_addr(struct amdgpu_device *adev,
			    const struct drm_plane_state *plane_state,
			    const uint64_t tiling_flags,
			    struct dc_plane_info *plane_info,
			    struct dc_plane_address *address,
			    bool tmz_surface,
			    bool force_disable_dcc)
{
	const struct drm_framebuffer *fb = plane_state->fb;
	const struct amdgpu_framebuffer *afb =
		to_amdgpu_framebuffer(plane_state->fb);
	int ret;

	memset(plane_info, 0, sizeof(*plane_info));

	switch (fb->format->format) {
	case DRM_FORMAT_C8:
		plane_info->format =
			SURFACE_PIXEL_FORMAT_GRPH_PALETA_256_COLORS;
		break;
	case DRM_FORMAT_RGB565:
		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_RGB565;
		break;
	case DRM_FORMAT_XRGB8888:
	case DRM_FORMAT_ARGB8888:
		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB8888;
		break;
	case DRM_FORMAT_XRGB2101010:
	case DRM_FORMAT_ARGB2101010:
		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010;
		break;
	case DRM_FORMAT_XBGR2101010:
	case DRM_FORMAT_ABGR2101010:
		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010;
		break;
	case DRM_FORMAT_XBGR8888:
	case DRM_FORMAT_ABGR8888:
		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR8888;
		break;
	case DRM_FORMAT_NV21:
		plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr;
		break;
	case DRM_FORMAT_NV12:
		plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb;
		break;
	case DRM_FORMAT_P010:
		plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb;
		break;
	case DRM_FORMAT_XRGB16161616F:
	case DRM_FORMAT_ARGB16161616F:
		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F;
		break;
	case DRM_FORMAT_XBGR16161616F:
	case DRM_FORMAT_ABGR16161616F:
		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F;
		break;
	case DRM_FORMAT_XRGB16161616:
	case DRM_FORMAT_ARGB16161616:
		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616;
		break;
	case DRM_FORMAT_XBGR16161616:
	case DRM_FORMAT_ABGR16161616:
		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616;
		break;
	default:
		DRM_ERROR(
			"Unsupported screen format %p4cc\n",
			&fb->format->format);
		return -EINVAL;
	}

	switch (plane_state->rotation & DRM_MODE_ROTATE_MASK) {
	case DRM_MODE_ROTATE_0:
		plane_info->rotation = ROTATION_ANGLE_0;
		break;
	case DRM_MODE_ROTATE_90:
		plane_info->rotation = ROTATION_ANGLE_90;
		break;
	case DRM_MODE_ROTATE_180:
		plane_info->rotation = ROTATION_ANGLE_180;
		break;
	case DRM_MODE_ROTATE_270:
		plane_info->rotation = ROTATION_ANGLE_270;
		break;
	default:
		plane_info->rotation = ROTATION_ANGLE_0;
		break;
	}

	plane_info->visible = true;
	plane_info->stereo_format = PLANE_STEREO_FORMAT_NONE;

	plane_info->layer_index = 0;

	ret = fill_plane_color_attributes(plane_state, plane_info->format,
					  &plane_info->color_space);
	if (ret)
		return ret;

	ret = fill_plane_buffer_attributes(adev, afb, plane_info->format,
					   plane_info->rotation, tiling_flags,
					   &plane_info->tiling_info,
					   &plane_info->plane_size,
					   &plane_info->dcc, address, tmz_surface,
					   force_disable_dcc);
	if (ret)
		return ret;

	fill_blending_from_plane_state(
		plane_state, &plane_info->per_pixel_alpha,
		&plane_info->global_alpha, &plane_info->global_alpha_value);

	return 0;
}
static int fill_dc_plane_attributes(struct amdgpu_device *adev,
				    struct dc_plane_state *dc_plane_state,
				    struct drm_plane_state *plane_state,
				    struct drm_crtc_state *crtc_state)
{
	struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
	struct amdgpu_framebuffer *afb = (struct amdgpu_framebuffer *)plane_state->fb;
	struct dc_scaling_info scaling_info;
	struct dc_plane_info plane_info;
	int ret;
	bool force_disable_dcc = false;

	ret = fill_dc_scaling_info(adev, plane_state, &scaling_info);
	if (ret)
		return ret;

	dc_plane_state->src_rect = scaling_info.src_rect;
	dc_plane_state->dst_rect = scaling_info.dst_rect;
	dc_plane_state->clip_rect = scaling_info.clip_rect;
	dc_plane_state->scaling_quality = scaling_info.scaling_quality;

	force_disable_dcc = adev->asic_type == CHIP_RAVEN && adev->in_suspend;
	ret = fill_dc_plane_info_and_addr(adev, plane_state,
					  afb->tiling_flags,
					  &plane_info,
					  &dc_plane_state->address,
					  afb->tmz_surface,
					  force_disable_dcc);
	if (ret)
		return ret;

	dc_plane_state->format = plane_info.format;
	dc_plane_state->color_space = plane_info.color_space;
	dc_plane_state->plane_size = plane_info.plane_size;
	dc_plane_state->rotation = plane_info.rotation;
	dc_plane_state->horizontal_mirror = plane_info.horizontal_mirror;
	dc_plane_state->stereo_format = plane_info.stereo_format;
	dc_plane_state->tiling_info = plane_info.tiling_info;
	dc_plane_state->visible = plane_info.visible;
	dc_plane_state->per_pixel_alpha = plane_info.per_pixel_alpha;
	dc_plane_state->global_alpha = plane_info.global_alpha;
	dc_plane_state->global_alpha_value = plane_info.global_alpha_value;
	dc_plane_state->dcc = plane_info.dcc;
	dc_plane_state->layer_index = plane_info.layer_index; // Always returns 0
	dc_plane_state->flip_int_enabled = true;

	/*
	 * Always set input transfer function, since plane state is refreshed
	 * every time.
	 */
	ret = amdgpu_dm_update_plane_color_mgmt(dm_crtc_state, dc_plane_state);
	if (ret)
		return ret;

	return 0;
}
static void update_stream_scaling_settings(const struct drm_display_mode *mode,
					   const struct dm_connector_state *dm_state,
					   struct dc_stream_state *stream)
{
	enum amdgpu_rmx_type rmx_type;

	struct rect src = { 0 }; /* viewport in composition space*/
	struct rect dst = { 0 }; /* stream addressable area */

	/* no mode. nothing to be done */
	if (!mode)
		return;

	/* Full screen scaling by default */
	src.width = mode->hdisplay;
	src.height = mode->vdisplay;
	dst.width = stream->timing.h_addressable;
	dst.height = stream->timing.v_addressable;

	if (dm_state) {
		rmx_type = dm_state->scaling;
		if (rmx_type == RMX_ASPECT || rmx_type == RMX_OFF) {
			if (src.width * dst.height <
					src.height * dst.width) {
				/* height needs less upscaling/more downscaling */
				dst.width = src.width *
						dst.height / src.height;
			} else {
				/* width needs less upscaling/more downscaling */
				dst.height = src.height *
						dst.width / src.width;
			}
		} else if (rmx_type == RMX_CENTER) {
			dst = src;
		}

		dst.x = (stream->timing.h_addressable - dst.width) / 2;
		dst.y = (stream->timing.v_addressable - dst.height) / 2;

		if (dm_state->underscan_enable) {
			dst.x += dm_state->underscan_hborder / 2;
			dst.y += dm_state->underscan_vborder / 2;
			dst.width -= dm_state->underscan_hborder;
			dst.height -= dm_state->underscan_vborder;
		}
	}

	stream->src = src;
	stream->dst = dst;

	DRM_DEBUG_KMS("Destination Rectangle x:%d y:%d width:%d height:%d\n",
		      dst.x, dst.y, dst.width, dst.height);
}
static enum dc_color_depth
convert_color_depth_from_display_info(const struct drm_connector *connector,
				      bool is_y420, int requested_bpc)
{
	uint8_t bpc;

	if (is_y420) {
		bpc = 8;

		/* Cap display bpc based on HDMI 2.0 HF-VSDB */
		if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_48)
			bpc = 16;
		else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_36)
			bpc = 12;
		else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_30)
			bpc = 10;
	} else {
		bpc = (uint8_t)connector->display_info.bpc;
		/* Assume 8 bpc by default if no bpc is specified. */
		bpc = bpc ? bpc : 8;
	}

	if (requested_bpc > 0) {
		/*
		 * Cap display bpc based on the user requested value.
		 *
		 * The value for state->max_bpc may not be correctly updated
		 * depending on when the connector gets added to the state
		 * or if this was called outside of atomic check, so it
		 * can't be used directly.
		 */
		bpc = min_t(u8, bpc, requested_bpc);

		/* Round down to the nearest even number. */
		bpc = bpc - (bpc & 1);
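		/* e.g. a requested_bpc of 11 is rounded down to 10 here. */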
	}

	switch (bpc) {
	case 0:
		/*
		 * Temporary Work around, DRM doesn't parse color depth for
		 * EDID revision before 1.4
		 * TODO: Fix edid parsing
		 */
		return COLOR_DEPTH_888;
	case 6:
		return COLOR_DEPTH_666;
	case 8:
		return COLOR_DEPTH_888;
	case 10:
		return COLOR_DEPTH_101010;
	case 12:
		return COLOR_DEPTH_121212;
	case 14:
		return COLOR_DEPTH_141414;
	case 16:
		return COLOR_DEPTH_161616;
	default:
		return COLOR_DEPTH_UNDEFINED;
	}
}
static enum dc_aspect_ratio
get_aspect_ratio(const struct drm_display_mode *mode_in)
{
	/* 1-1 mapping, since both enums follow the HDMI spec. */
	return (enum dc_aspect_ratio) mode_in->picture_aspect_ratio;
}
static enum dc_color_space
get_output_color_space(const struct dc_crtc_timing *dc_crtc_timing)
{
	enum dc_color_space color_space = COLOR_SPACE_SRGB;

	switch (dc_crtc_timing->pixel_encoding) {
	case PIXEL_ENCODING_YCBCR422:
	case PIXEL_ENCODING_YCBCR444:
	case PIXEL_ENCODING_YCBCR420:
	{
		/*
		 * 27030khz is the separation point between HDTV and SDTV
		 * according to HDMI spec, we use YCbCr709 and YCbCr601
		 * respectively
		 */
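		/*
		 * Worked example (illustrative): 1080p at 148.5 MHz is
		 * 1485000 in 100 Hz units, well above 270300 (27.03 MHz),
		 * so it gets BT.709; 480p at 27 MHz (270000) stays BT.601.
		 */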
		if (dc_crtc_timing->pix_clk_100hz > 270300) {
			if (dc_crtc_timing->flags.Y_ONLY)
				color_space =
					COLOR_SPACE_YCBCR709_LIMITED;
			else
				color_space = COLOR_SPACE_YCBCR709;
		} else {
			if (dc_crtc_timing->flags.Y_ONLY)
				color_space =
					COLOR_SPACE_YCBCR601_LIMITED;
			else
				color_space = COLOR_SPACE_YCBCR601;
		}
	}
	break;
	case PIXEL_ENCODING_RGB:
		color_space = COLOR_SPACE_SRGB;
		break;

	default:
		WARN_ON(1);
		break;
	}

	return color_space;
}
static bool adjust_colour_depth_from_display_info(
	struct dc_crtc_timing *timing_out,
	const struct drm_display_info *info)
{
	enum dc_color_depth depth = timing_out->display_color_depth;
	int normalized_clk;

	do {
		normalized_clk = timing_out->pix_clk_100hz / 10;
		/* YCbCr 4:2:0 requires additional adjustment of 1/2 */
		if (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420)
			normalized_clk /= 2;
		/* Adjusting pix clock following on HDMI spec based on colour depth */
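		/*
		 * Worked example (illustrative): a 4k60 stream is 594000 kHz;
		 * at 12 bpc it normalizes to 594000 * 36 / 24 = 891000 kHz and
		 * at 10 bpc to 742500 kHz, both over a 600000 kHz
		 * max_tmds_clock, so the loop settles on 8 bpc (594000 kHz).
		 */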
		switch (depth) {
		case COLOR_DEPTH_888:
			break;
		case COLOR_DEPTH_101010:
			normalized_clk = (normalized_clk * 30) / 24;
			break;
		case COLOR_DEPTH_121212:
			normalized_clk = (normalized_clk * 36) / 24;
			break;
		case COLOR_DEPTH_161616:
			normalized_clk = (normalized_clk * 48) / 24;
			break;
		default:
			/* The above depths are the only ones valid for HDMI. */
			return false;
		}
		if (normalized_clk <= info->max_tmds_clock) {
			timing_out->display_color_depth = depth;
			return true;
		}
	} while (--depth > COLOR_DEPTH_666);
	return false;
}
static void fill_stream_properties_from_drm_display_mode(
	struct dc_stream_state *stream,
	const struct drm_display_mode *mode_in,
	const struct drm_connector *connector,
	const struct drm_connector_state *connector_state,
	const struct dc_stream_state *old_stream,
	int requested_bpc)
{
	struct dc_crtc_timing *timing_out = &stream->timing;
	const struct drm_display_info *info = &connector->display_info;
	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
	struct hdmi_vendor_infoframe hv_frame;
	struct hdmi_avi_infoframe avi_frame;

	memset(&hv_frame, 0, sizeof(hv_frame));
	memset(&avi_frame, 0, sizeof(avi_frame));

	timing_out->h_border_left = 0;
	timing_out->h_border_right = 0;
	timing_out->v_border_top = 0;
	timing_out->v_border_bottom = 0;
	/* TODO: un-hardcode */
	if (drm_mode_is_420_only(info, mode_in)
			&& stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
		timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
	else if (drm_mode_is_420_also(info, mode_in)
			&& aconnector->force_yuv420_output)
		timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
	else if ((connector->display_info.color_formats & DRM_COLOR_FORMAT_YCBCR444)
			&& stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
		timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR444;
	else
		timing_out->pixel_encoding = PIXEL_ENCODING_RGB;

	timing_out->timing_3d_format = TIMING_3D_FORMAT_NONE;
	timing_out->display_color_depth = convert_color_depth_from_display_info(
		connector,
		(timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420),
		requested_bpc);
	timing_out->scan_type = SCANNING_TYPE_NODATA;
	timing_out->hdmi_vic = 0;

	if (old_stream) {
		timing_out->vic = old_stream->timing.vic;
		timing_out->flags.HSYNC_POSITIVE_POLARITY = old_stream->timing.flags.HSYNC_POSITIVE_POLARITY;
		timing_out->flags.VSYNC_POSITIVE_POLARITY = old_stream->timing.flags.VSYNC_POSITIVE_POLARITY;
	} else {
		timing_out->vic = drm_match_cea_mode(mode_in);
		if (mode_in->flags & DRM_MODE_FLAG_PHSYNC)
			timing_out->flags.HSYNC_POSITIVE_POLARITY = 1;
		if (mode_in->flags & DRM_MODE_FLAG_PVSYNC)
			timing_out->flags.VSYNC_POSITIVE_POLARITY = 1;
	}

	if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
		drm_hdmi_avi_infoframe_from_display_mode(&avi_frame, (struct drm_connector *)connector, mode_in);
		timing_out->vic = avi_frame.video_code;
		drm_hdmi_vendor_infoframe_from_display_mode(&hv_frame, (struct drm_connector *)connector, mode_in);
		timing_out->hdmi_vic = hv_frame.vic;
	}

	if (is_freesync_video_mode(mode_in, aconnector)) {
		timing_out->h_addressable = mode_in->hdisplay;
		timing_out->h_total = mode_in->htotal;
		timing_out->h_sync_width = mode_in->hsync_end - mode_in->hsync_start;
		timing_out->h_front_porch = mode_in->hsync_start - mode_in->hdisplay;
		timing_out->v_total = mode_in->vtotal;
		timing_out->v_addressable = mode_in->vdisplay;
		timing_out->v_front_porch = mode_in->vsync_start - mode_in->vdisplay;
		timing_out->v_sync_width = mode_in->vsync_end - mode_in->vsync_start;
		timing_out->pix_clk_100hz = mode_in->clock * 10;
	} else {
		timing_out->h_addressable = mode_in->crtc_hdisplay;
		timing_out->h_total = mode_in->crtc_htotal;
		timing_out->h_sync_width = mode_in->crtc_hsync_end - mode_in->crtc_hsync_start;
		timing_out->h_front_porch = mode_in->crtc_hsync_start - mode_in->crtc_hdisplay;
		timing_out->v_total = mode_in->crtc_vtotal;
		timing_out->v_addressable = mode_in->crtc_vdisplay;
		timing_out->v_front_porch = mode_in->crtc_vsync_start - mode_in->crtc_vdisplay;
		timing_out->v_sync_width = mode_in->crtc_vsync_end - mode_in->crtc_vsync_start;
		timing_out->pix_clk_100hz = mode_in->crtc_clock * 10;
	}

	timing_out->aspect_ratio = get_aspect_ratio(mode_in);

	stream->output_color_space = get_output_color_space(timing_out);

	stream->out_transfer_func->type = TF_TYPE_PREDEFINED;
	stream->out_transfer_func->tf = TRANSFER_FUNCTION_SRGB;
	if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
		if (!adjust_colour_depth_from_display_info(timing_out, info) &&
		    drm_mode_is_420_also(info, mode_in) &&
		    timing_out->pixel_encoding != PIXEL_ENCODING_YCBCR420) {
			timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
			adjust_colour_depth_from_display_info(timing_out, info);
		}
	}
}
static void fill_audio_info(struct audio_info *audio_info,
			    const struct drm_connector *drm_connector,
			    const struct dc_sink *dc_sink)
{
	int i = 0;
	int cea_revision = 0;
	const struct dc_edid_caps *edid_caps = &dc_sink->edid_caps;

	audio_info->manufacture_id = edid_caps->manufacturer_id;
	audio_info->product_id = edid_caps->product_id;

	cea_revision = drm_connector->display_info.cea_rev;

	strscpy(audio_info->display_name,
		edid_caps->display_name,
		AUDIO_INFO_DISPLAY_NAME_SIZE_IN_CHARS);

	if (cea_revision >= 3) {
		audio_info->mode_count = edid_caps->audio_mode_count;

		for (i = 0; i < audio_info->mode_count; ++i) {
			audio_info->modes[i].format_code =
					(enum audio_format_code)
					(edid_caps->audio_modes[i].format_code);
			audio_info->modes[i].channel_count =
					edid_caps->audio_modes[i].channel_count;
			audio_info->modes[i].sample_rates.all =
					edid_caps->audio_modes[i].sample_rate;
			audio_info->modes[i].sample_size =
					edid_caps->audio_modes[i].sample_size;
		}
	}

	audio_info->flags.all = edid_caps->speaker_flags;

	/* TODO: We only check for the progressive mode, check for interlace mode too */
	if (drm_connector->latency_present[0]) {
		audio_info->video_latency = drm_connector->video_latency[0];
		audio_info->audio_latency = drm_connector->audio_latency[0];
	}

	/* TODO: For DP, video and audio latency should be calculated from DPCD caps */
}
static void
copy_crtc_timing_for_drm_display_mode(const struct drm_display_mode *src_mode,
				      struct drm_display_mode *dst_mode)
{
	dst_mode->crtc_hdisplay = src_mode->crtc_hdisplay;
	dst_mode->crtc_vdisplay = src_mode->crtc_vdisplay;
	dst_mode->crtc_clock = src_mode->crtc_clock;
	dst_mode->crtc_hblank_start = src_mode->crtc_hblank_start;
	dst_mode->crtc_hblank_end = src_mode->crtc_hblank_end;
	dst_mode->crtc_hsync_start = src_mode->crtc_hsync_start;
	dst_mode->crtc_hsync_end = src_mode->crtc_hsync_end;
	dst_mode->crtc_htotal = src_mode->crtc_htotal;
	dst_mode->crtc_hskew = src_mode->crtc_hskew;
	dst_mode->crtc_vblank_start = src_mode->crtc_vblank_start;
	dst_mode->crtc_vblank_end = src_mode->crtc_vblank_end;
	dst_mode->crtc_vsync_start = src_mode->crtc_vsync_start;
	dst_mode->crtc_vsync_end = src_mode->crtc_vsync_end;
	dst_mode->crtc_vtotal = src_mode->crtc_vtotal;
}
static void
decide_crtc_timing_for_drm_display_mode(struct drm_display_mode *drm_mode,
					const struct drm_display_mode *native_mode,
					bool scale_enabled)
{
	if (scale_enabled) {
		copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
	} else if (native_mode->clock == drm_mode->clock &&
		   native_mode->htotal == drm_mode->htotal &&
		   native_mode->vtotal == drm_mode->vtotal) {
		copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
	} else {
		/* no scaling nor amdgpu inserted, no need to patch */
	}
}
static struct dc_sink *
create_fake_sink(struct amdgpu_dm_connector *aconnector)
{
	struct dc_sink_init_data sink_init_data = { 0 };
	struct dc_sink *sink = NULL;

	sink_init_data.link = aconnector->dc_link;
	sink_init_data.sink_signal = aconnector->dc_link->connector_signal;

	sink = dc_sink_create(&sink_init_data);
	if (!sink) {
		DRM_ERROR("Failed to create sink!\n");
		return NULL;
	}
	sink->sink_signal = SIGNAL_TYPE_VIRTUAL;

	return sink;
}
static void set_multisync_trigger_params(
		struct dc_stream_state *stream)
{
	struct dc_stream_state *master = NULL;

	if (stream->triggered_crtc_reset.enabled) {
		master = stream->triggered_crtc_reset.event_source;
		stream->triggered_crtc_reset.event =
			master->timing.flags.VSYNC_POSITIVE_POLARITY ?
			CRTC_EVENT_VSYNC_RISING : CRTC_EVENT_VSYNC_FALLING;
		stream->triggered_crtc_reset.delay = TRIGGER_DELAY_NEXT_PIXEL;
	}
}
6043 static void set_master_stream(struct dc_stream_state *stream_set[],
6046 int j, highest_rfr = 0, master_stream = 0;
6048 for (j = 0; j < stream_count; j++) {
6049 if (stream_set[j] && stream_set[j]->triggered_crtc_reset.enabled) {
6050 int refresh_rate = 0;
6052 refresh_rate = (stream_set[j]->timing.pix_clk_100hz*100)/
6053 (stream_set[j]->timing.h_total*stream_set[j]->timing.v_total);
6054 if (refresh_rate > highest_rfr) {
6055 highest_rfr = refresh_rate;
6060 for (j = 0; j < stream_count; j++) {
6062 stream_set[j]->triggered_crtc_reset.event_source = stream_set[master_stream];
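/*
 * Illustrative math for the master election above (not part of the driver
 * logic): a 1920x1080@60 stream has pix_clk_100hz = 1485000 (148.5 MHz),
 * h_total = 2200 and v_total = 1125, so
 *
 *   refresh_rate = (1485000 * 100) / (2200 * 1125) = 60
 *
 * and it would be picked as master over any stream whose integer rate
 * works out lower.
 */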
6066 static void dm_enable_per_frame_crtc_master_sync(struct dc_state *context)
6069 struct dc_stream_state *stream;
6071 if (context->stream_count < 2)
6073 for (i = 0; i < context->stream_count ; i++) {
6074 if (!context->streams[i])
6077 * TODO: add a function to read AMD VSDB bits and set the
6078 * crtc_sync_master.multi_sync_enabled flag.
6079 * For now it's set to false.
6083 set_master_stream(context->streams, context->stream_count);
6085 for (i = 0; i < context->stream_count ; i++) {
6086 stream = context->streams[i];
6091 set_multisync_trigger_params(stream);
6095 #if defined(CONFIG_DRM_AMD_DC_DCN)
6096 static void update_dsc_caps(struct amdgpu_dm_connector *aconnector,
6097 struct dc_sink *sink, struct dc_stream_state *stream,
6098 struct dsc_dec_dpcd_caps *dsc_caps)
6100 stream->timing.flags.DSC = 0;
6101 dsc_caps->is_dsc_supported = false;
6103 if (aconnector->dc_link && (sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT ||
6104 sink->sink_signal == SIGNAL_TYPE_EDP)) {
6105 if (sink->link->dpcd_caps.dongle_type == DISPLAY_DONGLE_NONE ||
6106 sink->link->dpcd_caps.dongle_type == DISPLAY_DONGLE_DP_HDMI_CONVERTER)
6107 dc_dsc_parse_dsc_dpcd(aconnector->dc_link->ctx->dc,
6108 aconnector->dc_link->dpcd_caps.dsc_caps.dsc_basic_caps.raw,
6109 aconnector->dc_link->dpcd_caps.dsc_caps.dsc_branch_decoder_caps.raw,
6114 static void apply_dsc_policy_for_edp(struct amdgpu_dm_connector *aconnector,
6115 struct dc_sink *sink, struct dc_stream_state *stream,
6116 struct dsc_dec_dpcd_caps *dsc_caps,
6117 uint32_t max_dsc_target_bpp_limit_override)
6119 const struct dc_link_settings *verified_link_cap = NULL;
6120 uint32_t link_bw_in_kbps;
6121 uint32_t edp_min_bpp_x16, edp_max_bpp_x16;
6122 struct dc *dc = sink->ctx->dc;
6123 struct dc_dsc_bw_range bw_range = {0};
6124 struct dc_dsc_config dsc_cfg = {0};
6126 verified_link_cap = dc_link_get_link_cap(stream->link);
6127 link_bw_in_kbps = dc_link_bandwidth_kbps(stream->link, verified_link_cap);
6128 edp_min_bpp_x16 = 8 * 16;
6129 edp_max_bpp_x16 = 8 * 16;
6131 if (edp_max_bpp_x16 > dsc_caps->edp_max_bits_per_pixel)
6132 edp_max_bpp_x16 = dsc_caps->edp_max_bits_per_pixel;
6134 if (edp_max_bpp_x16 < edp_min_bpp_x16)
6135 edp_min_bpp_x16 = edp_max_bpp_x16;
6137 if (dc_dsc_compute_bandwidth_range(dc->res_pool->dscs[0],
6138 dc->debug.dsc_min_slice_height_override,
6139 edp_min_bpp_x16, edp_max_bpp_x16,
6144 if (bw_range.max_kbps < link_bw_in_kbps) {
6145 if (dc_dsc_compute_config(dc->res_pool->dscs[0],
6147 dc->debug.dsc_min_slice_height_override,
6148 max_dsc_target_bpp_limit_override,
6152 stream->timing.dsc_cfg = dsc_cfg;
6153 stream->timing.flags.DSC = 1;
6154 stream->timing.dsc_cfg.bits_per_pixel = edp_max_bpp_x16;
6160 if (dc_dsc_compute_config(dc->res_pool->dscs[0],
6162 dc->debug.dsc_min_slice_height_override,
6163 max_dsc_target_bpp_limit_override,
6167 stream->timing.dsc_cfg = dsc_cfg;
6168 stream->timing.flags.DSC = 1;
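/*
 * Reading guide for the eDP policy above (an interpretation of the code,
 * not normative): dc_dsc_compute_bandwidth_range() reports the stream
 * bandwidth needed across the allowed DSC bpp range. If even the most
 * generous bpp (bw_range.max_kbps) fits within link_bw_in_kbps, DSC runs
 * at edp_max_bpp_x16; otherwise a config is computed that targets the
 * link bandwidth itself and DC chooses bits_per_pixel.
 */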
6172 static void apply_dsc_policy_for_stream(struct amdgpu_dm_connector *aconnector,
6173 struct dc_sink *sink, struct dc_stream_state *stream,
6174 struct dsc_dec_dpcd_caps *dsc_caps)
6176 struct drm_connector *drm_connector = &aconnector->base;
6177 uint32_t link_bandwidth_kbps;
6178 uint32_t max_dsc_target_bpp_limit_override = 0;
6179 struct dc *dc = sink->ctx->dc;
6180 uint32_t max_supported_bw_in_kbps, timing_bw_in_kbps;
6181 uint32_t dsc_max_supported_bw_in_kbps;
6183 link_bandwidth_kbps = dc_link_bandwidth_kbps(aconnector->dc_link,
6184 dc_link_get_link_cap(aconnector->dc_link));
6186 if (stream->link && stream->link->local_sink)
6187 max_dsc_target_bpp_limit_override =
6188 stream->link->local_sink->edid_caps.panel_patch.max_dsc_target_bpp_limit;
6190 /* Set DSC policy according to dsc_clock_en */
6191 dc_dsc_policy_set_enable_dsc_when_not_needed(
6192 aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE);
6194 if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_EDP && !dc->debug.disable_dsc_edp &&
6195 dc->caps.edp_dsc_support && aconnector->dsc_settings.dsc_force_enable != DSC_CLK_FORCE_DISABLE) {
6197 apply_dsc_policy_for_edp(aconnector, sink, stream, dsc_caps, max_dsc_target_bpp_limit_override);
6199 } else if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT) {
6200 if (sink->link->dpcd_caps.dongle_type == DISPLAY_DONGLE_NONE) {
6201 if (dc_dsc_compute_config(aconnector->dc_link->ctx->dc->res_pool->dscs[0],
6203 aconnector->dc_link->ctx->dc->debug.dsc_min_slice_height_override,
6204 max_dsc_target_bpp_limit_override,
6205 link_bandwidth_kbps,
6207 &stream->timing.dsc_cfg)) {
6208 stream->timing.flags.DSC = 1;
6209 DRM_DEBUG_DRIVER("%s: [%s] DSC is selected from SST RX\n",
6210 __func__, drm_connector->name);
6212 } else if (sink->link->dpcd_caps.dongle_type == DISPLAY_DONGLE_DP_HDMI_CONVERTER) {
6213 timing_bw_in_kbps = dc_bandwidth_in_kbps_from_timing(&stream->timing);
6214 max_supported_bw_in_kbps = link_bandwidth_kbps;
6215 dsc_max_supported_bw_in_kbps = link_bandwidth_kbps;
6217 if (timing_bw_in_kbps > max_supported_bw_in_kbps &&
6218 max_supported_bw_in_kbps > 0 &&
6219 dsc_max_supported_bw_in_kbps > 0)
6220 if (dc_dsc_compute_config(aconnector->dc_link->ctx->dc->res_pool->dscs[0],
6222 aconnector->dc_link->ctx->dc->debug.dsc_min_slice_height_override,
6223 max_dsc_target_bpp_limit_override,
6224 dsc_max_supported_bw_in_kbps,
6226 &stream->timing.dsc_cfg)) {
6227 stream->timing.flags.DSC = 1;
6228 DRM_DEBUG_DRIVER("%s: [%s] DSC is selected from DP-HDMI PCON\n",
6229 __func__, drm_connector->name);
6234 /* Overwrite the stream flag if DSC is enabled through debugfs */
6235 if (aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE)
6236 stream->timing.flags.DSC = 1;
6238 if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_h)
6239 stream->timing.dsc_cfg.num_slices_h = aconnector->dsc_settings.dsc_num_slices_h;
6241 if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_v)
6242 stream->timing.dsc_cfg.num_slices_v = aconnector->dsc_settings.dsc_num_slices_v;
6244 if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_bits_per_pixel)
6245 stream->timing.dsc_cfg.bits_per_pixel = aconnector->dsc_settings.dsc_bits_per_pixel;
6247 #endif /* CONFIG_DRM_AMD_DC_DCN */
6250 * DOC: FreeSync Video
6252 * When a userspace application wants to play a video, the content follows a
6253 * standard format definition that usually specifies the FPS for that format.
6254 * The below list illustrates some video formats and their expected FPS,
6257 * - TV/NTSC (23.976 FPS)
6260 * - TV/NTSC (29.97 FPS)
6261 * - TV/NTSC (30 FPS)
6262 * - Cinema HFR (48 FPS)
6264 * - Commonly used (60 FPS)
6265 * - Multiples of 24 (48,72,96,120 FPS)
6267 * The list of standard video formats is not huge and can be added to the
6268 * connector modeset list beforehand. With that, userspace can leverage
6269 * FreeSync to extend the front porch in order to attain the target refresh
6270 * rate. Such a switch happens seamlessly, without screen blanking or
6271 * reprogramming of the output in any other way. If userspace requests a
6272 * modesetting change compatible with FreeSync modes that only differ in the
6273 * refresh rate, DC will skip the full update and avoid blinking during the
6274 * transition. For example, the video player can change the modesetting from
6275 * 60Hz to 30Hz for playing TV/NTSC content when it goes full screen without
6276 * causing any display blink. This same concept can be applied to a mode setting change.
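/*
 * Worked example of the front-porch stretch described above (illustrative
 * numbers only): a 1920x1080@60 base mode has htotal = 2200, vtotal = 1125
 * and a 148.5 MHz pixel clock. Keeping the clock and htotal fixed,
 *
 *   vtotal@30Hz = 148500000 / (30 * 2200) = 2250
 *
 * so doubling vtotal purely in the vertical front porch yields a seamless
 * 60Hz -> 30Hz switch with no link retraining.
 */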
6279 static struct drm_display_mode *
6280 get_highest_refresh_rate_mode(struct amdgpu_dm_connector *aconnector,
6281 bool use_probed_modes)
6283 struct drm_display_mode *m, *m_pref = NULL;
6284 u16 current_refresh, highest_refresh;
6285 struct list_head *list_head = use_probed_modes ?
6286 &aconnector->base.probed_modes :
6287 &aconnector->base.modes;
6289 if (aconnector->freesync_vid_base.clock != 0)
6290 return &aconnector->freesync_vid_base;
6292 /* Find the preferred mode */
6293 list_for_each_entry (m, list_head, head) {
6294 if (m->type & DRM_MODE_TYPE_PREFERRED) {
6301 /* Probably an EDID with no preferred mode. Fall back to the first entry */
6302 m_pref = list_first_entry_or_null(
6303 &aconnector->base.modes, struct drm_display_mode, head);
6305 DRM_DEBUG_DRIVER("No preferred mode found in EDID\n");
6310 highest_refresh = drm_mode_vrefresh(m_pref);
6313 * Find the mode with the highest refresh rate at the same resolution.
6314 * For some monitors, the preferred mode is not the one with the
6315 * highest supported refresh rate.
6317 list_for_each_entry (m, list_head, head) {
6318 current_refresh = drm_mode_vrefresh(m);
6320 if (m->hdisplay == m_pref->hdisplay &&
6321 m->vdisplay == m_pref->vdisplay &&
6322 highest_refresh < current_refresh) {
6323 highest_refresh = current_refresh;
6328 aconnector->freesync_vid_base = *m_pref;
6332 static bool is_freesync_video_mode(const struct drm_display_mode *mode,
6333 struct amdgpu_dm_connector *aconnector)
6335 struct drm_display_mode *high_mode;
6338 high_mode = get_highest_refresh_rate_mode(aconnector, false);
6339 if (!high_mode || !mode)
6342 timing_diff = high_mode->vtotal - mode->vtotal;
6344 if (high_mode->clock == 0 || high_mode->clock != mode->clock ||
6345 high_mode->hdisplay != mode->hdisplay ||
6346 high_mode->vdisplay != mode->vdisplay ||
6347 high_mode->hsync_start != mode->hsync_start ||
6348 high_mode->hsync_end != mode->hsync_end ||
6349 high_mode->htotal != mode->htotal ||
6350 high_mode->hskew != mode->hskew ||
6351 high_mode->vscan != mode->vscan ||
6352 high_mode->vsync_start - mode->vsync_start != timing_diff ||
6353 high_mode->vsync_end - mode->vsync_end != timing_diff)
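/*
 * Example pair that passes the checks above (hypothetical timings): with
 * high_mode = 1080p60 (vtotal 1125, vsync_start 1129, vsync_end 1134) and
 * mode = a 1080p30 variant stretched from it (vtotal 2250, vsync_start 2254,
 * vsync_end 2259), timing_diff = 1125 - 2250 = -1125 and both vsync deltas
 * also equal -1125, so the mode is treated as a FreeSync video mode.
 */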
6359 static struct dc_stream_state *
6360 create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
6361 const struct drm_display_mode *drm_mode,
6362 const struct dm_connector_state *dm_state,
6363 const struct dc_stream_state *old_stream,
6366 struct drm_display_mode *preferred_mode = NULL;
6367 struct drm_connector *drm_connector;
6368 const struct drm_connector_state *con_state =
6369 dm_state ? &dm_state->base : NULL;
6370 struct dc_stream_state *stream = NULL;
6371 struct drm_display_mode mode = *drm_mode;
6372 struct drm_display_mode saved_mode;
6373 struct drm_display_mode *freesync_mode = NULL;
6374 bool native_mode_found = false;
6375 bool recalculate_timing = false;
6376 bool scale = dm_state ? (dm_state->scaling != RMX_OFF) : false;
6378 int preferred_refresh = 0;
6379 #if defined(CONFIG_DRM_AMD_DC_DCN)
6380 struct dsc_dec_dpcd_caps dsc_caps;
6382 struct dc_sink *sink = NULL;
6384 memset(&saved_mode, 0, sizeof(saved_mode));
6386 if (aconnector == NULL) {
6387 DRM_ERROR("aconnector is NULL!\n");
6391 drm_connector = &aconnector->base;
6393 if (!aconnector->dc_sink) {
6394 sink = create_fake_sink(aconnector);
6398 sink = aconnector->dc_sink;
6399 dc_sink_retain(sink);
6402 stream = dc_create_stream_for_sink(sink);
6404 if (stream == NULL) {
6405 DRM_ERROR("Failed to create stream for sink!\n");
6409 stream->dm_stream_context = aconnector;
6411 stream->timing.flags.LTE_340MCSC_SCRAMBLE =
6412 drm_connector->display_info.hdmi.scdc.scrambling.low_rates;
6414 list_for_each_entry(preferred_mode, &aconnector->base.modes, head) {
6415 /* Search for preferred mode */
6416 if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED) {
6417 native_mode_found = true;
6421 if (!native_mode_found)
6422 preferred_mode = list_first_entry_or_null(
6423 &aconnector->base.modes,
6424 struct drm_display_mode,
6427 mode_refresh = drm_mode_vrefresh(&mode);
6429 if (preferred_mode == NULL) {
6431 * This may not be an error: the use case is when we have no
6432 * usermode calls to reset and set the mode upon hotplug. In this
6433 * case, we call set mode ourselves to restore the previous mode,
6434 * and the mode list may not be filled in yet.
6436 DRM_DEBUG_DRIVER("No preferred mode found\n");
6438 recalculate_timing = amdgpu_freesync_vid_mode &&
6439 is_freesync_video_mode(&mode, aconnector);
6440 if (recalculate_timing) {
6441 freesync_mode = get_highest_refresh_rate_mode(aconnector, false);
6443 mode = *freesync_mode;
6445 decide_crtc_timing_for_drm_display_mode(
6446 &mode, preferred_mode, scale);
6448 preferred_refresh = drm_mode_vrefresh(preferred_mode);
6452 if (recalculate_timing)
6453 drm_mode_set_crtcinfo(&saved_mode, 0);
6455 drm_mode_set_crtcinfo(&mode, 0);
6458 * If scaling is enabled and the refresh rate didn't change,
6459 * we copy the VIC and polarities of the old timings.
6461 if (!scale || mode_refresh != preferred_refresh)
6462 fill_stream_properties_from_drm_display_mode(
6463 stream, &mode, &aconnector->base, con_state, NULL,
6466 fill_stream_properties_from_drm_display_mode(
6467 stream, &mode, &aconnector->base, con_state, old_stream,
6470 #if defined(CONFIG_DRM_AMD_DC_DCN)
6471 /* SST DSC determination policy */
6472 update_dsc_caps(aconnector, sink, stream, &dsc_caps);
6473 if (aconnector->dsc_settings.dsc_force_enable != DSC_CLK_FORCE_DISABLE && dsc_caps.is_dsc_supported)
6474 apply_dsc_policy_for_stream(aconnector, sink, stream, &dsc_caps);
6477 update_stream_scaling_settings(&mode, dm_state, stream);
6480 &stream->audio_info,
6484 update_stream_signal(stream, sink);
6486 if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
6487 mod_build_hf_vsif_infopacket(stream, &stream->vsp_infopacket);
6489 if (stream->link->psr_settings.psr_feature_enabled) {
6491 // Decide whether the stream supports VSC SDP colorimetry
6492 // before building the VSC info packet.
6494 stream->use_vsc_sdp_for_colorimetry = false;
6495 if (aconnector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
6496 stream->use_vsc_sdp_for_colorimetry =
6497 aconnector->dc_sink->is_vsc_sdp_colorimetry_supported;
6499 if (stream->link->dpcd_caps.dprx_feature.bits.VSC_SDP_COLORIMETRY_SUPPORTED)
6500 stream->use_vsc_sdp_for_colorimetry = true;
6502 mod_build_vsc_infopacket(stream, &stream->vsc_infopacket);
6503 aconnector->psr_skip_count = AMDGPU_DM_PSR_ENTRY_DELAY;
6507 dc_sink_release(sink);
6512 static void amdgpu_dm_crtc_destroy(struct drm_crtc *crtc)
6514 drm_crtc_cleanup(crtc);
6518 static void dm_crtc_destroy_state(struct drm_crtc *crtc,
6519 struct drm_crtc_state *state)
6521 struct dm_crtc_state *cur = to_dm_crtc_state(state);
6523 /* TODO: Destroy dc_stream objects once the stream object is flattened */
6525 dc_stream_release(cur->stream);
6528 __drm_atomic_helper_crtc_destroy_state(state);
6534 static void dm_crtc_reset_state(struct drm_crtc *crtc)
6536 struct dm_crtc_state *state;
6539 dm_crtc_destroy_state(crtc, crtc->state);
6541 state = kzalloc(sizeof(*state), GFP_KERNEL);
6542 if (WARN_ON(!state))
6545 __drm_atomic_helper_crtc_reset(crtc, &state->base);
6548 static struct drm_crtc_state *
6549 dm_crtc_duplicate_state(struct drm_crtc *crtc)
6551 struct dm_crtc_state *state, *cur;
6553 cur = to_dm_crtc_state(crtc->state);
6555 if (WARN_ON(!crtc->state))
6558 state = kzalloc(sizeof(*state), GFP_KERNEL);
6562 __drm_atomic_helper_crtc_duplicate_state(crtc, &state->base);
6565 state->stream = cur->stream;
6566 dc_stream_retain(state->stream);
6569 state->active_planes = cur->active_planes;
6570 state->vrr_infopacket = cur->vrr_infopacket;
6571 state->abm_level = cur->abm_level;
6572 state->vrr_supported = cur->vrr_supported;
6573 state->freesync_config = cur->freesync_config;
6574 state->cm_has_degamma = cur->cm_has_degamma;
6575 state->cm_is_degamma_srgb = cur->cm_is_degamma_srgb;
6576 state->force_dpms_off = cur->force_dpms_off;
6577 /* TODO: Duplicate dc_stream once the stream object is flattened */
6579 return &state->base;
6582 #ifdef CONFIG_DRM_AMD_SECURE_DISPLAY
6583 static int amdgpu_dm_crtc_late_register(struct drm_crtc *crtc)
6585 crtc_debugfs_init(crtc);
6591 static inline int dm_set_vupdate_irq(struct drm_crtc *crtc, bool enable)
6593 enum dc_irq_source irq_source;
6594 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
6595 struct amdgpu_device *adev = drm_to_adev(crtc->dev);
6598 irq_source = IRQ_TYPE_VUPDATE + acrtc->otg_inst;
6600 rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
6602 DRM_DEBUG_VBL("crtc %d - vupdate irq %sabling: r=%d\n",
6603 acrtc->crtc_id, enable ? "en" : "dis", rc);
6607 static inline int dm_set_vblank(struct drm_crtc *crtc, bool enable)
6609 enum dc_irq_source irq_source;
6610 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
6611 struct amdgpu_device *adev = drm_to_adev(crtc->dev);
6612 struct dm_crtc_state *acrtc_state = to_dm_crtc_state(crtc->state);
6613 #if defined(CONFIG_DRM_AMD_DC_DCN)
6614 struct amdgpu_display_manager *dm = &adev->dm;
6615 struct vblank_control_work *work;
6620 /* vblank irq on -> Only need vupdate irq in vrr mode */
6621 if (amdgpu_dm_vrr_active(acrtc_state))
6622 rc = dm_set_vupdate_irq(crtc, true);
6624 /* vblank irq off -> vupdate irq off */
6625 rc = dm_set_vupdate_irq(crtc, false);
6631 irq_source = IRQ_TYPE_VBLANK + acrtc->otg_inst;
6633 if (!dc_interrupt_set(adev->dm.dc, irq_source, enable))
6636 if (amdgpu_in_reset(adev))
6639 #if defined(CONFIG_DRM_AMD_DC_DCN)
6640 if (dm->vblank_control_workqueue) {
6641 work = kzalloc(sizeof(*work), GFP_ATOMIC);
6645 INIT_WORK(&work->work, vblank_control_worker);
6647 work->acrtc = acrtc;
6648 work->enable = enable;
6650 if (acrtc_state->stream) {
6651 dc_stream_retain(acrtc_state->stream);
6652 work->stream = acrtc_state->stream;
6655 queue_work(dm->vblank_control_workqueue, &work->work);
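/*
 * Note on the deferral above: queuing to dm->vblank_control_workqueue
 * rather than acting here is presumably because the worker may need to
 * take DC locks and drive firmware transitions (e.g. PSR), which is not
 * safe from this context; the dc_stream_retain() keeps the stream alive
 * until the worker runs.
 */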
6662 static int dm_enable_vblank(struct drm_crtc *crtc)
6664 return dm_set_vblank(crtc, true);
6667 static void dm_disable_vblank(struct drm_crtc *crtc)
6669 dm_set_vblank(crtc, false);
6672 /* Only the options currently available to the driver are implemented */
6673 static const struct drm_crtc_funcs amdgpu_dm_crtc_funcs = {
6674 .reset = dm_crtc_reset_state,
6675 .destroy = amdgpu_dm_crtc_destroy,
6676 .set_config = drm_atomic_helper_set_config,
6677 .page_flip = drm_atomic_helper_page_flip,
6678 .atomic_duplicate_state = dm_crtc_duplicate_state,
6679 .atomic_destroy_state = dm_crtc_destroy_state,
6680 .set_crc_source = amdgpu_dm_crtc_set_crc_source,
6681 .verify_crc_source = amdgpu_dm_crtc_verify_crc_source,
6682 .get_crc_sources = amdgpu_dm_crtc_get_crc_sources,
6683 .get_vblank_counter = amdgpu_get_vblank_counter_kms,
6684 .enable_vblank = dm_enable_vblank,
6685 .disable_vblank = dm_disable_vblank,
6686 .get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
6687 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
6688 .late_register = amdgpu_dm_crtc_late_register,
6692 static enum drm_connector_status
6693 amdgpu_dm_connector_detect(struct drm_connector *connector, bool force)
6696 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
6700 * 1. This interface is NOT called in the context of an HPD irq.
6701 * 2. This interface *is called* in the context of a user-mode ioctl,
6702 * which makes it a bad place for *any* MST-related activity.
6705 if (aconnector->base.force == DRM_FORCE_UNSPECIFIED &&
6706 !aconnector->fake_enable)
6707 connected = (aconnector->dc_sink != NULL);
6709 connected = (aconnector->base.force == DRM_FORCE_ON);
6711 update_subconnector_property(aconnector);
6713 return (connected ? connector_status_connected :
6714 connector_status_disconnected);
6717 int amdgpu_dm_connector_atomic_set_property(struct drm_connector *connector,
6718 struct drm_connector_state *connector_state,
6719 struct drm_property *property,
6722 struct drm_device *dev = connector->dev;
6723 struct amdgpu_device *adev = drm_to_adev(dev);
6724 struct dm_connector_state *dm_old_state =
6725 to_dm_connector_state(connector->state);
6726 struct dm_connector_state *dm_new_state =
6727 to_dm_connector_state(connector_state);
6731 if (property == dev->mode_config.scaling_mode_property) {
6732 enum amdgpu_rmx_type rmx_type;
6735 case DRM_MODE_SCALE_CENTER:
6736 rmx_type = RMX_CENTER;
6738 case DRM_MODE_SCALE_ASPECT:
6739 rmx_type = RMX_ASPECT;
6741 case DRM_MODE_SCALE_FULLSCREEN:
6742 rmx_type = RMX_FULL;
6744 case DRM_MODE_SCALE_NONE:
6750 if (dm_old_state->scaling == rmx_type)
6753 dm_new_state->scaling = rmx_type;
6755 } else if (property == adev->mode_info.underscan_hborder_property) {
6756 dm_new_state->underscan_hborder = val;
6758 } else if (property == adev->mode_info.underscan_vborder_property) {
6759 dm_new_state->underscan_vborder = val;
6761 } else if (property == adev->mode_info.underscan_property) {
6762 dm_new_state->underscan_enable = val;
6764 } else if (property == adev->mode_info.abm_level_property) {
6765 dm_new_state->abm_level = val;
6772 int amdgpu_dm_connector_atomic_get_property(struct drm_connector *connector,
6773 const struct drm_connector_state *state,
6774 struct drm_property *property,
6777 struct drm_device *dev = connector->dev;
6778 struct amdgpu_device *adev = drm_to_adev(dev);
6779 struct dm_connector_state *dm_state =
6780 to_dm_connector_state(state);
6783 if (property == dev->mode_config.scaling_mode_property) {
6784 switch (dm_state->scaling) {
6786 *val = DRM_MODE_SCALE_CENTER;
6789 *val = DRM_MODE_SCALE_ASPECT;
6792 *val = DRM_MODE_SCALE_FULLSCREEN;
6796 *val = DRM_MODE_SCALE_NONE;
6800 } else if (property == adev->mode_info.underscan_hborder_property) {
6801 *val = dm_state->underscan_hborder;
6803 } else if (property == adev->mode_info.underscan_vborder_property) {
6804 *val = dm_state->underscan_vborder;
6806 } else if (property == adev->mode_info.underscan_property) {
6807 *val = dm_state->underscan_enable;
6809 } else if (property == adev->mode_info.abm_level_property) {
6810 *val = dm_state->abm_level;
6817 static void amdgpu_dm_connector_unregister(struct drm_connector *connector)
6819 struct amdgpu_dm_connector *amdgpu_dm_connector = to_amdgpu_dm_connector(connector);
6821 drm_dp_aux_unregister(&amdgpu_dm_connector->dm_dp_aux.aux);
6824 static void amdgpu_dm_connector_destroy(struct drm_connector *connector)
6826 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
6827 const struct dc_link *link = aconnector->dc_link;
6828 struct amdgpu_device *adev = drm_to_adev(connector->dev);
6829 struct amdgpu_display_manager *dm = &adev->dm;
6833 * Call only if mst_mgr was initialized before, since it's not done
6834 * for all connector types.
6836 if (aconnector->mst_mgr.dev)
6837 drm_dp_mst_topology_mgr_destroy(&aconnector->mst_mgr);
6839 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
6840 defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
6841 for (i = 0; i < dm->num_of_edps; i++) {
6842 if ((link == dm->backlight_link[i]) && dm->backlight_dev[i]) {
6843 backlight_device_unregister(dm->backlight_dev[i]);
6844 dm->backlight_dev[i] = NULL;
6849 if (aconnector->dc_em_sink)
6850 dc_sink_release(aconnector->dc_em_sink);
6851 aconnector->dc_em_sink = NULL;
6852 if (aconnector->dc_sink)
6853 dc_sink_release(aconnector->dc_sink);
6854 aconnector->dc_sink = NULL;
6856 drm_dp_cec_unregister_connector(&aconnector->dm_dp_aux.aux);
6857 drm_connector_unregister(connector);
6858 drm_connector_cleanup(connector);
6859 if (aconnector->i2c) {
6860 i2c_del_adapter(&aconnector->i2c->base);
6861 kfree(aconnector->i2c);
6863 kfree(aconnector->dm_dp_aux.aux.name);
6868 void amdgpu_dm_connector_funcs_reset(struct drm_connector *connector)
6870 struct dm_connector_state *state =
6871 to_dm_connector_state(connector->state);
6873 if (connector->state)
6874 __drm_atomic_helper_connector_destroy_state(connector->state);
6878 state = kzalloc(sizeof(*state), GFP_KERNEL);
6881 state->scaling = RMX_OFF;
6882 state->underscan_enable = false;
6883 state->underscan_hborder = 0;
6884 state->underscan_vborder = 0;
6885 state->base.max_requested_bpc = 8;
6886 state->vcpi_slots = 0;
6888 if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
6889 state->abm_level = amdgpu_dm_abm_level;
6891 __drm_atomic_helper_connector_reset(connector, &state->base);
6895 struct drm_connector_state *
6896 amdgpu_dm_connector_atomic_duplicate_state(struct drm_connector *connector)
6898 struct dm_connector_state *state =
6899 to_dm_connector_state(connector->state);
6901 struct dm_connector_state *new_state =
6902 kmemdup(state, sizeof(*state), GFP_KERNEL);
6907 __drm_atomic_helper_connector_duplicate_state(connector, &new_state->base);
6909 new_state->freesync_capable = state->freesync_capable;
6910 new_state->abm_level = state->abm_level;
6911 new_state->scaling = state->scaling;
6912 new_state->underscan_enable = state->underscan_enable;
6913 new_state->underscan_hborder = state->underscan_hborder;
6914 new_state->underscan_vborder = state->underscan_vborder;
6915 new_state->vcpi_slots = state->vcpi_slots;
6916 new_state->pbn = state->pbn;
6917 return &new_state->base;
6921 amdgpu_dm_connector_late_register(struct drm_connector *connector)
6923 struct amdgpu_dm_connector *amdgpu_dm_connector =
6924 to_amdgpu_dm_connector(connector);
6927 if ((connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort) ||
6928 (connector->connector_type == DRM_MODE_CONNECTOR_eDP)) {
6929 amdgpu_dm_connector->dm_dp_aux.aux.dev = connector->kdev;
6930 r = drm_dp_aux_register(&amdgpu_dm_connector->dm_dp_aux.aux);
6935 #if defined(CONFIG_DEBUG_FS)
6936 connector_debugfs_init(amdgpu_dm_connector);
6942 static const struct drm_connector_funcs amdgpu_dm_connector_funcs = {
6943 .reset = amdgpu_dm_connector_funcs_reset,
6944 .detect = amdgpu_dm_connector_detect,
6945 .fill_modes = drm_helper_probe_single_connector_modes,
6946 .destroy = amdgpu_dm_connector_destroy,
6947 .atomic_duplicate_state = amdgpu_dm_connector_atomic_duplicate_state,
6948 .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
6949 .atomic_set_property = amdgpu_dm_connector_atomic_set_property,
6950 .atomic_get_property = amdgpu_dm_connector_atomic_get_property,
6951 .late_register = amdgpu_dm_connector_late_register,
6952 .early_unregister = amdgpu_dm_connector_unregister
6955 static int get_modes(struct drm_connector *connector)
6957 return amdgpu_dm_connector_get_modes(connector);
6960 static void create_eml_sink(struct amdgpu_dm_connector *aconnector)
6962 struct dc_sink_init_data init_params = {
6963 .link = aconnector->dc_link,
6964 .sink_signal = SIGNAL_TYPE_VIRTUAL
6968 if (!aconnector->base.edid_blob_ptr) {
6969 DRM_ERROR("No EDID firmware found on connector: %s, forcing to OFF!\n",
6970 aconnector->base.name);
6972 aconnector->base.force = DRM_FORCE_OFF;
6973 aconnector->base.override_edid = false;
6977 edid = (struct edid *) aconnector->base.edid_blob_ptr->data;
6979 aconnector->edid = edid;
6981 aconnector->dc_em_sink = dc_link_add_remote_sink(
6982 aconnector->dc_link,
6984 (edid->extensions + 1) * EDID_LENGTH,
6987 if (aconnector->base.force == DRM_FORCE_ON) {
6988 aconnector->dc_sink = aconnector->dc_link->local_sink ?
6989 aconnector->dc_link->local_sink :
6990 aconnector->dc_em_sink;
6991 dc_sink_retain(aconnector->dc_sink);
6995 static void handle_edid_mgmt(struct amdgpu_dm_connector *aconnector)
6997 struct dc_link *link = (struct dc_link *)aconnector->dc_link;
7000 * In case of a headless boot with force on for a DP-managed connector,
7001 * those settings have to be != 0 to get an initial modeset.
7003 if (link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT) {
7004 link->verified_link_cap.lane_count = LANE_COUNT_FOUR;
7005 link->verified_link_cap.link_rate = LINK_RATE_HIGH2;
7009 aconnector->base.override_edid = true;
7010 create_eml_sink(aconnector);
7013 static struct dc_stream_state *
7014 create_validate_stream_for_sink(struct amdgpu_dm_connector *aconnector,
7015 const struct drm_display_mode *drm_mode,
7016 const struct dm_connector_state *dm_state,
7017 const struct dc_stream_state *old_stream)
7019 struct drm_connector *connector = &aconnector->base;
7020 struct amdgpu_device *adev = drm_to_adev(connector->dev);
7021 struct dc_stream_state *stream;
7022 const struct drm_connector_state *drm_state = dm_state ? &dm_state->base : NULL;
7023 int requested_bpc = drm_state ? drm_state->max_requested_bpc : 8;
7024 enum dc_status dc_result = DC_OK;
7027 stream = create_stream_for_sink(aconnector, drm_mode,
7028 dm_state, old_stream,
7030 if (stream == NULL) {
7031 DRM_ERROR("Failed to create stream for sink!\n");
7035 dc_result = dc_validate_stream(adev->dm.dc, stream);
7037 if (dc_result != DC_OK) {
7038 DRM_DEBUG_KMS("Mode %dx%d (clk %d) failed DC validation with error %d (%s)\n",
7043 dc_status_to_str(dc_result));
7045 dc_stream_release(stream);
7047 requested_bpc -= 2; /* lower bpc to retry validation */
7050 } while (stream == NULL && requested_bpc >= 6);
7052 if (dc_result == DC_FAIL_ENC_VALIDATE && !aconnector->force_yuv420_output) {
7053 DRM_DEBUG_KMS("Retry forcing YCbCr420 encoding\n");
7055 aconnector->force_yuv420_output = true;
7056 stream = create_validate_stream_for_sink(aconnector, drm_mode,
7057 dm_state, old_stream);
7058 aconnector->force_yuv420_output = false;
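/*
 * Example of the fallback ladder above: with max_requested_bpc = 10 the
 * loop tries 10, then 8, then 6 bpc before giving up; if validation still
 * fails with DC_FAIL_ENC_VALIDATE, one more attempt is made with YCbCr420
 * forced, which roughly halves the required link bandwidth versus 4:4:4.
 */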
7064 enum drm_mode_status amdgpu_dm_connector_mode_valid(struct drm_connector *connector,
7065 struct drm_display_mode *mode)
7067 int result = MODE_ERROR;
7068 struct dc_sink *dc_sink;
7069 /* TODO: Unhardcode stream count */
7070 struct dc_stream_state *stream;
7071 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
7073 if ((mode->flags & DRM_MODE_FLAG_INTERLACE) ||
7074 (mode->flags & DRM_MODE_FLAG_DBLSCAN))
7078 * Only run this the first time mode_valid is called to initialize
7081 if (aconnector->base.force != DRM_FORCE_UNSPECIFIED &&
7082 !aconnector->dc_em_sink)
7083 handle_edid_mgmt(aconnector);
7085 dc_sink = to_amdgpu_dm_connector(connector)->dc_sink;
7087 if (dc_sink == NULL && aconnector->base.force != DRM_FORCE_ON_DIGITAL &&
7088 aconnector->base.force != DRM_FORCE_ON) {
7089 DRM_ERROR("dc_sink is NULL!\n");
7093 stream = create_validate_stream_for_sink(aconnector, mode, NULL, NULL);
7095 dc_stream_release(stream);
7100 /* TODO: error handling */
7104 static int fill_hdr_info_packet(const struct drm_connector_state *state,
7105 struct dc_info_packet *out)
7107 struct hdmi_drm_infoframe frame;
7108 unsigned char buf[30]; /* 26 + 4 */
7112 memset(out, 0, sizeof(*out));
7114 if (!state->hdr_output_metadata)
7117 ret = drm_hdmi_infoframe_set_hdr_metadata(&frame, state);
7121 len = hdmi_drm_infoframe_pack_only(&frame, buf, sizeof(buf));
7125 /* Static metadata is a fixed 26 bytes + 4 byte header. */
7129 /* Prepare the infopacket for DC. */
7130 switch (state->connector->connector_type) {
7131 case DRM_MODE_CONNECTOR_HDMIA:
7132 out->hb0 = 0x87; /* type */
7133 out->hb1 = 0x01; /* version */
7134 out->hb2 = 0x1A; /* length */
7135 out->sb[0] = buf[3]; /* checksum */
7139 case DRM_MODE_CONNECTOR_DisplayPort:
7140 case DRM_MODE_CONNECTOR_eDP:
7141 out->hb0 = 0x00; /* sdp id, zero */
7142 out->hb1 = 0x87; /* type */
7143 out->hb2 = 0x1D; /* payload len - 1 */
7144 out->hb3 = (0x13 << 2); /* sdp version */
7145 out->sb[0] = 0x01; /* version */
7146 out->sb[1] = 0x1A; /* length */
7154 memcpy(&out->sb[i], &buf[4], 26);
7157 print_hex_dump(KERN_DEBUG, "HDR SB:", DUMP_PREFIX_NONE, 16, 1, out->sb,
7158 sizeof(out->sb), false);
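/*
 * Layout recap (mirrors the code above): both paths carry the same 26-byte
 * CTA-861-G static metadata payload. HDMI wraps it in a DRM infoframe
 * (type 0x87, version 0x01, length 0x1A, checksum in sb[0]), while DP
 * carries it in an SDP whose header lives in hb0-hb3 and whose first two
 * payload bytes restate the version and length.
 */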
7164 amdgpu_dm_connector_atomic_check(struct drm_connector *conn,
7165 struct drm_atomic_state *state)
7167 struct drm_connector_state *new_con_state =
7168 drm_atomic_get_new_connector_state(state, conn);
7169 struct drm_connector_state *old_con_state =
7170 drm_atomic_get_old_connector_state(state, conn);
7171 struct drm_crtc *crtc = new_con_state->crtc;
7172 struct drm_crtc_state *new_crtc_state;
7175 trace_amdgpu_dm_connector_atomic_check(new_con_state);
7180 if (!drm_connector_atomic_hdr_metadata_equal(old_con_state, new_con_state)) {
7181 struct dc_info_packet hdr_infopacket;
7183 ret = fill_hdr_info_packet(new_con_state, &hdr_infopacket);
7187 new_crtc_state = drm_atomic_get_crtc_state(state, crtc);
7188 if (IS_ERR(new_crtc_state))
7189 return PTR_ERR(new_crtc_state);
7192 * DC considers the stream backends changed if the
7193 * static metadata changes. Forcing the modeset also
7194 * gives a simple way for userspace to switch from
7195 * 8bpc to 10bpc when setting the metadata to enter
7198 * Changing the static metadata after it's been
7199 * set is permissible, however. So only force a
7200 * modeset if we're entering or exiting HDR.
7202 new_crtc_state->mode_changed =
7203 !old_con_state->hdr_output_metadata ||
7204 !new_con_state->hdr_output_metadata;
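/*
 * Concretely: setting metadata for the first time (old == NULL) or
 * clearing it (new == NULL) forces a modeset, while swapping one valid
 * metadata blob for another only updates the stream.
 */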
7210 static const struct drm_connector_helper_funcs
7211 amdgpu_dm_connector_helper_funcs = {
7213 * If hotplugging a second, bigger display in FB console mode, bigger-resolution
7214 * modes will be filtered by drm_mode_validate_size(), and those modes
7215 * are missing after the user starts lightdm. So we need to renew the modes
7216 * list in the get_modes callback, not just return the modes count.
7218 .get_modes = get_modes,
7219 .mode_valid = amdgpu_dm_connector_mode_valid,
7220 .atomic_check = amdgpu_dm_connector_atomic_check,
7223 static void dm_crtc_helper_disable(struct drm_crtc *crtc)
7227 static int count_crtc_active_planes(struct drm_crtc_state *new_crtc_state)
7229 struct drm_atomic_state *state = new_crtc_state->state;
7230 struct drm_plane *plane;
7233 drm_for_each_plane_mask(plane, state->dev, new_crtc_state->plane_mask) {
7234 struct drm_plane_state *new_plane_state;
7236 /* Cursor planes are "fake". */
7237 if (plane->type == DRM_PLANE_TYPE_CURSOR)
7240 new_plane_state = drm_atomic_get_new_plane_state(state, plane);
7242 if (!new_plane_state) {
7244 * The plane is enabled on the CRTC and hasn't changed
7245 * state. This means that it previously passed
7246 * validation and is therefore enabled.
7252 /* We need a framebuffer to be considered enabled. */
7253 num_active += (new_plane_state->fb != NULL);
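/*
 * Example: a CRTC showing a framebuffer on the primary plane plus a
 * cursor ends up with num_active == 1 here; the cursor plane is skipped
 * as "fake" and a plane without an FB attached does not count.
 */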
7259 static void dm_update_crtc_active_planes(struct drm_crtc *crtc,
7260 struct drm_crtc_state *new_crtc_state)
7262 struct dm_crtc_state *dm_new_crtc_state =
7263 to_dm_crtc_state(new_crtc_state);
7265 dm_new_crtc_state->active_planes = 0;
7267 if (!dm_new_crtc_state->stream)
7270 dm_new_crtc_state->active_planes =
7271 count_crtc_active_planes(new_crtc_state);
7274 static int dm_crtc_helper_atomic_check(struct drm_crtc *crtc,
7275 struct drm_atomic_state *state)
7277 struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state,
7279 struct amdgpu_device *adev = drm_to_adev(crtc->dev);
7280 struct dc *dc = adev->dm.dc;
7281 struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
7284 trace_amdgpu_dm_crtc_atomic_check(crtc_state);
7286 dm_update_crtc_active_planes(crtc, crtc_state);
7288 if (WARN_ON(unlikely(!dm_crtc_state->stream &&
7289 modeset_required(crtc_state, NULL, dm_crtc_state->stream)))) {
7294 * We require the primary plane to be enabled whenever the CRTC is, otherwise
7295 * drm_mode_cursor_universal may end up trying to enable the cursor plane while all other
7296 * planes are disabled, which is not supported by the hardware. And there is legacy
7297 * userspace which stops using the HW cursor altogether in response to the resulting EINVAL.
7299 if (crtc_state->enable &&
7300 !(crtc_state->plane_mask & drm_plane_mask(crtc->primary))) {
7301 DRM_DEBUG_ATOMIC("Can't enable a CRTC without enabling the primary plane\n");
7305 /* In some use cases, like reset, no stream is attached */
7306 if (!dm_crtc_state->stream)
7309 if (dc_validate_stream(dc, dm_crtc_state->stream) == DC_OK)
7312 DRM_DEBUG_ATOMIC("Failed DC stream validation\n");
7316 static bool dm_crtc_helper_mode_fixup(struct drm_crtc *crtc,
7317 const struct drm_display_mode *mode,
7318 struct drm_display_mode *adjusted_mode)
7323 static const struct drm_crtc_helper_funcs amdgpu_dm_crtc_helper_funcs = {
7324 .disable = dm_crtc_helper_disable,
7325 .atomic_check = dm_crtc_helper_atomic_check,
7326 .mode_fixup = dm_crtc_helper_mode_fixup,
7327 .get_scanout_position = amdgpu_crtc_get_scanout_position,
7330 static void dm_encoder_helper_disable(struct drm_encoder *encoder)
7335 static int convert_dc_color_depth_into_bpc(enum dc_color_depth display_color_depth)
7337 switch (display_color_depth) {
7338 case COLOR_DEPTH_666:
7340 case COLOR_DEPTH_888:
7342 case COLOR_DEPTH_101010:
7344 case COLOR_DEPTH_121212:
7346 case COLOR_DEPTH_141414:
7348 case COLOR_DEPTH_161616:
7356 static int dm_encoder_helper_atomic_check(struct drm_encoder *encoder,
7357 struct drm_crtc_state *crtc_state,
7358 struct drm_connector_state *conn_state)
7360 struct drm_atomic_state *state = crtc_state->state;
7361 struct drm_connector *connector = conn_state->connector;
7362 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
7363 struct dm_connector_state *dm_new_connector_state = to_dm_connector_state(conn_state);
7364 const struct drm_display_mode *adjusted_mode = &crtc_state->adjusted_mode;
7365 struct drm_dp_mst_topology_mgr *mst_mgr;
7366 struct drm_dp_mst_port *mst_port;
7367 enum dc_color_depth color_depth;
7369 bool is_y420 = false;
7371 if (!aconnector->port || !aconnector->dc_sink)
7374 mst_port = aconnector->port;
7375 mst_mgr = &aconnector->mst_port->mst_mgr;
7377 if (!crtc_state->connectors_changed && !crtc_state->mode_changed)
7380 if (!state->duplicated) {
7381 int max_bpc = conn_state->max_requested_bpc;
7382 is_y420 = drm_mode_is_420_also(&connector->display_info, adjusted_mode) &&
7383 aconnector->force_yuv420_output;
7384 color_depth = convert_color_depth_from_display_info(connector,
7387 bpp = convert_dc_color_depth_into_bpc(color_depth) * 3;
7388 clock = adjusted_mode->clock;
7389 dm_new_connector_state->pbn = drm_dp_calc_pbn_mode(clock, bpp, false);
7391 dm_new_connector_state->vcpi_slots = drm_dp_atomic_find_vcpi_slots(state,
7394 dm_new_connector_state->pbn,
7395 dm_mst_get_pbn_divider(aconnector->dc_link));
7396 if (dm_new_connector_state->vcpi_slots < 0) {
7397 DRM_DEBUG_ATOMIC("failed finding vcpi slots: %d\n", (int)dm_new_connector_state->vcpi_slots);
7398 return dm_new_connector_state->vcpi_slots;
7403 const struct drm_encoder_helper_funcs amdgpu_dm_encoder_helper_funcs = {
7404 .disable = dm_encoder_helper_disable,
7405 .atomic_check = dm_encoder_helper_atomic_check
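/*
 * A minimal sketch of the PBN math used in the atomic check above,
 * assuming the drm_dp_calc_pbn_mode() signature from this kernel
 * (illustrative helper, not driver code):
 */
static inline int example_pbn_1080p60_8bpc(void)
{
	/* 148500 kHz pixel clock at 8 bpc RGB => bpp = 8 * 3 = 24, no DSC.
	 * Evaluates to roughly 532 PBN once the 64/54 MST framing overhead
	 * and 0.6% margin are applied; dividing by the link's PBN-per-slot
	 * value (dm_mst_get_pbn_divider()) then yields the VCPI slot count.
	 */
	return drm_dp_calc_pbn_mode(148500, 24, false);
}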
7408 #if defined(CONFIG_DRM_AMD_DC_DCN)
7409 static int dm_update_mst_vcpi_slots_for_dsc(struct drm_atomic_state *state,
7410 struct dc_state *dc_state,
7411 struct dsc_mst_fairness_vars *vars)
7413 struct dc_stream_state *stream = NULL;
7414 struct drm_connector *connector;
7415 struct drm_connector_state *new_con_state;
7416 struct amdgpu_dm_connector *aconnector;
7417 struct dm_connector_state *dm_conn_state;
7419 int vcpi, pbn_div, pbn, slot_num = 0;
7421 for_each_new_connector_in_state(state, connector, new_con_state, i) {
7423 aconnector = to_amdgpu_dm_connector(connector);
7425 if (!aconnector->port)
7428 if (!new_con_state || !new_con_state->crtc)
7431 dm_conn_state = to_dm_connector_state(new_con_state);
7433 for (j = 0; j < dc_state->stream_count; j++) {
7434 stream = dc_state->streams[j];
7438 if ((struct amdgpu_dm_connector*)stream->dm_stream_context == aconnector)
7447 pbn_div = dm_mst_get_pbn_divider(stream->link);
7448 /* pbn is calculated by compute_mst_dsc_configs_for_state */
7449 for (j = 0; j < dc_state->stream_count; j++) {
7450 if (vars[j].aconnector == aconnector) {
7456 if (j == dc_state->stream_count)
7459 slot_num = DIV_ROUND_UP(pbn, pbn_div);
7461 if (stream->timing.flags.DSC != 1) {
7462 dm_conn_state->pbn = pbn;
7463 dm_conn_state->vcpi_slots = slot_num;
7465 drm_dp_mst_atomic_enable_dsc(state,
7473 vcpi = drm_dp_mst_atomic_enable_dsc(state,
7480 dm_conn_state->pbn = pbn;
7481 dm_conn_state->vcpi_slots = vcpi;
7487 static void dm_drm_plane_reset(struct drm_plane *plane)
7489 struct dm_plane_state *amdgpu_state = NULL;
7492 plane->funcs->atomic_destroy_state(plane, plane->state);
7494 amdgpu_state = kzalloc(sizeof(*amdgpu_state), GFP_KERNEL);
7495 WARN_ON(amdgpu_state == NULL);
7498 __drm_atomic_helper_plane_reset(plane, &amdgpu_state->base);
7501 static struct drm_plane_state *
7502 dm_drm_plane_duplicate_state(struct drm_plane *plane)
7504 struct dm_plane_state *dm_plane_state, *old_dm_plane_state;
7506 old_dm_plane_state = to_dm_plane_state(plane->state);
7507 dm_plane_state = kzalloc(sizeof(*dm_plane_state), GFP_KERNEL);
7508 if (!dm_plane_state)
7511 __drm_atomic_helper_plane_duplicate_state(plane, &dm_plane_state->base);
7513 if (old_dm_plane_state->dc_state) {
7514 dm_plane_state->dc_state = old_dm_plane_state->dc_state;
7515 dc_plane_state_retain(dm_plane_state->dc_state);
7518 return &dm_plane_state->base;
7521 static void dm_drm_plane_destroy_state(struct drm_plane *plane,
7522 struct drm_plane_state *state)
7524 struct dm_plane_state *dm_plane_state = to_dm_plane_state(state);
7526 if (dm_plane_state->dc_state)
7527 dc_plane_state_release(dm_plane_state->dc_state);
7529 drm_atomic_helper_plane_destroy_state(plane, state);
7532 static const struct drm_plane_funcs dm_plane_funcs = {
7533 .update_plane = drm_atomic_helper_update_plane,
7534 .disable_plane = drm_atomic_helper_disable_plane,
7535 .destroy = drm_primary_helper_destroy,
7536 .reset = dm_drm_plane_reset,
7537 .atomic_duplicate_state = dm_drm_plane_duplicate_state,
7538 .atomic_destroy_state = dm_drm_plane_destroy_state,
7539 .format_mod_supported = dm_plane_format_mod_supported,
7542 static int dm_plane_helper_prepare_fb(struct drm_plane *plane,
7543 struct drm_plane_state *new_state)
7545 struct amdgpu_framebuffer *afb;
7546 struct drm_gem_object *obj;
7547 struct amdgpu_device *adev;
7548 struct amdgpu_bo *rbo;
7549 struct dm_plane_state *dm_plane_state_new, *dm_plane_state_old;
7550 struct list_head list;
7551 struct ttm_validate_buffer tv;
7552 struct ww_acquire_ctx ticket;
7556 if (!new_state->fb) {
7557 DRM_DEBUG_KMS("No FB bound\n");
7561 afb = to_amdgpu_framebuffer(new_state->fb);
7562 obj = new_state->fb->obj[0];
7563 rbo = gem_to_amdgpu_bo(obj);
7564 adev = amdgpu_ttm_adev(rbo->tbo.bdev);
7565 INIT_LIST_HEAD(&list);
7569 list_add(&tv.head, &list);
7571 r = ttm_eu_reserve_buffers(&ticket, &list, false, NULL);
7573 dev_err(adev->dev, "fail to reserve bo (%d)\n", r);
7577 if (plane->type != DRM_PLANE_TYPE_CURSOR)
7578 domain = amdgpu_display_supported_domains(adev, rbo->flags);
7580 domain = AMDGPU_GEM_DOMAIN_VRAM;
7582 r = amdgpu_bo_pin(rbo, domain);
7583 if (unlikely(r != 0)) {
7584 if (r != -ERESTARTSYS)
7585 DRM_ERROR("Failed to pin framebuffer with error %d\n", r);
7586 ttm_eu_backoff_reservation(&ticket, &list);
7590 r = amdgpu_ttm_alloc_gart(&rbo->tbo);
7591 if (unlikely(r != 0)) {
7592 amdgpu_bo_unpin(rbo);
7593 ttm_eu_backoff_reservation(&ticket, &list);
7594 DRM_ERROR("%p bind failed\n", rbo);
7598 ttm_eu_backoff_reservation(&ticket, &list);
7600 afb->address = amdgpu_bo_gpu_offset(rbo);
7605 * We don't do surface updates on planes that have been newly created,
7606 * but we also don't have the afb->address during atomic check.
7608 * Fill in buffer attributes depending on the address here, but only on
7609 * newly created planes since they're not being used by DC yet and this
7610 * won't modify global state.
7612 dm_plane_state_old = to_dm_plane_state(plane->state);
7613 dm_plane_state_new = to_dm_plane_state(new_state);
7615 if (dm_plane_state_new->dc_state &&
7616 dm_plane_state_old->dc_state != dm_plane_state_new->dc_state) {
7617 struct dc_plane_state *plane_state =
7618 dm_plane_state_new->dc_state;
7619 bool force_disable_dcc = !plane_state->dcc.enable;
7621 fill_plane_buffer_attributes(
7622 adev, afb, plane_state->format, plane_state->rotation,
7624 &plane_state->tiling_info, &plane_state->plane_size,
7625 &plane_state->dcc, &plane_state->address,
7626 afb->tmz_surface, force_disable_dcc);
7632 static void dm_plane_helper_cleanup_fb(struct drm_plane *plane,
7633 struct drm_plane_state *old_state)
7635 struct amdgpu_bo *rbo;
7641 rbo = gem_to_amdgpu_bo(old_state->fb->obj[0]);
7642 r = amdgpu_bo_reserve(rbo, false);
7644 DRM_ERROR("failed to reserve rbo before unpin\n");
7648 amdgpu_bo_unpin(rbo);
7649 amdgpu_bo_unreserve(rbo);
7650 amdgpu_bo_unref(&rbo);
7653 static int dm_plane_helper_check_state(struct drm_plane_state *state,
7654 struct drm_crtc_state *new_crtc_state)
7656 struct drm_framebuffer *fb = state->fb;
7657 int min_downscale, max_upscale;
7659 int max_scale = INT_MAX;
7661 /* Plane enabled? Validate viewport and get scaling factors from plane caps. */
7662 if (fb && state->crtc) {
7663 /* Validate viewport to cover the case when only the position changes */
7664 if (state->plane->type != DRM_PLANE_TYPE_CURSOR) {
7665 int viewport_width = state->crtc_w;
7666 int viewport_height = state->crtc_h;
7668 if (state->crtc_x < 0)
7669 viewport_width += state->crtc_x;
7670 else if (state->crtc_x + state->crtc_w > new_crtc_state->mode.crtc_hdisplay)
7671 viewport_width = new_crtc_state->mode.crtc_hdisplay - state->crtc_x;
7673 if (state->crtc_y < 0)
7674 viewport_height += state->crtc_y;
7675 else if (state->crtc_y + state->crtc_h > new_crtc_state->mode.crtc_vdisplay)
7676 viewport_height = new_crtc_state->mode.crtc_vdisplay - state->crtc_y;
7678 if (viewport_width < 0 || viewport_height < 0) {
7679 DRM_DEBUG_ATOMIC("Plane completely outside of screen\n");
7681 } else if (viewport_width < MIN_VIEWPORT_SIZE*2) { /* x2 for width is because of pipe-split. */
7682 DRM_DEBUG_ATOMIC("Viewport width %d smaller than %d\n", viewport_width, MIN_VIEWPORT_SIZE*2);
7684 } else if (viewport_height < MIN_VIEWPORT_SIZE) {
7685 DRM_DEBUG_ATOMIC("Viewport height %d smaller than %d\n", viewport_height, MIN_VIEWPORT_SIZE);
7691 /* Get min/max allowed scaling factors from plane caps. */
7692 get_min_max_dc_plane_scaling(state->crtc->dev, fb,
7693 &min_downscale, &max_upscale);
7695 * Convert to drm convention: 16.16 fixed point, instead of dc's
7696 * 1.0 == 1000. Also drm scaling is src/dst instead of dc's
7697 * dst/src, so min_scale = 1.0 / max_upscale, etc.
7699 min_scale = (1000 << 16) / max_upscale;
7700 max_scale = (1000 << 16) / min_downscale;
7703 return drm_atomic_helper_check_plane_state(
7704 state, new_crtc_state, min_scale, max_scale, true, true);
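/*
 * A minimal sketch of the fixed-point conversion used above (illustrative
 * helper, not driver code): DC expresses scale limits in milli-units
 * (1.0 == 1000) as dst/src, DRM in 16.16 fixed point as src/dst, hence
 * the reciprocal.
 */
static inline int dc_scale_limit_to_drm_16_16(int dc_limit_milli)
{
	/* e.g. a 16x max upscale (dc_limit_milli = 16000) becomes
	 * (1000 << 16) / 16000 = 4096, i.e. 1/16 in 16.16 format.
	 */
	return (1000 << 16) / dc_limit_milli;
}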
7707 static int dm_plane_atomic_check(struct drm_plane *plane,
7708 struct drm_atomic_state *state)
7710 struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state,
7712 struct amdgpu_device *adev = drm_to_adev(plane->dev);
7713 struct dc *dc = adev->dm.dc;
7714 struct dm_plane_state *dm_plane_state;
7715 struct dc_scaling_info scaling_info;
7716 struct drm_crtc_state *new_crtc_state;
7719 trace_amdgpu_dm_plane_atomic_check(new_plane_state);
7721 dm_plane_state = to_dm_plane_state(new_plane_state);
7723 if (!dm_plane_state->dc_state)
7727 drm_atomic_get_new_crtc_state(state,
7728 new_plane_state->crtc);
7729 if (!new_crtc_state)
7732 ret = dm_plane_helper_check_state(new_plane_state, new_crtc_state);
7736 ret = fill_dc_scaling_info(adev, new_plane_state, &scaling_info);
7740 if (dc_validate_plane(dc, dm_plane_state->dc_state) == DC_OK)
7746 static int dm_plane_atomic_async_check(struct drm_plane *plane,
7747 struct drm_atomic_state *state)
7749 /* Only support async updates on cursor planes. */
7750 if (plane->type != DRM_PLANE_TYPE_CURSOR)
7756 static void dm_plane_atomic_async_update(struct drm_plane *plane,
7757 struct drm_atomic_state *state)
7759 struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state,
7761 struct drm_plane_state *old_state =
7762 drm_atomic_get_old_plane_state(state, plane);
7764 trace_amdgpu_dm_atomic_update_cursor(new_state);
7766 swap(plane->state->fb, new_state->fb);
7768 plane->state->src_x = new_state->src_x;
7769 plane->state->src_y = new_state->src_y;
7770 plane->state->src_w = new_state->src_w;
7771 plane->state->src_h = new_state->src_h;
7772 plane->state->crtc_x = new_state->crtc_x;
7773 plane->state->crtc_y = new_state->crtc_y;
7774 plane->state->crtc_w = new_state->crtc_w;
7775 plane->state->crtc_h = new_state->crtc_h;
7777 handle_cursor_update(plane, old_state);
7780 static const struct drm_plane_helper_funcs dm_plane_helper_funcs = {
7781 .prepare_fb = dm_plane_helper_prepare_fb,
7782 .cleanup_fb = dm_plane_helper_cleanup_fb,
7783 .atomic_check = dm_plane_atomic_check,
7784 .atomic_async_check = dm_plane_atomic_async_check,
7785 .atomic_async_update = dm_plane_atomic_async_update
7789 * TODO: these are currently initialized to RGB formats only.
7790 * For future use cases we should either initialize them dynamically based on
7791 * plane capabilities, or initialize this array to all formats, so the internal
7792 * drm check will succeed, and let DC implement the proper check.
7794 static const uint32_t rgb_formats[] = {
7795 DRM_FORMAT_XRGB8888,
7796 DRM_FORMAT_ARGB8888,
7797 DRM_FORMAT_RGBA8888,
7798 DRM_FORMAT_XRGB2101010,
7799 DRM_FORMAT_XBGR2101010,
7800 DRM_FORMAT_ARGB2101010,
7801 DRM_FORMAT_ABGR2101010,
7802 DRM_FORMAT_XRGB16161616,
7803 DRM_FORMAT_XBGR16161616,
7804 DRM_FORMAT_ARGB16161616,
7805 DRM_FORMAT_ABGR16161616,
7806 DRM_FORMAT_XBGR8888,
7807 DRM_FORMAT_ABGR8888,
7811 static const uint32_t overlay_formats[] = {
7812 DRM_FORMAT_XRGB8888,
7813 DRM_FORMAT_ARGB8888,
7814 DRM_FORMAT_RGBA8888,
7815 DRM_FORMAT_XBGR8888,
7816 DRM_FORMAT_ABGR8888,
7820 static const u32 cursor_formats[] = {
7824 static int get_plane_formats(const struct drm_plane *plane,
7825 const struct dc_plane_cap *plane_cap,
7826 uint32_t *formats, int max_formats)
7828 int i, num_formats = 0;
7831 * TODO: Query support for each group of formats directly from
7832 * DC plane caps. This will require adding more formats to the
7836 switch (plane->type) {
7837 case DRM_PLANE_TYPE_PRIMARY:
7838 for (i = 0; i < ARRAY_SIZE(rgb_formats); ++i) {
7839 if (num_formats >= max_formats)
7842 formats[num_formats++] = rgb_formats[i];
7845 if (plane_cap && plane_cap->pixel_format_support.nv12)
7846 formats[num_formats++] = DRM_FORMAT_NV12;
7847 if (plane_cap && plane_cap->pixel_format_support.p010)
7848 formats[num_formats++] = DRM_FORMAT_P010;
7849 if (plane_cap && plane_cap->pixel_format_support.fp16) {
7850 formats[num_formats++] = DRM_FORMAT_XRGB16161616F;
7851 formats[num_formats++] = DRM_FORMAT_ARGB16161616F;
7852 formats[num_formats++] = DRM_FORMAT_XBGR16161616F;
7853 formats[num_formats++] = DRM_FORMAT_ABGR16161616F;
7857 case DRM_PLANE_TYPE_OVERLAY:
7858 for (i = 0; i < ARRAY_SIZE(overlay_formats); ++i) {
7859 if (num_formats >= max_formats)
7862 formats[num_formats++] = overlay_formats[i];
7866 case DRM_PLANE_TYPE_CURSOR:
7867 for (i = 0; i < ARRAY_SIZE(cursor_formats); ++i) {
7868 if (num_formats >= max_formats)
7871 formats[num_formats++] = cursor_formats[i];
7879 static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
7880 struct drm_plane *plane,
7881 unsigned long possible_crtcs,
7882 const struct dc_plane_cap *plane_cap)
7884 uint32_t formats[32];
7887 unsigned int supported_rotations;
7888 uint64_t *modifiers = NULL;
7890 num_formats = get_plane_formats(plane, plane_cap, formats,
7891 ARRAY_SIZE(formats));
7893 res = get_plane_modifiers(dm->adev, plane->type, &modifiers);
7897 if (modifiers == NULL)
7898 adev_to_drm(dm->adev)->mode_config.fb_modifiers_not_supported = true;
7900 res = drm_universal_plane_init(adev_to_drm(dm->adev), plane, possible_crtcs,
7901 &dm_plane_funcs, formats, num_formats,
7902 modifiers, plane->type, NULL);
7907 if (plane->type == DRM_PLANE_TYPE_OVERLAY &&
7908 plane_cap && plane_cap->per_pixel_alpha) {
7909 unsigned int blend_caps = BIT(DRM_MODE_BLEND_PIXEL_NONE) |
7910 BIT(DRM_MODE_BLEND_PREMULTI);
7912 drm_plane_create_alpha_property(plane);
7913 drm_plane_create_blend_mode_property(plane, blend_caps);
7916 if (plane->type == DRM_PLANE_TYPE_PRIMARY &&
7918 (plane_cap->pixel_format_support.nv12 ||
7919 plane_cap->pixel_format_support.p010)) {
7920 /* This only affects YUV formats. */
7921 drm_plane_create_color_properties(
7923 BIT(DRM_COLOR_YCBCR_BT601) |
7924 BIT(DRM_COLOR_YCBCR_BT709) |
7925 BIT(DRM_COLOR_YCBCR_BT2020),
7926 BIT(DRM_COLOR_YCBCR_LIMITED_RANGE) |
7927 BIT(DRM_COLOR_YCBCR_FULL_RANGE),
7928 DRM_COLOR_YCBCR_BT709, DRM_COLOR_YCBCR_LIMITED_RANGE);
7931 supported_rotations =
7932 DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_90 |
7933 DRM_MODE_ROTATE_180 | DRM_MODE_ROTATE_270;
7935 if (dm->adev->asic_type >= CHIP_BONAIRE &&
7936 plane->type != DRM_PLANE_TYPE_CURSOR)
7937 drm_plane_create_rotation_property(plane, DRM_MODE_ROTATE_0,
7938 supported_rotations);
7940 drm_plane_helper_add(plane, &dm_plane_helper_funcs);
7942 /* Create (reset) the plane state */
7943 if (plane->funcs->reset)
7944 plane->funcs->reset(plane);
7949 static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
7950 struct drm_plane *plane,
7951 uint32_t crtc_index)
7953 struct amdgpu_crtc *acrtc = NULL;
7954 struct drm_plane *cursor_plane;
7958 cursor_plane = kzalloc(sizeof(*cursor_plane), GFP_KERNEL);
7962 cursor_plane->type = DRM_PLANE_TYPE_CURSOR;
7963 res = amdgpu_dm_plane_init(dm, cursor_plane, 0, NULL);
7965 acrtc = kzalloc(sizeof(struct amdgpu_crtc), GFP_KERNEL);
7969 res = drm_crtc_init_with_planes(
7974 &amdgpu_dm_crtc_funcs, NULL);
7979 drm_crtc_helper_add(&acrtc->base, &amdgpu_dm_crtc_helper_funcs);
7981 /* Create (reset) the plane state */
7982 if (acrtc->base.funcs->reset)
7983 acrtc->base.funcs->reset(&acrtc->base);
7985 acrtc->max_cursor_width = dm->adev->dm.dc->caps.max_cursor_size;
7986 acrtc->max_cursor_height = dm->adev->dm.dc->caps.max_cursor_size;
7988 acrtc->crtc_id = crtc_index;
7989 acrtc->base.enabled = false;
7990 acrtc->otg_inst = -1;
7992 dm->adev->mode_info.crtcs[crtc_index] = acrtc;
7993 drm_crtc_enable_color_mgmt(&acrtc->base, MAX_COLOR_LUT_ENTRIES,
7994 true, MAX_COLOR_LUT_ENTRIES);
7995 drm_mode_crtc_set_gamma_size(&acrtc->base, MAX_COLOR_LEGACY_LUT_ENTRIES);
8001 kfree(cursor_plane);
8006 static int to_drm_connector_type(enum signal_type st)
8009 case SIGNAL_TYPE_HDMI_TYPE_A:
8010 return DRM_MODE_CONNECTOR_HDMIA;
8011 case SIGNAL_TYPE_EDP:
8012 return DRM_MODE_CONNECTOR_eDP;
8013 case SIGNAL_TYPE_LVDS:
8014 return DRM_MODE_CONNECTOR_LVDS;
8015 case SIGNAL_TYPE_RGB:
8016 return DRM_MODE_CONNECTOR_VGA;
8017 case SIGNAL_TYPE_DISPLAY_PORT:
8018 case SIGNAL_TYPE_DISPLAY_PORT_MST:
8019 return DRM_MODE_CONNECTOR_DisplayPort;
8020 case SIGNAL_TYPE_DVI_DUAL_LINK:
8021 case SIGNAL_TYPE_DVI_SINGLE_LINK:
8022 return DRM_MODE_CONNECTOR_DVID;
8023 case SIGNAL_TYPE_VIRTUAL:
8024 return DRM_MODE_CONNECTOR_VIRTUAL;
8027 return DRM_MODE_CONNECTOR_Unknown;
8031 static struct drm_encoder *amdgpu_dm_connector_to_encoder(struct drm_connector *connector)
8033 struct drm_encoder *encoder;
8035 /* There is only one encoder per connector */
8036 drm_connector_for_each_possible_encoder(connector, encoder)
8042 static void amdgpu_dm_get_native_mode(struct drm_connector *connector)
8044 struct drm_encoder *encoder;
8045 struct amdgpu_encoder *amdgpu_encoder;
8047 encoder = amdgpu_dm_connector_to_encoder(connector);
8049 if (encoder == NULL)
8052 amdgpu_encoder = to_amdgpu_encoder(encoder);
8054 amdgpu_encoder->native_mode.clock = 0;
8056 if (!list_empty(&connector->probed_modes)) {
8057 struct drm_display_mode *preferred_mode = NULL;
8059 list_for_each_entry(preferred_mode,
8060 &connector->probed_modes,
8062 if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED)
8063 amdgpu_encoder->native_mode = *preferred_mode;
8071 static struct drm_display_mode *
8072 amdgpu_dm_create_common_mode(struct drm_encoder *encoder,
8074 int hdisplay, int vdisplay)
8076 struct drm_device *dev = encoder->dev;
8077 struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
8078 struct drm_display_mode *mode = NULL;
8079 struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
8081 mode = drm_mode_duplicate(dev, native_mode);
8086 mode->hdisplay = hdisplay;
8087 mode->vdisplay = vdisplay;
8088 mode->type &= ~DRM_MODE_TYPE_PREFERRED;
8089 strscpy(mode->name, name, DRM_DISPLAY_MODE_LEN);
8095 static void amdgpu_dm_connector_add_common_modes(struct drm_encoder *encoder,
8096 struct drm_connector *connector)
8098 struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
8099 struct drm_display_mode *mode = NULL;
8100 struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
8101 struct amdgpu_dm_connector *amdgpu_dm_connector =
8102 to_amdgpu_dm_connector(connector);
8106 char name[DRM_DISPLAY_MODE_LEN];
8109 } common_modes[] = {
8110 { "640x480", 640, 480},
8111 { "800x600", 800, 600},
8112 { "1024x768", 1024, 768},
8113 { "1280x720", 1280, 720},
8114 { "1280x800", 1280, 800},
8115 {"1280x1024", 1280, 1024},
8116 { "1440x900", 1440, 900},
8117 {"1680x1050", 1680, 1050},
8118 {"1600x1200", 1600, 1200},
8119 {"1920x1080", 1920, 1080},
8120 {"1920x1200", 1920, 1200}
8121 };
8123 n = ARRAY_SIZE(common_modes);
8125 for (i = 0; i < n; i++) {
8126 struct drm_display_mode *curmode = NULL;
8127 bool mode_existed = false;
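/*
 * Only add common modes strictly smaller than the native mode; larger
 * ones cannot be scaled to, and the native resolution itself is already
 * in the probed list.
 */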
8129 if (common_modes[i].w > native_mode->hdisplay ||
8130 common_modes[i].h > native_mode->vdisplay ||
8131 (common_modes[i].w == native_mode->hdisplay &&
8132 common_modes[i].h == native_mode->vdisplay))
8133 continue;
8135 list_for_each_entry(curmode, &connector->probed_modes, head) {
8136 if (common_modes[i].w == curmode->hdisplay &&
8137 common_modes[i].h == curmode->vdisplay) {
8138 mode_existed = true;
8139 break;
8140 }
8141 }
8143 if (mode_existed)
8144 continue;
8146 mode = amdgpu_dm_create_common_mode(encoder,
8147 common_modes[i].name, common_modes[i].w,
8148 common_modes[i].h);
8149 drm_mode_probed_add(connector, mode);
8150 amdgpu_dm_connector->num_modes++;
8154 static void amdgpu_set_panel_orientation(struct drm_connector *connector)
8156 struct drm_encoder *encoder;
8157 struct amdgpu_encoder *amdgpu_encoder;
8158 const struct drm_display_mode *native_mode;
8160 if (connector->connector_type != DRM_MODE_CONNECTOR_eDP &&
8161 connector->connector_type != DRM_MODE_CONNECTOR_LVDS)
8162 return;
8164 encoder = amdgpu_dm_connector_to_encoder(connector);
8165 if (!encoder)
8166 return;
8168 amdgpu_encoder = to_amdgpu_encoder(encoder);
8170 native_mode = &amdgpu_encoder->native_mode;
8171 if (native_mode->hdisplay == 0 || native_mode->vdisplay == 0)
8172 return;
8174 drm_connector_set_panel_orientation_with_quirk(connector,
8175 DRM_MODE_PANEL_ORIENTATION_UNKNOWN,
8176 native_mode->hdisplay,
8177 native_mode->vdisplay);
8180 static void amdgpu_dm_connector_ddc_get_modes(struct drm_connector *connector,
8181 struct edid *edid)
8182 {
8183 struct amdgpu_dm_connector *amdgpu_dm_connector =
8184 to_amdgpu_dm_connector(connector);
8186 if (edid) {
8187 /* empty probed_modes */
8188 INIT_LIST_HEAD(&connector->probed_modes);
8189 amdgpu_dm_connector->num_modes =
8190 drm_add_edid_modes(connector, edid);
8192 /* Sort the probed modes before calling
8193 * amdgpu_dm_get_native_mode(), since an EDID can have
8194 * more than one preferred mode. Modes that appear
8195 * later in the probed mode list can be of higher
8196 * and preferred resolution: for example, a 3840x2160
8197 * preferred timing in the base EDID and a 4096x2160
8198 * preferred resolution in a later DID extension block.
8199 */
8200 drm_mode_sort(&connector->probed_modes);
8201 amdgpu_dm_get_native_mode(connector);
8203 /* Freesync capabilities are reset by calling
8204 * drm_add_edid_modes() and need to be
8205 * restored here.
8206 */
8207 amdgpu_dm_update_freesync_caps(connector, edid);
8209 amdgpu_set_panel_orientation(connector);
8210 } else {
8211 amdgpu_dm_connector->num_modes = 0;
8212 }
8213 }
8215 static bool is_duplicate_mode(struct amdgpu_dm_connector *aconnector,
8216 struct drm_display_mode *mode)
8218 struct drm_display_mode *m;
8220 list_for_each_entry (m, &aconnector->base.probed_modes, head) {
8221 if (drm_mode_equal(m, mode))
8222 return true;
8223 }
8225 return false;
8226 }
8228 static uint add_fs_modes(struct amdgpu_dm_connector *aconnector)
8230 const struct drm_display_mode *m;
8231 struct drm_display_mode *new_mode;
8233 uint32_t new_modes_count = 0;
8235 /* Standard FPS values
8244 * 60 - Commonly used
8245 * 48,72,96,120 - Multiples of 24
8247 static const uint32_t common_rates[] = {
8248 23976, 24000, 25000, 29970, 30000,
8249 48000, 50000, 60000, 72000, 96000, 120000
8253 * Find mode with highest refresh rate with the same resolution
8254 * as the preferred mode. Some monitors report a preferred mode
8255 * with lower resolution than the highest refresh rate supported.
8258 m = get_highest_refresh_rate_mode(aconnector, true);
8259 if (!m)
8260 return 0;
8262 for (i = 0; i < ARRAY_SIZE(common_rates); i++) {
8263 uint64_t target_vtotal, target_vtotal_diff;
8264 uint64_t num, den;
8266 if (drm_mode_vrefresh(m) * 1000 < common_rates[i])
8267 continue;
8269 if (common_rates[i] < aconnector->min_vfreq * 1000 ||
8270 common_rates[i] > aconnector->max_vfreq * 1000)
8271 continue;
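/*
 * Stretch the vertical total so that the mode's pixel clock hits the
 * target rate. Refresh in mHz is clock[kHz] * 1000 * 1000 /
 * (htotal * vtotal), so the new vtotal computed below is
 * clock * 1000 * 1000 / (rate_mHz * htotal).
 */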
8273 num = (unsigned long long)m->clock * 1000 * 1000;
8274 den = common_rates[i] * (unsigned long long)m->htotal;
8275 target_vtotal = div_u64(num, den);
8276 target_vtotal_diff = target_vtotal - m->vtotal;
8278 /* Check for illegal modes */
8279 if (m->vsync_start + target_vtotal_diff < m->vdisplay ||
8280 m->vsync_end + target_vtotal_diff < m->vsync_start ||
8281 m->vtotal + target_vtotal_diff < m->vsync_end)
8282 continue;
8284 new_mode = drm_mode_duplicate(aconnector->base.dev, m);
8288 new_mode->vtotal += (u16)target_vtotal_diff;
8289 new_mode->vsync_start += (u16)target_vtotal_diff;
8290 new_mode->vsync_end += (u16)target_vtotal_diff;
8291 new_mode->type &= ~DRM_MODE_TYPE_PREFERRED;
8292 new_mode->type |= DRM_MODE_TYPE_DRIVER;
8294 if (!is_duplicate_mode(aconnector, new_mode)) {
8295 drm_mode_probed_add(&aconnector->base, new_mode);
8296 new_modes_count += 1;
8297 } else {
8298 drm_mode_destroy(aconnector->base.dev, new_mode);
8299 }
8300 }
8301 return new_modes_count;
8302 }
8304 static void amdgpu_dm_connector_add_freesync_modes(struct drm_connector *connector,
8305 struct edid *edid)
8306 {
8307 struct amdgpu_dm_connector *amdgpu_dm_connector =
8308 to_amdgpu_dm_connector(connector);
8310 if (!(amdgpu_freesync_vid_mode && edid))
8311 return;
8313 if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10)
8314 amdgpu_dm_connector->num_modes +=
8315 add_fs_modes(amdgpu_dm_connector);
8318 static int amdgpu_dm_connector_get_modes(struct drm_connector *connector)
8320 struct amdgpu_dm_connector *amdgpu_dm_connector =
8321 to_amdgpu_dm_connector(connector);
8322 struct drm_encoder *encoder;
8323 struct edid *edid = amdgpu_dm_connector->edid;
8325 encoder = amdgpu_dm_connector_to_encoder(connector);
8327 if (!drm_edid_is_valid(edid)) {
8328 amdgpu_dm_connector->num_modes =
8329 drm_add_modes_noedid(connector, 640, 480);
8330 } else {
8331 amdgpu_dm_connector_ddc_get_modes(connector, edid);
8332 amdgpu_dm_connector_add_common_modes(encoder, connector);
8333 amdgpu_dm_connector_add_freesync_modes(connector, edid);
8334 }
8335 amdgpu_dm_fbc_init(connector);
8337 return amdgpu_dm_connector->num_modes;
8340 void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
8341 struct amdgpu_dm_connector *aconnector,
8342 int connector_type,
8343 struct dc_link *link,
8344 uint32_t link_index)
8345 {
8346 struct amdgpu_device *adev = drm_to_adev(dm->ddev);
8349 * Some of the properties below require access to state, like bpc.
8350 * Allocate some default initial connector state with our reset helper.
8352 if (aconnector->base.funcs->reset)
8353 aconnector->base.funcs->reset(&aconnector->base);
8355 aconnector->connector_id = link_index;
8356 aconnector->dc_link = link;
8357 aconnector->base.interlace_allowed = false;
8358 aconnector->base.doublescan_allowed = false;
8359 aconnector->base.stereo_allowed = false;
8360 aconnector->base.dpms = DRM_MODE_DPMS_OFF;
8361 aconnector->hpd.hpd = AMDGPU_HPD_NONE; /* not used */
8362 aconnector->audio_inst = -1;
8363 mutex_init(&aconnector->hpd_lock);
8365 /*
8366 * Configure HPD hot plug support: connector->polled defaults to 0,
8367 * which means HPD hot plug is not supported.
8368 */
8369 switch (connector_type) {
8370 case DRM_MODE_CONNECTOR_HDMIA:
8371 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
8372 aconnector->base.ycbcr_420_allowed =
8373 link->link_enc->features.hdmi_ycbcr420_supported ? true : false;
8374 break;
8375 case DRM_MODE_CONNECTOR_DisplayPort:
8376 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
8377 link->link_enc = dp_get_link_enc(link);
8378 ASSERT(link->link_enc);
8380 aconnector->base.ycbcr_420_allowed =
8381 link->link_enc->features.dp_ycbcr420_supported ? true : false;
8382 break;
8383 case DRM_MODE_CONNECTOR_DVID:
8384 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
8385 break;
8386 default:
8387 break;
8388 }
8390 drm_object_attach_property(&aconnector->base.base,
8391 dm->ddev->mode_config.scaling_mode_property,
8392 DRM_MODE_SCALE_NONE);
8394 drm_object_attach_property(&aconnector->base.base,
8395 adev->mode_info.underscan_property,
8396 UNDERSCAN_OFF);
8397 drm_object_attach_property(&aconnector->base.base,
8398 adev->mode_info.underscan_hborder_property,
8399 0);
8400 drm_object_attach_property(&aconnector->base.base,
8401 adev->mode_info.underscan_vborder_property,
8402 0);
8404 if (!aconnector->mst_port)
8405 drm_connector_attach_max_bpc_property(&aconnector->base, 8, 16);
8407 /* This defaults to the max in the range, but we want 8bpc for non-edp. */
8408 aconnector->base.state->max_bpc = (connector_type == DRM_MODE_CONNECTOR_eDP) ? 16 : 8;
8409 aconnector->base.state->max_requested_bpc = aconnector->base.state->max_bpc;
8411 if (connector_type == DRM_MODE_CONNECTOR_eDP &&
8412 (dc_is_dmcu_initialized(adev->dm.dc) || adev->dm.dc->ctx->dmub_srv)) {
8413 drm_object_attach_property(&aconnector->base.base,
8414 adev->mode_info.abm_level_property, 0);
8417 if (connector_type == DRM_MODE_CONNECTOR_HDMIA ||
8418 connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
8419 connector_type == DRM_MODE_CONNECTOR_eDP) {
8420 drm_connector_attach_hdr_output_metadata_property(&aconnector->base);
8422 if (!aconnector->mst_port)
8423 drm_connector_attach_vrr_capable_property(&aconnector->base);
8425 #ifdef CONFIG_DRM_AMD_DC_HDCP
8426 if (adev->dm.hdcp_workqueue)
8427 drm_connector_attach_content_protection_property(&aconnector->base, true);
8432 static int amdgpu_dm_i2c_xfer(struct i2c_adapter *i2c_adap,
8433 struct i2c_msg *msgs, int num)
8435 struct amdgpu_i2c_adapter *i2c = i2c_get_adapdata(i2c_adap);
8436 struct ddc_service *ddc_service = i2c->ddc_service;
8437 struct i2c_command cmd;
8438 int i;
8439 int result = -EIO;
8441 cmd.payloads = kcalloc(num, sizeof(struct i2c_payload), GFP_KERNEL);
8443 if (!cmd.payloads)
8444 return result;
8446 cmd.number_of_payloads = num;
8447 cmd.engine = I2C_COMMAND_ENGINE_DEFAULT;
8448 cmd.speed = 100;
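/*
 * Translate each Linux i2c_msg 1:1 into a DC i2c_payload below; the
 * I2C_M_RD flag simply inverts into the payload's write direction.
 */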
8450 for (i = 0; i < num; i++) {
8451 cmd.payloads[i].write = !(msgs[i].flags & I2C_M_RD);
8452 cmd.payloads[i].address = msgs[i].addr;
8453 cmd.payloads[i].length = msgs[i].len;
8454 cmd.payloads[i].data = msgs[i].buf;
8457 if (dc_submit_i2c(
8458 ddc_service->ctx->dc,
8459 ddc_service->ddc_pin->hw_info.ddc_channel,
8460 &cmd))
8461 result = num;
8463 kfree(cmd.payloads);
8464 return result;
8465 }
8467 static u32 amdgpu_dm_i2c_func(struct i2c_adapter *adap)
8469 return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
8472 static const struct i2c_algorithm amdgpu_dm_i2c_algo = {
8473 .master_xfer = amdgpu_dm_i2c_xfer,
8474 .functionality = amdgpu_dm_i2c_func,
8477 static struct amdgpu_i2c_adapter *
8478 create_i2c(struct ddc_service *ddc_service,
8479 int link_index,
8480 int *res)
8481 {
8482 struct amdgpu_device *adev = ddc_service->ctx->driver_context;
8483 struct amdgpu_i2c_adapter *i2c;
8485 i2c = kzalloc(sizeof(struct amdgpu_i2c_adapter), GFP_KERNEL);
8486 if (!i2c)
8487 return NULL;
8488 i2c->base.owner = THIS_MODULE;
8489 i2c->base.class = I2C_CLASS_DDC;
8490 i2c->base.dev.parent = &adev->pdev->dev;
8491 i2c->base.algo = &amdgpu_dm_i2c_algo;
8492 snprintf(i2c->base.name, sizeof(i2c->base.name), "AMDGPU DM i2c hw bus %d", link_index);
8493 i2c_set_adapdata(&i2c->base, i2c);
8494 i2c->ddc_service = ddc_service;
8495 if (i2c->ddc_service->ddc_pin)
8496 i2c->ddc_service->ddc_pin->hw_info.ddc_channel = link_index;
8498 return i2c;
8499 }
8502 /*
8503 * Note: this function assumes that dc_link_detect() was called for the
8504 * dc_link which will be represented by this aconnector.
8505 */
8506 static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
8507 struct amdgpu_dm_connector *aconnector,
8508 uint32_t link_index,
8509 struct amdgpu_encoder *aencoder)
8510 {
8511 int res = 0;
8512 int connector_type;
8513 struct dc *dc = dm->dc;
8514 struct dc_link *link = dc_get_link_at_index(dc, link_index);
8515 struct amdgpu_i2c_adapter *i2c;
8517 link->priv = aconnector;
8519 DRM_DEBUG_DRIVER("%s()\n", __func__);
8521 i2c = create_i2c(link->ddc, link->link_index, &res);
8522 if (!i2c) {
8523 DRM_ERROR("Failed to create i2c adapter data\n");
8524 return -ENOMEM;
8525 }
8527 aconnector->i2c = i2c;
8528 res = i2c_add_adapter(&i2c->base);
8530 if (res) {
8531 DRM_ERROR("Failed to register hw i2c %d\n", link->link_index);
8532 goto out_free;
8533 }
8535 connector_type = to_drm_connector_type(link->connector_signal);
8537 res = drm_connector_init_with_ddc(
8538 dm->ddev,
8539 &aconnector->base,
8540 &amdgpu_dm_connector_funcs,
8541 connector_type,
8542 &i2c->base);
8544 if (res) {
8545 DRM_ERROR("connector_init failed\n");
8546 aconnector->connector_id = -1;
8547 goto out_free;
8548 }
8550 drm_connector_helper_add(
8551 &aconnector->base,
8552 &amdgpu_dm_connector_helper_funcs);
8554 amdgpu_dm_connector_init_helper(
8555 dm,
8556 aconnector,
8557 connector_type,
8558 link,
8559 link_index);
8561 drm_connector_attach_encoder(
8562 &aconnector->base, &aencoder->base);
8564 if (connector_type == DRM_MODE_CONNECTOR_DisplayPort
8565 || connector_type == DRM_MODE_CONNECTOR_eDP)
8566 amdgpu_dm_initialize_dp_connector(dm, aconnector, link->link_index);
8568 out_free:
8570 if (res)
8571 aconnector->i2c = NULL;
8573 return res;
8574 }
8576 int amdgpu_dm_get_encoder_crtc_mask(struct amdgpu_device *adev)
8577 {
8578 switch (adev->mode_info.num_crtc) {
8579 case 0:
8580 case 1:
8581 return 0x1;
8582 case 2:
8583 return 0x3;
8584 case 3:
8585 return 0x7;
8586 case 4:
8587 return 0xf;
8588 case 5:
8589 return 0x1f;
8590 case 6:
8591 default:
8592 return 0x3f;
8593 }
8594 }
8595 static int amdgpu_dm_encoder_init(struct drm_device *dev,
8596 struct amdgpu_encoder *aencoder,
8597 uint32_t link_index)
8599 struct amdgpu_device *adev = drm_to_adev(dev);
8601 int res = drm_encoder_init(dev,
8602 &aencoder->base,
8603 &amdgpu_dm_encoder_funcs,
8604 DRM_MODE_ENCODER_TMDS,
8605 NULL);
8607 aencoder->base.possible_crtcs = amdgpu_dm_get_encoder_crtc_mask(adev);
8609 if (!res)
8610 aencoder->encoder_id = link_index;
8611 else
8612 aencoder->encoder_id = -1;
8614 drm_encoder_helper_add(&aencoder->base, &amdgpu_dm_encoder_helper_funcs);
8619 static void manage_dm_interrupts(struct amdgpu_device *adev,
8620 struct amdgpu_crtc *acrtc,
8621 bool enable)
8622 {
8623 /*
8624 * We have no guarantee that the frontend index maps to the same
8625 * backend index - some even map to more than one.
8626 *
8627 * TODO: Use a different interrupt or check DC itself for the mapping.
8628 */
8629 int irq_type =
8630 amdgpu_display_crtc_idx_to_irq_type(
8631 adev,
8632 acrtc->crtc_id);
8634 if (enable) {
8635 drm_crtc_vblank_on(&acrtc->base);
8636 amdgpu_irq_get(
8637 adev,
8638 &adev->pageflip_irq,
8639 irq_type);
8640 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
8641 amdgpu_irq_get(
8642 adev,
8643 &adev->vline0_irq,
8644 irq_type);
8645 #endif
8646 } else {
8647 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
8648 amdgpu_irq_put(
8649 adev,
8650 &adev->vline0_irq,
8651 irq_type);
8652 #endif
8653 amdgpu_irq_put(
8654 adev,
8655 &adev->pageflip_irq,
8656 irq_type);
8657 drm_crtc_vblank_off(&acrtc->base);
8658 }
8659 }
8661 static void dm_update_pflip_irq_state(struct amdgpu_device *adev,
8662 struct amdgpu_crtc *acrtc)
8663 {
8664 int irq_type =
8665 amdgpu_display_crtc_idx_to_irq_type(adev, acrtc->crtc_id);
8668 * This reads the current state for the IRQ and force reapplies
8669 * the setting to hardware.
8671 amdgpu_irq_update(adev, &adev->pageflip_irq, irq_type);
8672 }
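/*
 * Returns true when the scaling mode changed, when underscan was toggled
 * while non-zero borders were in use, or when the border sizes themselves
 * changed.
 */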
8674 static bool
8675 is_scaling_state_different(const struct dm_connector_state *dm_state,
8676 const struct dm_connector_state *old_dm_state)
8678 if (dm_state->scaling != old_dm_state->scaling)
8679 return true;
8680 if (!dm_state->underscan_enable && old_dm_state->underscan_enable) {
8681 if (old_dm_state->underscan_hborder != 0 && old_dm_state->underscan_vborder != 0)
8682 return true;
8683 } else if (dm_state->underscan_enable && !old_dm_state->underscan_enable) {
8684 if (dm_state->underscan_hborder != 0 && dm_state->underscan_vborder != 0)
8685 return true;
8686 } else if (dm_state->underscan_hborder != old_dm_state->underscan_hborder ||
8687 dm_state->underscan_vborder != old_dm_state->underscan_vborder)
8688 return true;
8690 return false;
8691 }
8692 #ifdef CONFIG_DRM_AMD_DC_HDCP
8693 static bool is_content_protection_different(struct drm_connector_state *state,
8694 const struct drm_connector_state *old_state,
8695 const struct drm_connector *connector, struct hdcp_workqueue *hdcp_w)
8697 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
8698 struct dm_connector_state *dm_con_state = to_dm_connector_state(connector->state);
8700 /* Handle: Type0/1 change */
8701 if (old_state->hdcp_content_type != state->hdcp_content_type &&
8702 state->content_protection != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
8703 state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
8704 return true;
8705 }
8707 /* CP is being re-enabled, ignore this
8708 *
8709 * Handles: ENABLED -> DESIRED
8710 */
8711 if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED &&
8712 state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) {
8713 state->content_protection = DRM_MODE_CONTENT_PROTECTION_ENABLED;
8714 return false;
8715 }
8717 /* S3 resume case, since old state will always be 0 (UNDESIRED) and the restored state will be ENABLED
8719 * Handles: UNDESIRED -> ENABLED
8721 if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_UNDESIRED &&
8722 state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
8723 state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
8725 /* Stream removed and re-enabled
8727 * Can sometimes overlap with the HPD case,
8728 * thus set update_hdcp to false to avoid
8729 * setting HDCP multiple times.
8731 * Handles: DESIRED -> DESIRED (Special case)
8733 if (!(old_state->crtc && old_state->crtc->enabled) &&
8734 state->crtc && state->crtc->enabled &&
8735 connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) {
8736 dm_con_state->update_hdcp = false;
8737 return true;
8738 }
8740 /* Hot-plug, headless s3, dpms
8742 * Only start HDCP if the display is connected/enabled.
8743 * update_hdcp flag will be set to false until the next
8746 * Handles: DESIRED -> DESIRED (Special case)
8748 if (dm_con_state->update_hdcp && state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED &&
8749 connector->dpms == DRM_MODE_DPMS_ON && aconnector->dc_sink != NULL) {
8750 dm_con_state->update_hdcp = false;
8751 return true;
8752 }
8754 /*
8755 * Handles: UNDESIRED -> UNDESIRED
8756 * DESIRED -> DESIRED
8757 * ENABLED -> ENABLED
8758 */
8759 if (old_state->content_protection == state->content_protection)
8760 return false;
8762 /*
8763 * Handles: UNDESIRED -> DESIRED
8764 * DESIRED -> UNDESIRED
8765 * ENABLED -> UNDESIRED
8767 if (state->content_protection != DRM_MODE_CONTENT_PROTECTION_ENABLED)
8768 return true;
8770 /*
8771 * Handles: DESIRED -> ENABLED
8772 */
8773 return false;
8774 }
8775 #endif
8777 static void remove_stream(struct amdgpu_device *adev,
8778 struct amdgpu_crtc *acrtc,
8779 struct dc_stream_state *stream)
8781 /* this is the update mode case */
8783 acrtc->otg_inst = -1;
8784 acrtc->enabled = false;
8787 static int get_cursor_position(struct drm_plane *plane, struct drm_crtc *crtc,
8788 struct dc_cursor_position *position)
8790 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
8791 int x, y;
8792 int xorigin = 0, yorigin = 0;
8794 if (!crtc || !plane->state->fb)
8795 return 0;
8797 if ((plane->state->crtc_w > amdgpu_crtc->max_cursor_width) ||
8798 (plane->state->crtc_h > amdgpu_crtc->max_cursor_height)) {
8799 DRM_ERROR("%s: bad cursor width or height %d x %d\n",
8800 __func__,
8801 plane->state->crtc_w,
8802 plane->state->crtc_h);
8803 return -EINVAL;
8804 }
8806 x = plane->state->crtc_x;
8807 y = plane->state->crtc_y;
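/*
 * A cursor that hangs off the top/left edge cannot be programmed with a
 * negative position, so below the on-screen position is clamped to 0 and
 * the clipped amount is carried in the hotspot instead (paired with
 * translate_by_source).
 */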
8809 if (x <= -amdgpu_crtc->max_cursor_width ||
8810 y <= -amdgpu_crtc->max_cursor_height)
8811 return 0;
8813 if (x < 0) {
8814 xorigin = min(-x, amdgpu_crtc->max_cursor_width - 1);
8815 x = 0;
8816 }
8817 if (y < 0) {
8818 yorigin = min(-y, amdgpu_crtc->max_cursor_height - 1);
8819 y = 0;
8820 }
8821 position->enable = true;
8822 position->translate_by_source = true;
8823 position->x = x;
8824 position->y = y;
8825 position->x_hotspot = xorigin;
8826 position->y_hotspot = yorigin;
8828 return 0;
8829 }
8831 static void handle_cursor_update(struct drm_plane *plane,
8832 struct drm_plane_state *old_plane_state)
8834 struct amdgpu_device *adev = drm_to_adev(plane->dev);
8835 struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(plane->state->fb);
8836 struct drm_crtc *crtc = afb ? plane->state->crtc : old_plane_state->crtc;
8837 struct dm_crtc_state *crtc_state = crtc ? to_dm_crtc_state(crtc->state) : NULL;
8838 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
8839 uint64_t address = afb ? afb->address : 0;
8840 struct dc_cursor_position position = {0};
8841 struct dc_cursor_attributes attributes;
8842 int ret;
8844 if (!plane->state->fb && !old_plane_state->fb)
8845 return;
8847 DC_LOG_CURSOR("%s: crtc_id=%d with size %d to %d\n",
8849 amdgpu_crtc->crtc_id,
8850 plane->state->crtc_w,
8851 plane->state->crtc_h);
8853 ret = get_cursor_position(plane, crtc, &position);
8854 if (ret)
8855 return;
8857 if (!position.enable) {
8858 /* turn off cursor */
8859 if (crtc_state && crtc_state->stream) {
8860 mutex_lock(&adev->dm.dc_lock);
8861 dc_stream_set_cursor_position(crtc_state->stream,
8862 &position);
8863 mutex_unlock(&adev->dm.dc_lock);
8864 }
8866 return;
8867 }
8868 amdgpu_crtc->cursor_width = plane->state->crtc_w;
8869 amdgpu_crtc->cursor_height = plane->state->crtc_h;
8871 memset(&attributes, 0, sizeof(attributes));
8872 attributes.address.high_part = upper_32_bits(address);
8873 attributes.address.low_part = lower_32_bits(address);
8874 attributes.width = plane->state->crtc_w;
8875 attributes.height = plane->state->crtc_h;
8876 attributes.color_format = CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA;
8877 attributes.rotation_angle = 0;
8878 attributes.attribute_flags.value = 0;
8880 attributes.pitch = afb->base.pitches[0] / afb->base.format->cpp[0];
8882 if (crtc_state->stream) {
8883 mutex_lock(&adev->dm.dc_lock);
8884 if (!dc_stream_set_cursor_attributes(crtc_state->stream,
8885 &attributes))
8886 DRM_ERROR("DC failed to set cursor attributes\n");
8888 if (!dc_stream_set_cursor_position(crtc_state->stream,
8889 &position))
8890 DRM_ERROR("DC failed to set cursor position\n");
8891 mutex_unlock(&adev->dm.dc_lock);
8892 }
8893 }
8895 static void prepare_flip_isr(struct amdgpu_crtc *acrtc)
8898 assert_spin_locked(&acrtc->base.dev->event_lock);
8899 WARN_ON(acrtc->event);
8901 acrtc->event = acrtc->base.state->event;
8903 /* Set the flip status */
8904 acrtc->pflip_status = AMDGPU_FLIP_SUBMITTED;
8906 /* Mark this event as consumed */
8907 acrtc->base.state->event = NULL;
8909 DC_LOG_PFLIP("crtc:%d, pflip_stat:AMDGPU_FLIP_SUBMITTED\n",
8910 acrtc->crtc_id);
8911 }
8913 static void update_freesync_state_on_stream(
8914 struct amdgpu_display_manager *dm,
8915 struct dm_crtc_state *new_crtc_state,
8916 struct dc_stream_state *new_stream,
8917 struct dc_plane_state *surface,
8918 u32 flip_timestamp_in_us)
8920 struct mod_vrr_params vrr_params;
8921 struct dc_info_packet vrr_infopacket = {0};
8922 struct amdgpu_device *adev = dm->adev;
8923 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_crtc_state->base.crtc);
8924 unsigned long flags;
8925 bool pack_sdp_v1_3 = false;
8927 if (!new_stream)
8928 return;
8930 /*
8931 * TODO: Determine why min/max totals and vrefresh can be 0 here.
8932 * For now it's sufficient to just guard against these conditions.
8933 */
8935 if (!new_stream->timing.h_total || !new_stream->timing.v_total)
8936 return;
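/*
 * vrr_params is shared with the vblank/page-flip IRQ handlers, so it is
 * snapshotted and written back while holding event_lock.
 */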
8938 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
8939 vrr_params = acrtc->dm_irq_params.vrr_params;
8942 mod_freesync_handle_preflip(
8943 dm->freesync_module,
8944 surface,
8945 new_stream,
8946 flip_timestamp_in_us,
8947 &vrr_params);
8949 if (adev->family < AMDGPU_FAMILY_AI &&
8950 amdgpu_dm_vrr_active(new_crtc_state)) {
8951 mod_freesync_handle_v_update(dm->freesync_module,
8952 new_stream, &vrr_params);
8954 /* Need to call this before the frame ends. */
8955 dc_stream_adjust_vmin_vmax(dm->dc,
8956 new_crtc_state->stream,
8957 &vrr_params.adjust);
8961 mod_freesync_build_vrr_infopacket(
8962 dm->freesync_module,
8963 new_stream,
8964 &vrr_params,
8965 PACKET_TYPE_VRR,
8966 TRANSFER_FUNC_UNKNOWN,
8967 &vrr_infopacket,
8968 pack_sdp_v1_3);
8970 new_crtc_state->freesync_timing_changed |=
8971 (memcmp(&acrtc->dm_irq_params.vrr_params.adjust,
8972 &vrr_params.adjust,
8973 sizeof(vrr_params.adjust)) != 0);
8975 new_crtc_state->freesync_vrr_info_changed |=
8976 (memcmp(&new_crtc_state->vrr_infopacket,
8977 &vrr_infopacket,
8978 sizeof(vrr_infopacket)) != 0);
8980 acrtc->dm_irq_params.vrr_params = vrr_params;
8981 new_crtc_state->vrr_infopacket = vrr_infopacket;
8983 new_stream->adjust = acrtc->dm_irq_params.vrr_params.adjust;
8984 new_stream->vrr_infopacket = vrr_infopacket;
8986 if (new_crtc_state->freesync_vrr_info_changed)
8987 DRM_DEBUG_KMS("VRR packet update: crtc=%u enabled=%d state=%d",
8988 new_crtc_state->base.crtc->base.id,
8989 (int)new_crtc_state->base.vrr_enabled,
8990 (int)vrr_params.state);
8992 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
8995 static void update_stream_irq_parameters(
8996 struct amdgpu_display_manager *dm,
8997 struct dm_crtc_state *new_crtc_state)
8999 struct dc_stream_state *new_stream = new_crtc_state->stream;
9000 struct mod_vrr_params vrr_params;
9001 struct mod_freesync_config config = new_crtc_state->freesync_config;
9002 struct amdgpu_device *adev = dm->adev;
9003 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_crtc_state->base.crtc);
9004 unsigned long flags;
9006 if (!new_stream)
9007 return;
9009 /*
9010 * TODO: Determine why min/max totals and vrefresh can be 0 here.
9011 * For now it's sufficient to just guard against these conditions.
9012 */
9013 if (!new_stream->timing.h_total || !new_stream->timing.v_total)
9014 return;
9016 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
9017 vrr_params = acrtc->dm_irq_params.vrr_params;
9019 if (new_crtc_state->vrr_supported &&
9020 config.min_refresh_in_uhz &&
9021 config.max_refresh_in_uhz) {
9022 /*
9023 * if freesync compatible mode was set, config.state will be set
9024 * in atomic check
9025 */
9026 if (config.state == VRR_STATE_ACTIVE_FIXED && config.fixed_refresh_in_uhz &&
9027 (!drm_atomic_crtc_needs_modeset(&new_crtc_state->base) ||
9028 new_crtc_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED)) {
9029 vrr_params.max_refresh_in_uhz = config.max_refresh_in_uhz;
9030 vrr_params.min_refresh_in_uhz = config.min_refresh_in_uhz;
9031 vrr_params.fixed_refresh_in_uhz = config.fixed_refresh_in_uhz;
9032 vrr_params.state = VRR_STATE_ACTIVE_FIXED;
9033 } else {
9034 config.state = new_crtc_state->base.vrr_enabled ?
9035 VRR_STATE_ACTIVE_VARIABLE :
9036 VRR_STATE_INACTIVE;
9037 }
9038 } else {
9039 config.state = VRR_STATE_UNSUPPORTED;
9040 }
9042 mod_freesync_build_vrr_params(dm->freesync_module,
9043 new_stream,
9044 &config, &vrr_params);
9046 new_crtc_state->freesync_timing_changed |=
9047 (memcmp(&acrtc->dm_irq_params.vrr_params.adjust,
9048 &vrr_params.adjust, sizeof(vrr_params.adjust)) != 0);
9050 new_crtc_state->freesync_config = config;
9051 /* Copy state for access from DM IRQ handler */
9052 acrtc->dm_irq_params.freesync_config = config;
9053 acrtc->dm_irq_params.active_planes = new_crtc_state->active_planes;
9054 acrtc->dm_irq_params.vrr_params = vrr_params;
9055 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
9058 static void amdgpu_dm_handle_vrr_transition(struct dm_crtc_state *old_state,
9059 struct dm_crtc_state *new_state)
9061 bool old_vrr_active = amdgpu_dm_vrr_active(old_state);
9062 bool new_vrr_active = amdgpu_dm_vrr_active(new_state);
9064 if (!old_vrr_active && new_vrr_active) {
9065 /* Transition VRR inactive -> active:
9066 * While VRR is active, we must not disable vblank irq, as a
9067 * reenable after disable would compute bogus vblank/pflip
9068 * timestamps if it likely happened inside display front-porch.
9070 * We also need vupdate irq for the actual core vblank handling
9071 * at end of vblank.
9072 */
9073 dm_set_vupdate_irq(new_state->base.crtc, true);
9074 drm_crtc_vblank_get(new_state->base.crtc);
9075 DRM_DEBUG_DRIVER("%s: crtc=%u VRR off->on: Get vblank ref\n",
9076 __func__, new_state->base.crtc->base.id);
9077 } else if (old_vrr_active && !new_vrr_active) {
9078 /* Transition VRR active -> inactive:
9079 * Allow vblank irq disable again for fixed refresh rate.
9081 dm_set_vupdate_irq(new_state->base.crtc, false);
9082 drm_crtc_vblank_put(new_state->base.crtc);
9083 DRM_DEBUG_DRIVER("%s: crtc=%u VRR on->off: Drop vblank ref\n",
9084 __func__, new_state->base.crtc->base.id);
9088 static void amdgpu_dm_commit_cursors(struct drm_atomic_state *state)
9090 struct drm_plane *plane;
9091 struct drm_plane_state *old_plane_state;
9092 int i;
9095 * TODO: Make this per-stream so we don't issue redundant updates for
9096 * commits with multiple streams.
9098 for_each_old_plane_in_state(state, plane, old_plane_state, i)
9099 if (plane->type == DRM_PLANE_TYPE_CURSOR)
9100 handle_cursor_update(plane, old_plane_state);
9103 static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
9104 struct dc_state *dc_state,
9105 struct drm_device *dev,
9106 struct amdgpu_display_manager *dm,
9107 struct drm_crtc *pcrtc,
9108 bool wait_for_vblank)
9109 {
9110 uint32_t i;
9111 uint64_t timestamp_ns;
9112 struct drm_plane *plane;
9113 struct drm_plane_state *old_plane_state, *new_plane_state;
9114 struct amdgpu_crtc *acrtc_attach = to_amdgpu_crtc(pcrtc);
9115 struct drm_crtc_state *new_pcrtc_state =
9116 drm_atomic_get_new_crtc_state(state, pcrtc);
9117 struct dm_crtc_state *acrtc_state = to_dm_crtc_state(new_pcrtc_state);
9118 struct dm_crtc_state *dm_old_crtc_state =
9119 to_dm_crtc_state(drm_atomic_get_old_crtc_state(state, pcrtc));
9120 int planes_count = 0, vpos, hpos;
9122 unsigned long flags;
9123 struct amdgpu_bo *abo;
9124 uint32_t target_vblank, last_flip_vblank;
9125 bool vrr_active = amdgpu_dm_vrr_active(acrtc_state);
9126 bool pflip_present = false;
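/*
 * Everything needed for a dc_stream update is gathered in one
 * heap-allocated bundle; with MAX_SURFACES entries per array it would
 * presumably be too large to place on the kernel stack.
 */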
9127 struct {
9128 struct dc_surface_update surface_updates[MAX_SURFACES];
9129 struct dc_plane_info plane_infos[MAX_SURFACES];
9130 struct dc_scaling_info scaling_infos[MAX_SURFACES];
9131 struct dc_flip_addrs flip_addrs[MAX_SURFACES];
9132 struct dc_stream_update stream_update;
9133 } *bundle;
9135 bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
9137 if (!bundle) {
9138 dm_error("Failed to allocate update bundle\n");
9139 goto cleanup;
9140 }
9143 * Disable the cursor first if we're disabling all the planes.
9144 * It'll remain on the screen after the planes are re-enabled
9145 * if we don't.
9146 */
9147 if (acrtc_state->active_planes == 0)
9148 amdgpu_dm_commit_cursors(state);
9150 /* update planes when needed */
9151 for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
9152 struct drm_crtc *crtc = new_plane_state->crtc;
9153 struct drm_crtc_state *new_crtc_state;
9154 struct drm_framebuffer *fb = new_plane_state->fb;
9155 struct amdgpu_framebuffer *afb = (struct amdgpu_framebuffer *)fb;
9156 bool plane_needs_flip;
9157 struct dc_plane_state *dc_plane;
9158 struct dm_plane_state *dm_new_plane_state = to_dm_plane_state(new_plane_state);
9160 /* Cursor plane is handled after stream updates */
9161 if (plane->type == DRM_PLANE_TYPE_CURSOR)
9162 continue;
9164 if (!fb || !crtc || pcrtc != crtc)
9165 continue;
9167 new_crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
9168 if (!new_crtc_state->active)
9169 continue;
9171 dc_plane = dm_new_plane_state->dc_state;
9173 bundle->surface_updates[planes_count].surface = dc_plane;
9174 if (new_pcrtc_state->color_mgmt_changed) {
9175 bundle->surface_updates[planes_count].gamma = dc_plane->gamma_correction;
9176 bundle->surface_updates[planes_count].in_transfer_func = dc_plane->in_transfer_func;
9177 bundle->surface_updates[planes_count].gamut_remap_matrix = &dc_plane->gamut_remap_matrix;
9180 fill_dc_scaling_info(dm->adev, new_plane_state,
9181 &bundle->scaling_infos[planes_count]);
9183 bundle->surface_updates[planes_count].scaling_info =
9184 &bundle->scaling_infos[planes_count];
9186 plane_needs_flip = old_plane_state->fb && new_plane_state->fb;
9188 pflip_present = pflip_present || plane_needs_flip;
9190 if (!plane_needs_flip) {
9191 planes_count += 1;
9192 continue;
9193 }
9195 abo = gem_to_amdgpu_bo(fb->obj[0]);
9198 * Wait for all fences on this FB. Do limited wait to avoid
9199 * deadlock during GPU reset when this fence will not signal
9200 * but we hold reservation lock for the BO.
9202 r = dma_resv_wait_timeout(abo->tbo.base.resv, true, false,
9203 msecs_to_jiffies(5000));
9204 if (unlikely(r <= 0))
9205 DRM_ERROR("Waiting for fences timed out!");
9207 fill_dc_plane_info_and_addr(
9208 dm->adev, new_plane_state,
9210 &bundle->plane_infos[planes_count],
9211 &bundle->flip_addrs[planes_count].address,
9212 afb->tmz_surface, false);
9214 DRM_DEBUG_ATOMIC("plane: id=%d dcc_en=%d\n",
9215 new_plane_state->plane->index,
9216 bundle->plane_infos[planes_count].dcc.enable);
9218 bundle->surface_updates[planes_count].plane_info =
9219 &bundle->plane_infos[planes_count];
9222 * Only allow immediate flips for fast updates that don't
9223 * change FB pitch, DCC state, rotation or mirroring.
9224 */
9225 bundle->flip_addrs[planes_count].flip_immediate =
9226 crtc->state->async_flip &&
9227 acrtc_state->update_type == UPDATE_TYPE_FAST;
9229 timestamp_ns = ktime_get_ns();
9230 bundle->flip_addrs[planes_count].flip_timestamp_in_us = div_u64(timestamp_ns, 1000);
9231 bundle->surface_updates[planes_count].flip_addr = &bundle->flip_addrs[planes_count];
9232 bundle->surface_updates[planes_count].surface = dc_plane;
9234 if (!bundle->surface_updates[planes_count].surface) {
9235 DRM_ERROR("No surface for CRTC: id=%d\n",
9236 acrtc_attach->crtc_id);
9240 if (plane == pcrtc->primary)
9241 update_freesync_state_on_stream(
9244 acrtc_state->stream,
9245 dc_plane,
9246 bundle->flip_addrs[planes_count].flip_timestamp_in_us);
9248 DRM_DEBUG_ATOMIC("%s Flipping to hi: 0x%x, low: 0x%x\n",
9249 __func__,
9250 bundle->flip_addrs[planes_count].address.grph.addr.high_part,
9251 bundle->flip_addrs[planes_count].address.grph.addr.low_part);
9253 planes_count += 1;
9254 }
9257 if (pflip_present) {
9258 if (!vrr_active) {
9259 /* Use old throttling in non-vrr fixed refresh rate mode
9260 * to keep flip scheduling based on target vblank counts
9261 * working in a backwards compatible way, e.g., for
9262 * clients using the GLX_OML_sync_control extension or
9263 * DRI3/Present extension with defined target_msc.
9265 last_flip_vblank = amdgpu_get_vblank_counter_kms(pcrtc);
9267 } else {
9268 /* For variable refresh rate mode only:
9269 * Get vblank of last completed flip to avoid > 1 vrr
9270 * flips per video frame by use of throttling, but allow
9271 * flip programming anywhere in the possibly large
9272 * variable vrr vblank interval for fine-grained flip
9273 * timing control and more opportunity to avoid stutter
9274 * on late submission of flips.
9276 spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
9277 last_flip_vblank = acrtc_attach->dm_irq_params.last_flip_vblank;
9278 spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
9279 }
9281 target_vblank = last_flip_vblank + wait_for_vblank;
9284 * Wait until we're out of the vertical blank period before the one
9285 * targeted by the flip
9287 while ((acrtc_attach->enabled &&
9288 (amdgpu_display_get_crtc_scanoutpos(dm->ddev, acrtc_attach->crtc_id,
9289 0, &vpos, &hpos, NULL,
9290 NULL, &pcrtc->hwmode)
9291 & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK)) ==
9292 (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK) &&
9293 (int)(target_vblank -
9294 amdgpu_get_vblank_counter_kms(pcrtc)) > 0)) {
9295 usleep_range(1000, 1100);
9299 * Prepare the flip event for the pageflip interrupt to handle.
9301 * This only works in the case where we've already turned on the
9302 * appropriate hardware blocks (eg. HUBP) so in the transition case
9303 * from 0 -> n planes we have to skip a hardware generated event
9304 * and rely on sending it from software.
9306 if (acrtc_attach->base.state->event &&
9307 acrtc_state->active_planes > 0 &&
9308 !acrtc_state->force_dpms_off) {
9309 drm_crtc_vblank_get(pcrtc);
9311 spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
9313 WARN_ON(acrtc_attach->pflip_status != AMDGPU_FLIP_NONE);
9314 prepare_flip_isr(acrtc_attach);
9316 spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
9319 if (acrtc_state->stream) {
9320 if (acrtc_state->freesync_vrr_info_changed)
9321 bundle->stream_update.vrr_infopacket =
9322 &acrtc_state->stream->vrr_infopacket;
9323 }
9324 }
9326 /* Update the planes if changed or disable if we don't have any. */
9327 if ((planes_count || acrtc_state->active_planes == 0) &&
9328 acrtc_state->stream) {
9329 #if defined(CONFIG_DRM_AMD_DC_DCN)
9331 * If PSR or idle optimizations are enabled then flush out
9332 * any pending work before hardware programming.
9334 if (dm->vblank_control_workqueue)
9335 flush_workqueue(dm->vblank_control_workqueue);
9338 bundle->stream_update.stream = acrtc_state->stream;
9339 if (new_pcrtc_state->mode_changed) {
9340 bundle->stream_update.src = acrtc_state->stream->src;
9341 bundle->stream_update.dst = acrtc_state->stream->dst;
9344 if (new_pcrtc_state->color_mgmt_changed) {
9346 * TODO: This isn't fully correct since we've actually
9347 * already modified the stream in place.
9349 bundle->stream_update.gamut_remap =
9350 &acrtc_state->stream->gamut_remap_matrix;
9351 bundle->stream_update.output_csc_transform =
9352 &acrtc_state->stream->csc_color_matrix;
9353 bundle->stream_update.out_transfer_func =
9354 acrtc_state->stream->out_transfer_func;
9357 acrtc_state->stream->abm_level = acrtc_state->abm_level;
9358 if (acrtc_state->abm_level != dm_old_crtc_state->abm_level)
9359 bundle->stream_update.abm_level = &acrtc_state->abm_level;
9362 * If FreeSync state on the stream has changed then we need to
9363 * re-adjust the min/max bounds now that DC doesn't handle this
9364 * as part of commit.
9366 if (is_dc_timing_adjust_needed(dm_old_crtc_state, acrtc_state)) {
9367 spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
9368 dc_stream_adjust_vmin_vmax(
9369 dm->dc, acrtc_state->stream,
9370 &acrtc_attach->dm_irq_params.vrr_params.adjust);
9371 spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
9373 mutex_lock(&dm->dc_lock);
9374 if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
9375 acrtc_state->stream->link->psr_settings.psr_allow_active)
9376 amdgpu_dm_psr_disable(acrtc_state->stream);
9378 dc_commit_updates_for_stream(dm->dc,
9379 bundle->surface_updates,
9380 planes_count,
9381 acrtc_state->stream,
9382 &bundle->stream_update,
9383 dc_state);
9386 * Enable or disable the interrupts on the backend.
9388 * Most pipes are put into power gating when unused.
9390 * When power gating is enabled on a pipe we lose the
9391 * interrupt enablement state when power gating is disabled.
9393 * So we need to update the IRQ control state in hardware
9394 * whenever the pipe turns on (since it could be previously
9395 * power gated) or off (since some pipes can't be power gated
9396 * on some ASICs).
9397 */
9398 if (dm_old_crtc_state->active_planes != acrtc_state->active_planes)
9399 dm_update_pflip_irq_state(drm_to_adev(dev),
9400 acrtc_attach);
9402 if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
9403 acrtc_state->stream->link->psr_settings.psr_version != DC_PSR_VERSION_UNSUPPORTED &&
9404 !acrtc_state->stream->link->psr_settings.psr_feature_enabled)
9405 amdgpu_dm_link_setup_psr(acrtc_state->stream);
9407 /* Decrement skip count when PSR is enabled and we're doing fast updates. */
9408 if (acrtc_state->update_type == UPDATE_TYPE_FAST &&
9409 acrtc_state->stream->link->psr_settings.psr_feature_enabled) {
9410 struct amdgpu_dm_connector *aconn =
9411 (struct amdgpu_dm_connector *)acrtc_state->stream->dm_stream_context;
9413 if (aconn->psr_skip_count > 0)
9414 aconn->psr_skip_count--;
9416 /* Allow PSR when skip count is 0. */
9417 acrtc_attach->dm_irq_params.allow_psr_entry = !aconn->psr_skip_count;
9418 } else {
9419 acrtc_attach->dm_irq_params.allow_psr_entry = false;
9420 }
9422 mutex_unlock(&dm->dc_lock);
9426 * Update cursor state *after* programming all the planes.
9427 * This avoids redundant programming in the case where we're going
9428 * to be disabling a single plane - those pipes are being disabled.
9430 if (acrtc_state->active_planes)
9431 amdgpu_dm_commit_cursors(state);
9433 cleanup:
9434 kfree(bundle);
9435 }
9437 static void amdgpu_dm_commit_audio(struct drm_device *dev,
9438 struct drm_atomic_state *state)
9440 struct amdgpu_device *adev = drm_to_adev(dev);
9441 struct amdgpu_dm_connector *aconnector;
9442 struct drm_connector *connector;
9443 struct drm_connector_state *old_con_state, *new_con_state;
9444 struct drm_crtc_state *new_crtc_state;
9445 struct dm_crtc_state *new_dm_crtc_state;
9446 const struct dc_stream_status *status;
9447 int i, inst;
9449 /* Notify device removals. */
9450 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
9451 if (old_con_state->crtc != new_con_state->crtc) {
9452 /* CRTC changes require notification. */
9453 goto notify;
9454 }
9456 if (!new_con_state->crtc)
9457 continue;
9459 new_crtc_state = drm_atomic_get_new_crtc_state(
9460 state, new_con_state->crtc);
9462 if (!new_crtc_state)
9463 continue;
9465 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
9466 continue;
9468 notify:
9469 aconnector = to_amdgpu_dm_connector(connector);
9471 mutex_lock(&adev->dm.audio_lock);
9472 inst = aconnector->audio_inst;
9473 aconnector->audio_inst = -1;
9474 mutex_unlock(&adev->dm.audio_lock);
9476 amdgpu_dm_audio_eld_notify(adev, inst);
9479 /* Notify audio device additions. */
9480 for_each_new_connector_in_state(state, connector, new_con_state, i) {
9481 if (!new_con_state->crtc)
9482 continue;
9484 new_crtc_state = drm_atomic_get_new_crtc_state(
9485 state, new_con_state->crtc);
9487 if (!new_crtc_state)
9488 continue;
9490 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
9491 continue;
9493 new_dm_crtc_state = to_dm_crtc_state(new_crtc_state);
9494 if (!new_dm_crtc_state->stream)
9495 continue;
9497 status = dc_stream_get_status(new_dm_crtc_state->stream);
9498 if (!status)
9499 continue;
9501 aconnector = to_amdgpu_dm_connector(connector);
9503 mutex_lock(&adev->dm.audio_lock);
9504 inst = status->audio_inst;
9505 aconnector->audio_inst = inst;
9506 mutex_unlock(&adev->dm.audio_lock);
9508 amdgpu_dm_audio_eld_notify(adev, inst);
9513 * amdgpu_dm_crtc_copy_transient_flags - copy mirrored flags from DRM to DC
9514 * @crtc_state: the DRM CRTC state
9515 * @stream_state: the DC stream state.
9517 * Copy the mirrored transient state flags from DRM, to DC. It is used to bring
9518 * a dc_stream_state's flags in sync with a drm_crtc_state's flags.
9520 static void amdgpu_dm_crtc_copy_transient_flags(struct drm_crtc_state *crtc_state,
9521 struct dc_stream_state *stream_state)
9523 stream_state->mode_changed = drm_atomic_crtc_needs_modeset(crtc_state);
9527 * amdgpu_dm_atomic_commit_tail() - AMDgpu DM's commit tail implementation.
9528 * @state: The atomic state to commit
9530 * This will tell DC to commit the constructed DC state from atomic_check,
9531 * programming the hardware. Any failures here implies a hardware failure, since
9532 * atomic check should have filtered anything non-kosher.
9534 static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
9536 struct drm_device *dev = state->dev;
9537 struct amdgpu_device *adev = drm_to_adev(dev);
9538 struct amdgpu_display_manager *dm = &adev->dm;
9539 struct dm_atomic_state *dm_state;
9540 struct dc_state *dc_state = NULL, *dc_state_temp = NULL;
9541 uint32_t i, j;
9542 struct drm_crtc *crtc;
9543 struct drm_crtc_state *old_crtc_state, *new_crtc_state;
9544 unsigned long flags;
9545 bool wait_for_vblank = true;
9546 struct drm_connector *connector;
9547 struct drm_connector_state *old_con_state, *new_con_state;
9548 struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
9549 int crtc_disable_count = 0;
9550 bool mode_set_reset_required = false;
9552 trace_amdgpu_dm_atomic_commit_tail_begin(state);
9554 drm_atomic_helper_update_legacy_modeset_state(dev, state);
9556 dm_state = dm_atomic_get_new_state(state);
9557 if (dm_state && dm_state->context) {
9558 dc_state = dm_state->context;
9560 /* No state changes, retain current state. */
9561 dc_state_temp = dc_create_state(dm->dc);
9562 ASSERT(dc_state_temp);
9563 dc_state = dc_state_temp;
9564 dc_resource_state_copy_construct_current(dm->dc, dc_state);
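/*
 * First pass: disable interrupts on every CRTC that is turning off or
 * doing a modeset; they are re-enabled further below, once the new
 * front-end state has been programmed, so the IRQ handlers never see
 * stale state.
 */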
9567 for_each_oldnew_crtc_in_state (state, crtc, old_crtc_state,
9568 new_crtc_state, i) {
9569 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
9571 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9573 if (old_crtc_state->active &&
9574 (!new_crtc_state->active ||
9575 drm_atomic_crtc_needs_modeset(new_crtc_state))) {
9576 manage_dm_interrupts(adev, acrtc, false);
9577 dc_stream_release(dm_old_crtc_state->stream);
9578 }
9579 }
9581 drm_atomic_helper_calc_timestamping_constants(state);
9583 /* update changed items */
9584 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
9585 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
9587 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9588 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9590 DRM_DEBUG_ATOMIC(
9591 "amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
9592 "planes_changed:%d, mode_changed:%d,active_changed:%d,"
9593 "connectors_changed:%d\n",
9594 acrtc->crtc_id,
9595 new_crtc_state->enable,
9596 new_crtc_state->active,
9597 new_crtc_state->planes_changed,
9598 new_crtc_state->mode_changed,
9599 new_crtc_state->active_changed,
9600 new_crtc_state->connectors_changed);
9602 /* Disable cursor if disabling crtc */
9603 if (old_crtc_state->active && !new_crtc_state->active) {
9604 struct dc_cursor_position position;
9606 memset(&position, 0, sizeof(position));
9607 mutex_lock(&dm->dc_lock);
9608 dc_stream_set_cursor_position(dm_old_crtc_state->stream, &position);
9609 mutex_unlock(&dm->dc_lock);
9612 /* Copy all transient state flags into dc state */
9613 if (dm_new_crtc_state->stream) {
9614 amdgpu_dm_crtc_copy_transient_flags(&dm_new_crtc_state->base,
9615 dm_new_crtc_state->stream);
9618 /* handles headless hotplug case, updating new_state and
9619 * aconnector as needed
9622 if (modeset_required(new_crtc_state, dm_new_crtc_state->stream, dm_old_crtc_state->stream)) {
9624 DRM_DEBUG_ATOMIC("Atomic commit: SET crtc id %d: [%p]\n", acrtc->crtc_id, acrtc);
9626 if (!dm_new_crtc_state->stream) {
9628 * this could happen because of issues with
9629 * userspace notifications delivery.
9630 * In this case userspace tries to set mode on
9631 * display which is disconnected in fact.
9632 * dc_sink is NULL in this case on aconnector.
9633 * We expect reset mode will come soon.
9635 * This can also happen when unplug is done
9636 * during resume sequence ended
9638 * In this case, we want to pretend we still
9639 * have a sink to keep the pipe running so that
9640 * hw state is consistent with the sw state
9642 DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
9643 __func__, acrtc->base.base.id);
9647 if (dm_old_crtc_state->stream)
9648 remove_stream(adev, acrtc, dm_old_crtc_state->stream);
9650 pm_runtime_get_noresume(dev->dev);
9652 acrtc->enabled = true;
9653 acrtc->hw_mode = new_crtc_state->mode;
9654 crtc->hwmode = new_crtc_state->mode;
9655 mode_set_reset_required = true;
9656 } else if (modereset_required(new_crtc_state)) {
9657 DRM_DEBUG_ATOMIC("Atomic commit: RESET. crtc id %d:[%p]\n", acrtc->crtc_id, acrtc);
9658 /* i.e. reset mode */
9659 if (dm_old_crtc_state->stream)
9660 remove_stream(adev, acrtc, dm_old_crtc_state->stream);
9662 mode_set_reset_required = true;
9664 } /* for_each_crtc_in_state() */
9667 /* if there was a mode set or reset, disable eDP PSR */
9668 if (mode_set_reset_required) {
9669 #if defined(CONFIG_DRM_AMD_DC_DCN)
9670 if (dm->vblank_control_workqueue)
9671 flush_workqueue(dm->vblank_control_workqueue);
9673 amdgpu_dm_psr_disable_all(dm);
9676 dm_enable_per_frame_crtc_master_sync(dc_state);
9677 mutex_lock(&dm->dc_lock);
9678 WARN_ON(!dc_commit_state(dm->dc, dc_state));
9679 #if defined(CONFIG_DRM_AMD_DC_DCN)
9680 /* Allow idle optimization when vblank count is 0 for display off */
9681 if (dm->active_vblank_irq_count == 0)
9682 dc_allow_idle_optimizations(dm->dc, true);
9683 #endif
9684 mutex_unlock(&dm->dc_lock);
9687 for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
9688 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
9690 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9692 if (dm_new_crtc_state->stream != NULL) {
9693 const struct dc_stream_status *status =
9694 dc_stream_get_status(dm_new_crtc_state->stream);
9696 if (!status)
9697 status = dc_stream_get_status_from_state(dc_state,
9698 dm_new_crtc_state->stream);
9699 if (!status)
9700 DC_ERR("got no status for stream %p on acrtc%p\n", dm_new_crtc_state->stream, acrtc);
9701 else
9702 acrtc->otg_inst = status->primary_otg_inst;
9705 #ifdef CONFIG_DRM_AMD_DC_HDCP
9706 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
9707 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
9708 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
9709 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
9711 new_crtc_state = NULL;
9713 if (acrtc)
9714 new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
9716 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9718 if (dm_new_crtc_state && dm_new_crtc_state->stream == NULL &&
9719 connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED) {
9720 hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
9721 new_con_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
9722 dm_new_con_state->update_hdcp = true;
9723 continue;
9724 }
9726 if (is_content_protection_different(new_con_state, old_con_state, connector, adev->dm.hdcp_workqueue))
9727 hdcp_update_display(
9728 adev->dm.hdcp_workqueue, aconnector->dc_link->link_index, aconnector,
9729 new_con_state->hdcp_content_type,
9730 new_con_state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED);
9734 /* Handle connector state changes */
9735 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
9736 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
9737 struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
9738 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
9739 struct dc_surface_update dummy_updates[MAX_SURFACES];
9740 struct dc_stream_update stream_update;
9741 struct dc_info_packet hdr_packet;
9742 struct dc_stream_status *status = NULL;
9743 bool abm_changed, hdr_changed, scaling_changed;
9745 memset(&dummy_updates, 0, sizeof(dummy_updates));
9746 memset(&stream_update, 0, sizeof(stream_update));
9748 if (acrtc) {
9749 new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
9750 old_crtc_state = drm_atomic_get_old_crtc_state(state, &acrtc->base);
9751 }
9753 /* Skip any modesets/resets */
9754 if (!acrtc || drm_atomic_crtc_needs_modeset(new_crtc_state))
9755 continue;
9757 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9758 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9760 scaling_changed = is_scaling_state_different(dm_new_con_state,
9761 dm_old_con_state);
9763 abm_changed = dm_new_crtc_state->abm_level !=
9764 dm_old_crtc_state->abm_level;
9766 hdr_changed =
9767 !drm_connector_atomic_hdr_metadata_equal(old_con_state, new_con_state);
9769 if (!scaling_changed && !abm_changed && !hdr_changed)
9770 continue;
9772 stream_update.stream = dm_new_crtc_state->stream;
9773 if (scaling_changed) {
9774 update_stream_scaling_settings(&dm_new_con_state->base.crtc->mode,
9775 dm_new_con_state, dm_new_crtc_state->stream);
9777 stream_update.src = dm_new_crtc_state->stream->src;
9778 stream_update.dst = dm_new_crtc_state->stream->dst;
9781 if (abm_changed) {
9782 dm_new_crtc_state->stream->abm_level = dm_new_crtc_state->abm_level;
9784 stream_update.abm_level = &dm_new_crtc_state->abm_level;
9785 }
9787 if (hdr_changed) {
9788 fill_hdr_info_packet(new_con_state, &hdr_packet);
9789 stream_update.hdr_static_metadata = &hdr_packet;
9790 }
9792 status = dc_stream_get_status(dm_new_crtc_state->stream);
9794 if (WARN_ON(!status))
9795 continue;
9797 WARN_ON(!status->plane_count);
9800 * TODO: DC refuses to perform stream updates without a dc_surface_update.
9801 * Here we create an empty update on each plane.
9802 * To fix this, DC should permit updating only stream properties.
9804 for (j = 0; j < status->plane_count; j++)
9805 dummy_updates[j].surface = status->plane_states[0];
9808 mutex_lock(&dm->dc_lock);
9809 dc_commit_updates_for_stream(dm->dc,
9810 dummy_updates,
9811 status->plane_count,
9812 dm_new_crtc_state->stream,
9813 &stream_update,
9814 dc_state);
9815 mutex_unlock(&dm->dc_lock);
9816 }
9818 /* Count number of newly disabled CRTCs for dropping PM refs later. */
9819 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
9820 new_crtc_state, i) {
9821 if (old_crtc_state->active && !new_crtc_state->active)
9822 crtc_disable_count++;
9824 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9825 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9827 /* For freesync config update on crtc state and params for irq */
9828 update_stream_irq_parameters(dm, dm_new_crtc_state);
9830 /* Handle vrr on->off / off->on transitions */
9831 amdgpu_dm_handle_vrr_transition(dm_old_crtc_state,
9832 dm_new_crtc_state);
9833 }
9836 * Enable interrupts for CRTCs that are newly enabled or went through
9837 * a modeset. It was intentionally deferred until after the front end
9838 * state was modified to wait until the OTG was on and so the IRQ
9839 * handlers didn't access stale or invalid state.
9841 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
9842 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
9843 #ifdef CONFIG_DEBUG_FS
9844 bool configure_crc = false;
9845 enum amdgpu_dm_pipe_crc_source cur_crc_src;
9846 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
9847 struct crc_rd_work *crc_rd_wrk = dm->crc_rd_wrk;
9849 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
9850 cur_crc_src = acrtc->dm_irq_params.crc_src;
9851 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
9853 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9855 if (new_crtc_state->active &&
9856 (!old_crtc_state->active ||
9857 drm_atomic_crtc_needs_modeset(new_crtc_state))) {
9858 dc_stream_retain(dm_new_crtc_state->stream);
9859 acrtc->dm_irq_params.stream = dm_new_crtc_state->stream;
9860 manage_dm_interrupts(adev, acrtc, true);
9862 #ifdef CONFIG_DEBUG_FS
9864 * Frontend may have changed so reapply the CRC capture
9865 * settings for the stream.
9867 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9869 if (amdgpu_dm_is_valid_crc_source(cur_crc_src)) {
9870 configure_crc = true;
9871 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
9872 if (amdgpu_dm_crc_window_is_activated(crtc)) {
9873 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
9874 acrtc->dm_irq_params.crc_window.update_win = true;
9875 acrtc->dm_irq_params.crc_window.skip_frame_cnt = 2;
9876 spin_lock_irq(&crc_rd_wrk->crc_rd_work_lock);
9877 crc_rd_wrk->crtc = crtc;
9878 spin_unlock_irq(&crc_rd_wrk->crc_rd_work_lock);
9879 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
9880 }
9881 #endif
9882 }
9884 if (configure_crc)
9885 if (amdgpu_dm_crtc_configure_crc_source(
9886 crtc, dm_new_crtc_state, cur_crc_src))
9887 DRM_DEBUG_DRIVER("Failed to configure crc source");
9892 for_each_new_crtc_in_state(state, crtc, new_crtc_state, j)
9893 if (new_crtc_state->async_flip)
9894 wait_for_vblank = false;
9896 /* update planes when needed per crtc*/
9897 for_each_new_crtc_in_state(state, crtc, new_crtc_state, j) {
9898 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9900 if (dm_new_crtc_state->stream)
9901 amdgpu_dm_commit_planes(state, dc_state, dev,
9902 dm, crtc, wait_for_vblank);
9905 /* Update audio instances for each connector. */
9906 amdgpu_dm_commit_audio(dev, state);
9908 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) || \
9909 defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
9910 /* restore the backlight level */
9911 for (i = 0; i < dm->num_of_edps; i++) {
9912 if (dm->backlight_dev[i] &&
9913 (amdgpu_dm_backlight_get_level(dm, i) != dm->brightness[i]))
9914 amdgpu_dm_backlight_set_level(dm, i, dm->brightness[i]);
9918 * send vblank event on all events not handled in flip and
9919 * mark consumed event for drm_atomic_helper_commit_hw_done
9921 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
9922 for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
9924 if (new_crtc_state->event)
9925 drm_send_event_locked(dev, &new_crtc_state->event->base);
9927 new_crtc_state->event = NULL;
9929 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
9931 /* Signal HW programming completion */
9932 drm_atomic_helper_commit_hw_done(state);
9934 if (wait_for_vblank)
9935 drm_atomic_helper_wait_for_flip_done(dev, state);
9937 drm_atomic_helper_cleanup_planes(dev, state);
9939 /* return the stolen vga memory back to VRAM */
9940 if (!adev->mman.keep_stolen_vga_memory)
9941 amdgpu_bo_free_kernel(&adev->mman.stolen_vga_memory, NULL, NULL);
9942 amdgpu_bo_free_kernel(&adev->mman.stolen_extended_memory, NULL, NULL);
9945 * Finally, drop a runtime PM reference for each newly disabled CRTC,
9946 * so we can put the GPU into runtime suspend if we're not driving any
9947 * displays anymore.
9948 */
9949 for (i = 0; i < crtc_disable_count; i++)
9950 pm_runtime_put_autosuspend(dev->dev);
9951 pm_runtime_mark_last_busy(dev->dev);
9953 if (dc_state_temp)
9954 dc_release_state(dc_state_temp);
9955 }
9958 static int dm_force_atomic_commit(struct drm_connector *connector)
9961 struct drm_device *ddev = connector->dev;
9962 struct drm_atomic_state *state = drm_atomic_state_alloc(ddev);
9963 struct amdgpu_crtc *disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
9964 struct drm_plane *plane = disconnected_acrtc->base.primary;
9965 struct drm_connector_state *conn_state;
9966 struct drm_crtc_state *crtc_state;
9967 struct drm_plane_state *plane_state;
9968 int ret = 0;
9972 state->acquire_ctx = ddev->mode_config.acquire_ctx;
9974 /* Construct an atomic state to restore previous display setting */
9977 * Attach connectors to drm_atomic_state
9979 conn_state = drm_atomic_get_connector_state(state, connector);
9981 ret = PTR_ERR_OR_ZERO(conn_state);
9982 if (ret)
9983 goto out;
9985 /* Attach crtc to drm_atomic_state*/
9986 crtc_state = drm_atomic_get_crtc_state(state, &disconnected_acrtc->base);
9988 ret = PTR_ERR_OR_ZERO(crtc_state);
9989 if (ret)
9990 goto out;
9992 /* force a restore */
9993 crtc_state->mode_changed = true;
9995 /* Attach plane to drm_atomic_state */
9996 plane_state = drm_atomic_get_plane_state(state, plane);
9998 ret = PTR_ERR_OR_ZERO(plane_state);
9999 if (ret)
10000 goto out;
10002 /* Call commit internally with the state we just constructed */
10003 ret = drm_atomic_commit(state);
10005 out:
10006 drm_atomic_state_put(state);
10007 if (ret)
10008 DRM_ERROR("Restoring old state failed with %i\n", ret);
10010 return ret;
10011 }
10013 /*
10014 * This function handles all cases when set mode does not come upon hotplug.
10015 * This includes when a display is unplugged then plugged back into the
10016 * same port and when running without usermode desktop manager support.
10017 */
10018 void dm_restore_drm_connector_state(struct drm_device *dev,
10019 struct drm_connector *connector)
10021 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
10022 struct amdgpu_crtc *disconnected_acrtc;
10023 struct dm_crtc_state *acrtc_state;
10025 if (!aconnector->dc_sink || !connector->state || !connector->encoder)
10026 return;
10028 disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
10029 if (!disconnected_acrtc)
10030 return;
10032 acrtc_state = to_dm_crtc_state(disconnected_acrtc->base.state);
10033 if (!acrtc_state->stream)
10034 return;
10036 /*
10037 * If the previous sink is not released and different from the current,
10038 * we deduce we are in a state where we can not rely on usermode call
10039 * to turn on the display, so we do it here
10040 */
10041 if (acrtc_state->stream->sink != aconnector->dc_sink)
10042 dm_force_atomic_commit(&aconnector->base);
10045 /*
10046 * Grabs all modesetting locks to serialize against any blocking commits,
10047 * and waits for completion of all non-blocking commits.
10048 */
10049 static int do_aquire_global_lock(struct drm_device *dev,
10050 struct drm_atomic_state *state)
10052 struct drm_crtc *crtc;
10053 struct drm_crtc_commit *commit;
10054 long ret;
10056 /*
10057 * Adding all modeset locks to acquire_ctx will
10058 * ensure that when the framework releases it, the
10059 * extra locks we are locking here will get released too.
10060 */
10061 ret = drm_modeset_lock_all_ctx(dev, state->acquire_ctx);
10062 if (ret)
10063 return ret;
10065 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
10066 spin_lock(&crtc->commit_lock);
10067 commit = list_first_entry_or_null(&crtc->commit_list,
10068 struct drm_crtc_commit, commit_entry);
10070 drm_crtc_commit_get(commit);
10071 spin_unlock(&crtc->commit_lock);
10077 * Make sure all pending HW programming has completed and
10078 * all page flips are done.
10079 */
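/*
 * wait_for_completion_interruptible_timeout() returns 0 on timeout, a
 * negative errno if interrupted by a signal, and the number of jiffies
 * left (> 0) on success - which is why only ret < 0 is treated as a hard
 * error below, while a timeout is merely logged.
 */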
10080 ret = wait_for_completion_interruptible_timeout(&commit->hw_done, 10*HZ);
10083 ret = wait_for_completion_interruptible_timeout(
10084 &commit->flip_done, 10*HZ);
10087 DRM_ERROR("[CRTC:%d:%s] hw_done or flip_done "
10088 "timed out\n", crtc->base.id, crtc->name);
10090 drm_crtc_commit_put(commit);
10093 return ret < 0 ? ret : 0;
10096 static void get_freesync_config_for_crtc(
10097 struct dm_crtc_state *new_crtc_state,
10098 struct dm_connector_state *new_con_state)
10100 struct mod_freesync_config config = {0};
10101 struct amdgpu_dm_connector *aconnector =
10102 to_amdgpu_dm_connector(new_con_state->base.connector);
10103 struct drm_display_mode *mode = &new_crtc_state->base.mode;
10104 int vrefresh = drm_mode_vrefresh(mode);
10105 bool fs_vid_mode = false;
10107 new_crtc_state->vrr_supported = new_con_state->freesync_capable &&
10108 vrefresh >= aconnector->min_vfreq &&
10109 vrefresh <= aconnector->max_vfreq;
10111 if (new_crtc_state->vrr_supported) {
10112 new_crtc_state->stream->ignore_msa_timing_param = true;
10113 fs_vid_mode = new_crtc_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED;
10115 config.min_refresh_in_uhz = aconnector->min_vfreq * 1000000;
10116 config.max_refresh_in_uhz = aconnector->max_vfreq * 1000000;
10117 config.vsif_supported = true;
10121 config.state = VRR_STATE_ACTIVE_FIXED;
10122 config.fixed_refresh_in_uhz = new_crtc_state->freesync_config.fixed_refresh_in_uhz;
10124 } else if (new_crtc_state->base.vrr_enabled) {
10125 config.state = VRR_STATE_ACTIVE_VARIABLE;
10127 config.state = VRR_STATE_INACTIVE;
10131 new_crtc_state->freesync_config = config;
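/*
 * A minimal sketch of the resulting configuration for an assumed 48-144 Hz
 * FreeSync panel (illustrative values, not taken from real EDID data):
 *
 *	config.state = VRR_STATE_ACTIVE_VARIABLE;  // base.vrr_enabled set
 *	config.min_refresh_in_uhz = 48 * 1000000;  // 48000000 uHz
 *	config.max_refresh_in_uhz = 144 * 1000000; // 144000000 uHz
 *	config.vsif_supported = true;
 */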
10134 static void reset_freesync_config_for_crtc(
10135 struct dm_crtc_state *new_crtc_state)
10137 new_crtc_state->vrr_supported = false;
10139 memset(&new_crtc_state->vrr_infopacket, 0,
10140 sizeof(new_crtc_state->vrr_infopacket));
10144 is_timing_unchanged_for_freesync(struct drm_crtc_state *old_crtc_state,
10145 struct drm_crtc_state *new_crtc_state)
10147 struct drm_display_mode old_mode, new_mode;
10149 if (!old_crtc_state || !new_crtc_state)
10152 old_mode = old_crtc_state->mode;
10153 new_mode = new_crtc_state->mode;
10155 if (old_mode.clock == new_mode.clock &&
10156 old_mode.hdisplay == new_mode.hdisplay &&
10157 old_mode.vdisplay == new_mode.vdisplay &&
10158 old_mode.htotal == new_mode.htotal &&
10159 old_mode.vtotal != new_mode.vtotal &&
10160 old_mode.hsync_start == new_mode.hsync_start &&
10161 old_mode.vsync_start != new_mode.vsync_start &&
10162 old_mode.hsync_end == new_mode.hsync_end &&
10163 old_mode.vsync_end != new_mode.vsync_end &&
10164 old_mode.hskew == new_mode.hskew &&
10165 old_mode.vscan == new_mode.vscan &&
10166 (old_mode.vsync_end - old_mode.vsync_start) ==
10167 (new_mode.vsync_end - new_mode.vsync_start))
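/*
 * In other words, the two modes must be identical except for their vertical
 * blanking placement: vtotal, vsync_start and vsync_end all differ while the
 * vsync pulse width stays the same. That is precisely the signature of a
 * FreeSync video mode derived by stretching only the vertical front porch,
 * e.g. keeping the active area and pixel clock while growing vtotal to
 * lower the refresh rate.
 */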
10173 static void set_freesync_fixed_config(struct dm_crtc_state *dm_new_crtc_state) {
10174 uint64_t num, den, res;
10175 struct drm_crtc_state *new_crtc_state = &dm_new_crtc_state->base;
10177 dm_new_crtc_state->freesync_config.state = VRR_STATE_ACTIVE_FIXED;
10179 num = (unsigned long long)new_crtc_state->mode.clock * 1000 * 1000000;
10180 den = (unsigned long long)new_crtc_state->mode.htotal *
10181 (unsigned long long)new_crtc_state->mode.vtotal;
10183 res = div_u64(num, den);
10184 dm_new_crtc_state->freesync_config.fixed_refresh_in_uhz = res;
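/*
 * Worked example (illustrative values): a CEA 1920x1080@60 mode has
 * mode.clock == 148500 (kHz), htotal == 2200 and vtotal == 1125, so
 *
 *	num = 148500ULL * 1000 * 1000000 == 148500000000000
 *	den = 2200ULL * 1125             == 2475000
 *	res = div_u64(num, den)          == 60000000
 *
 * i.e. fixed_refresh_in_uhz holds the nominal refresh rate in micro-Hertz
 * (60000000 uHz == 60 Hz).
 */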
10187 static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
10188 struct drm_atomic_state *state,
10189 struct drm_crtc *crtc,
10190 struct drm_crtc_state *old_crtc_state,
10191 struct drm_crtc_state *new_crtc_state,
10193 bool *lock_and_validation_needed)
10195 struct dm_atomic_state *dm_state = NULL;
10196 struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
10197 struct dc_stream_state *new_stream;
10201 * TODO: Move this code into dm_crtc_atomic_check once we get rid of dc_validation_set
10202 * and update changed items there.
10204 struct amdgpu_crtc *acrtc = NULL;
10205 struct amdgpu_dm_connector *aconnector = NULL;
10206 struct drm_connector_state *drm_new_conn_state = NULL, *drm_old_conn_state = NULL;
10207 struct dm_connector_state *dm_new_conn_state = NULL, *dm_old_conn_state = NULL;
10211 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
10212 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
10213 acrtc = to_amdgpu_crtc(crtc);
10214 aconnector = amdgpu_dm_find_first_crtc_matching_connector(state, crtc);
10216 /* TODO This hack should go away */
10217 if (aconnector && enable) {
10218 /* Make sure fake sink is created in plug-in scenario */
10219 drm_new_conn_state = drm_atomic_get_new_connector_state(state,
10220 &aconnector->base);
10221 drm_old_conn_state = drm_atomic_get_old_connector_state(state,
10222 &aconnector->base);
10224 if (IS_ERR(drm_new_conn_state)) {
10225 ret = PTR_ERR_OR_ZERO(drm_new_conn_state);
10229 dm_new_conn_state = to_dm_connector_state(drm_new_conn_state);
10230 dm_old_conn_state = to_dm_connector_state(drm_old_conn_state);
10232 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
10235 new_stream = create_validate_stream_for_sink(aconnector,
10236 &new_crtc_state->mode,
10238 dm_old_crtc_state->stream);
10241 * We can have no stream on ACTION_SET if a display
10242 * was disconnected during S3; in this case it is not an
10243 * error. The OS will be updated after detection and
10244 * will do the right thing on the next atomic commit.
10248 DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
10249 __func__, acrtc->base.base.id);
10255 * TODO: Check VSDB bits to decide whether this should
10256 * be enabled or not.
10258 new_stream->triggered_crtc_reset.enabled =
10259 dm->force_timing_sync;
10261 dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
10263 ret = fill_hdr_info_packet(drm_new_conn_state,
10264 &new_stream->hdr_static_metadata);
10269 * If we already removed the old stream from the context
10270 * (and set the new stream to NULL) then we can't reuse
10271 * the old stream even if the stream and scaling are unchanged.
10272 * Otherwise we would hit the BUG_ON() and get a black screen.
10274 * TODO: Refactor this function to allow this check to work
10275 * in all conditions.
10277 if (amdgpu_freesync_vid_mode &&
10278 dm_new_crtc_state->stream &&
10279 is_timing_unchanged_for_freesync(new_crtc_state, old_crtc_state))
10282 if (dm_new_crtc_state->stream &&
10283 dc_is_stream_unchanged(new_stream, dm_old_crtc_state->stream) &&
10284 dc_is_stream_scaling_unchanged(new_stream, dm_old_crtc_state->stream)) {
10285 new_crtc_state->mode_changed = false;
10286 DRM_DEBUG_DRIVER("Mode change not required, setting mode_changed to %d",
10287 new_crtc_state->mode_changed);
10291 /* mode_changed flag may get updated above, need to check again */
10292 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
10296 "amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
10297 "planes_changed:%d, mode_changed:%d,active_changed:%d,"
10298 "connectors_changed:%d\n",
10300 new_crtc_state->enable,
10301 new_crtc_state->active,
10302 new_crtc_state->planes_changed,
10303 new_crtc_state->mode_changed,
10304 new_crtc_state->active_changed,
10305 new_crtc_state->connectors_changed);
10307 /* Remove stream for any changed/disabled CRTC */
10310 if (!dm_old_crtc_state->stream)
10313 if (amdgpu_freesync_vid_mode && dm_new_crtc_state->stream &&
10314 is_timing_unchanged_for_freesync(new_crtc_state,
10316 new_crtc_state->mode_changed = false;
10318 "Mode change not required for front porch change, "
10319 "setting mode_changed to %d",
10320 new_crtc_state->mode_changed);
10322 set_freesync_fixed_config(dm_new_crtc_state);
10325 } else if (amdgpu_freesync_vid_mode && aconnector &&
10326 is_freesync_video_mode(&new_crtc_state->mode,
10328 struct drm_display_mode *high_mode;
10330 high_mode = get_highest_refresh_rate_mode(aconnector, false);
10331 if (!drm_mode_equal(&new_crtc_state->mode, high_mode)) {
10332 set_freesync_fixed_config(dm_new_crtc_state);
10336 ret = dm_atomic_get_state(state, &dm_state);
10340 DRM_DEBUG_DRIVER("Disabling DRM crtc: %d\n",
10343 /* i.e. reset mode */
10344 if (dc_remove_stream_from_ctx(
10347 dm_old_crtc_state->stream) != DC_OK) {
10352 dc_stream_release(dm_old_crtc_state->stream);
10353 dm_new_crtc_state->stream = NULL;
10355 reset_freesync_config_for_crtc(dm_new_crtc_state);
10357 *lock_and_validation_needed = true;
10359 } else {/* Add stream for any updated/enabled CRTC */
10361 * Quick fix to prevent a NULL pointer on new_stream when an
10362 * added MST connector is not found in the existing crtc_state in chained mode.
10363 * TODO: dig out the root cause of this.
10365 if (!aconnector || (!aconnector->dc_sink && aconnector->mst_port))
10368 if (modereset_required(new_crtc_state))
10371 if (modeset_required(new_crtc_state, new_stream,
10372 dm_old_crtc_state->stream)) {
10374 WARN_ON(dm_new_crtc_state->stream);
10376 ret = dm_atomic_get_state(state, &dm_state);
10380 dm_new_crtc_state->stream = new_stream;
10382 dc_stream_retain(new_stream);
10384 DRM_DEBUG_ATOMIC("Enabling DRM crtc: %d\n",
10387 if (dc_add_stream_to_ctx(
10390 dm_new_crtc_state->stream) != DC_OK) {
10395 *lock_and_validation_needed = true;
10400 /* Release extra reference */
10402 dc_stream_release(new_stream);
10405 * We want to do dc stream updates that do not require a
10406 * full modeset below.
10408 if (!(enable && aconnector && new_crtc_state->active))
10411 * Given the above conditions, the dc state cannot be NULL because:
10412 * 1. We're in the process of enabling CRTCs (the stream has just been
10413 * added to the dc context, or is already in it),
10414 * 2. the CRTC has a valid connector attached, and
10415 * 3. the CRTC is currently active and enabled.
10416 * => The dc stream state currently exists.
10418 BUG_ON(dm_new_crtc_state->stream == NULL);
10420 /* Scaling or underscan settings */
10421 if (is_scaling_state_different(dm_old_conn_state, dm_new_conn_state) ||
10422 drm_atomic_crtc_needs_modeset(new_crtc_state))
10423 update_stream_scaling_settings(
10424 &new_crtc_state->mode, dm_new_conn_state, dm_new_crtc_state->stream);
10427 dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
10430 * Color management settings. We also update color properties
10431 * when a modeset is needed, to ensure it gets reprogrammed.
10433 if (dm_new_crtc_state->base.color_mgmt_changed ||
10434 drm_atomic_crtc_needs_modeset(new_crtc_state)) {
10435 ret = amdgpu_dm_update_crtc_color_mgmt(dm_new_crtc_state);
10440 /* Update Freesync settings. */
10441 get_freesync_config_for_crtc(dm_new_crtc_state,
10442 dm_new_conn_state);
10448 dc_stream_release(new_stream);
10452 static bool should_reset_plane(struct drm_atomic_state *state,
10453 struct drm_plane *plane,
10454 struct drm_plane_state *old_plane_state,
10455 struct drm_plane_state *new_plane_state)
10457 struct drm_plane *other;
10458 struct drm_plane_state *old_other_state, *new_other_state;
10459 struct drm_crtc_state *new_crtc_state;
10463 * TODO: Remove this hack once the checks below are sufficient
10464 * to determine when we need to reset all the planes on
10465 * the CRTC.
10466 */
10467 if (state->allow_modeset)
10470 /* Exit early if we know that we're adding or removing the plane. */
10471 if (old_plane_state->crtc != new_plane_state->crtc)
10474 /* old crtc == new_crtc == NULL, plane not in context. */
10475 if (!new_plane_state->crtc)
10479 drm_atomic_get_new_crtc_state(state, new_plane_state->crtc);
10481 if (!new_crtc_state)
10484 /* CRTC Degamma changes currently require us to recreate planes. */
10485 if (new_crtc_state->color_mgmt_changed)
10488 if (drm_atomic_crtc_needs_modeset(new_crtc_state))
10492 * If there are any new primary or overlay planes being added or
10493 * removed then the z-order can potentially change. To ensure
10494 * correct z-order and pipe acquisition, the current DC architecture
10495 * requires us to remove and recreate all existing planes.
10497 * TODO: Come up with a more elegant solution for this.
10499 for_each_oldnew_plane_in_state(state, other, old_other_state, new_other_state, i) {
10500 struct amdgpu_framebuffer *old_afb, *new_afb;
10501 if (other->type == DRM_PLANE_TYPE_CURSOR)
10504 if (old_other_state->crtc != new_plane_state->crtc &&
10505 new_other_state->crtc != new_plane_state->crtc)
10508 if (old_other_state->crtc != new_other_state->crtc)
10511 /* Src/dst size and scaling updates. */
10512 if (old_other_state->src_w != new_other_state->src_w ||
10513 old_other_state->src_h != new_other_state->src_h ||
10514 old_other_state->crtc_w != new_other_state->crtc_w ||
10515 old_other_state->crtc_h != new_other_state->crtc_h)
10518 /* Rotation / mirroring updates. */
10519 if (old_other_state->rotation != new_other_state->rotation)
10522 /* Blending updates. */
10523 if (old_other_state->pixel_blend_mode !=
10524 new_other_state->pixel_blend_mode)
10527 /* Alpha updates. */
10528 if (old_other_state->alpha != new_other_state->alpha)
10531 /* Colorspace changes. */
10532 if (old_other_state->color_range != new_other_state->color_range ||
10533 old_other_state->color_encoding != new_other_state->color_encoding)
10536 /* Framebuffer checks fall at the end. */
10537 if (!old_other_state->fb || !new_other_state->fb)
10540 /* Pixel format changes can require bandwidth updates. */
10541 if (old_other_state->fb->format != new_other_state->fb->format)
10544 old_afb = (struct amdgpu_framebuffer *)old_other_state->fb;
10545 new_afb = (struct amdgpu_framebuffer *)new_other_state->fb;
10547 /* Tiling and DCC changes also require bandwidth updates. */
10548 if (old_afb->tiling_flags != new_afb->tiling_flags ||
10549 old_afb->base.modifier != new_afb->base.modifier)
10556 static int dm_check_cursor_fb(struct amdgpu_crtc *new_acrtc,
10557 struct drm_plane_state *new_plane_state,
10558 struct drm_framebuffer *fb)
10560 struct amdgpu_device *adev = drm_to_adev(new_acrtc->base.dev);
10561 struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(fb);
10562 unsigned int pitch;
10565 if (fb->width > new_acrtc->max_cursor_width ||
10566 fb->height > new_acrtc->max_cursor_height) {
10567 DRM_DEBUG_ATOMIC("Bad cursor FB size %dx%d\n",
10568 new_plane_state->fb->width,
10569 new_plane_state->fb->height);
10572 if (new_plane_state->src_w != fb->width << 16 ||
10573 new_plane_state->src_h != fb->height << 16) {
10574 DRM_DEBUG_ATOMIC("Cropping not supported for cursor plane\n");
10578 /* Pitch in pixels */
10579 pitch = fb->pitches[0] / fb->format->cpp[0];
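/*
 * For example, a 64x64 ARGB8888 cursor FB has pitches[0] == 256 bytes and
 * cpp[0] == 4, giving a pitch of 64 pixels, which matches fb->width.
 */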
10581 if (fb->width != pitch) {
10582 DRM_DEBUG_ATOMIC("Cursor FB width %d doesn't match pitch %d",
10591 /* FB pitch is supported by cursor plane */
10594 DRM_DEBUG_ATOMIC("Bad cursor FB pitch %d px\n", pitch);
10598 /* Core DRM takes care of checking FB modifiers, so we only need to
10599 * check tiling flags when the FB doesn't have a modifier. */
10600 if (!(fb->flags & DRM_MODE_FB_MODIFIERS)) {
10601 if (adev->family < AMDGPU_FAMILY_AI) {
10602 linear = AMDGPU_TILING_GET(afb->tiling_flags, ARRAY_MODE) != DC_ARRAY_2D_TILED_THIN1 &&
10603 AMDGPU_TILING_GET(afb->tiling_flags, ARRAY_MODE) != DC_ARRAY_1D_TILED_THIN1 &&
10604 AMDGPU_TILING_GET(afb->tiling_flags, MICRO_TILE_MODE) == 0;
10606 linear = AMDGPU_TILING_GET(afb->tiling_flags, SWIZZLE_MODE) == 0;
10609 DRM_DEBUG_ATOMIC("Cursor FB not linear");
10617 static int dm_update_plane_state(struct dc *dc,
10618 struct drm_atomic_state *state,
10619 struct drm_plane *plane,
10620 struct drm_plane_state *old_plane_state,
10621 struct drm_plane_state *new_plane_state,
10623 bool *lock_and_validation_needed)
10626 struct dm_atomic_state *dm_state = NULL;
10627 struct drm_crtc *new_plane_crtc, *old_plane_crtc;
10628 struct drm_crtc_state *old_crtc_state, *new_crtc_state;
10629 struct dm_crtc_state *dm_new_crtc_state, *dm_old_crtc_state;
10630 struct dm_plane_state *dm_new_plane_state, *dm_old_plane_state;
10631 struct amdgpu_crtc *new_acrtc;
10636 new_plane_crtc = new_plane_state->crtc;
10637 old_plane_crtc = old_plane_state->crtc;
10638 dm_new_plane_state = to_dm_plane_state(new_plane_state);
10639 dm_old_plane_state = to_dm_plane_state(old_plane_state);
10641 if (plane->type == DRM_PLANE_TYPE_CURSOR) {
10642 if (!enable || !new_plane_crtc ||
10643 drm_atomic_plane_disabling(plane->state, new_plane_state))
10646 new_acrtc = to_amdgpu_crtc(new_plane_crtc);
10648 if (new_plane_state->src_x != 0 || new_plane_state->src_y != 0) {
10649 DRM_DEBUG_ATOMIC("Cropping not supported for cursor plane\n");
10653 if (new_plane_state->fb) {
10654 ret = dm_check_cursor_fb(new_acrtc, new_plane_state,
10655 new_plane_state->fb);
10663 needs_reset = should_reset_plane(state, plane, old_plane_state,
10666 /* Remove any changed/removed planes */
10671 if (!old_plane_crtc)
10674 old_crtc_state = drm_atomic_get_old_crtc_state(
10675 state, old_plane_crtc);
10676 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
10678 if (!dm_old_crtc_state->stream)
10681 DRM_DEBUG_ATOMIC("Disabling DRM plane: %d on DRM crtc %d\n",
10682 plane->base.id, old_plane_crtc->base.id);
10684 ret = dm_atomic_get_state(state, &dm_state);
10688 if (!dc_remove_plane_from_context(
10690 dm_old_crtc_state->stream,
10691 dm_old_plane_state->dc_state,
10692 dm_state->context)) {
10698 dc_plane_state_release(dm_old_plane_state->dc_state);
10699 dm_new_plane_state->dc_state = NULL;
10701 *lock_and_validation_needed = true;
10703 } else { /* Add new planes */
10704 struct dc_plane_state *dc_new_plane_state;
10706 if (drm_atomic_plane_disabling(plane->state, new_plane_state))
10709 if (!new_plane_crtc)
10712 new_crtc_state = drm_atomic_get_new_crtc_state(state, new_plane_crtc);
10713 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
10715 if (!dm_new_crtc_state->stream)
10721 ret = dm_plane_helper_check_state(new_plane_state, new_crtc_state);
10725 WARN_ON(dm_new_plane_state->dc_state);
10727 dc_new_plane_state = dc_create_plane_state(dc);
10728 if (!dc_new_plane_state)
10731 DRM_DEBUG_ATOMIC("Enabling DRM plane: %d on DRM crtc %d\n",
10732 plane->base.id, new_plane_crtc->base.id);
10734 ret = fill_dc_plane_attributes(
10735 drm_to_adev(new_plane_crtc->dev),
10736 dc_new_plane_state,
10740 dc_plane_state_release(dc_new_plane_state);
10744 ret = dm_atomic_get_state(state, &dm_state);
10746 dc_plane_state_release(dc_new_plane_state);
10751 * Any atomic check errors that occur after this will
10752 * not need a release. The plane state will be attached
10753 * to the stream, and therefore part of the atomic
10754 * state. It'll be released when the atomic state is
10757 if (!dc_add_plane_to_context(
10759 dm_new_crtc_state->stream,
10760 dc_new_plane_state,
10761 dm_state->context)) {
10763 dc_plane_state_release(dc_new_plane_state);
10767 dm_new_plane_state->dc_state = dc_new_plane_state;
10769 dm_new_crtc_state->mpo_requested |= (plane->type == DRM_PLANE_TYPE_OVERLAY);
10771 /* Tell DC to do a full surface update every time there
10772 * is a plane change. Inefficient, but works for now.
10774 dm_new_plane_state->dc_state->update_flags.bits.full_update = 1;
10776 *lock_and_validation_needed = true;
10783 static void dm_get_oriented_plane_size(struct drm_plane_state *plane_state,
10784 int *src_w, int *src_h)
10786 switch (plane_state->rotation & DRM_MODE_ROTATE_MASK) {
10787 case DRM_MODE_ROTATE_90:
10788 case DRM_MODE_ROTATE_270:
10789 *src_w = plane_state->src_h >> 16;
10790 *src_h = plane_state->src_w >> 16;
10792 case DRM_MODE_ROTATE_0:
10793 case DRM_MODE_ROTATE_180:
10795 *src_w = plane_state->src_w >> 16;
10796 *src_h = plane_state->src_h >> 16;
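/*
 * plane_state->src_w/src_h are 16.16 fixed point, hence the >> 16 above:
 * a source width of 1920 pixels is stored as 1920 << 16 == 125829120.
 * A 90/270 degree rotation swaps the oriented width and height.
 */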
10801 static int dm_check_crtc_cursor(struct drm_atomic_state *state,
10802 struct drm_crtc *crtc,
10803 struct drm_crtc_state *new_crtc_state)
10805 struct drm_plane *cursor = crtc->cursor, *underlying;
10806 struct drm_plane_state *new_cursor_state, *new_underlying_state;
10808 int cursor_scale_w, cursor_scale_h, underlying_scale_w, underlying_scale_h;
10809 int cursor_src_w, cursor_src_h;
10810 int underlying_src_w, underlying_src_h;
10812 /* On DCE and DCN there is no dedicated hardware cursor plane. We get a
10813 * cursor per pipe, but it is going to inherit the scaling and
10814 * positioning from the underlying pipe. Check that the cursor plane's
10815 * scaling matches the underlying planes'. */
10817 new_cursor_state = drm_atomic_get_new_plane_state(state, cursor);
10818 if (!new_cursor_state || !new_cursor_state->fb) {
10822 dm_get_oriented_plane_size(new_cursor_state, &cursor_src_w, &cursor_src_h);
10823 cursor_scale_w = new_cursor_state->crtc_w * 1000 / cursor_src_w;
10824 cursor_scale_h = new_cursor_state->crtc_h * 1000 / cursor_src_h;
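/*
 * The scales are in thousandths: e.g. a 64x64 cursor FB shown at 64x64
 * yields cursor_scale_w == cursor_scale_h == 1000 (1.0x), while an
 * underlying 1920-wide plane shown 3840 wide yields 2000 (2.0x) and would
 * be rejected against the unscaled cursor below.
 */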
10826 for_each_new_plane_in_state_reverse(state, underlying, new_underlying_state, i) {
10827 /* Narrow down to non-cursor planes on the same CRTC as the cursor */
10828 if (new_underlying_state->crtc != crtc || underlying == crtc->cursor)
10831 /* Ignore disabled planes */
10832 if (!new_underlying_state->fb)
10835 dm_get_oriented_plane_size(new_underlying_state,
10836 &underlying_src_w, &underlying_src_h);
10837 underlying_scale_w = new_underlying_state->crtc_w * 1000 / underlying_src_w;
10838 underlying_scale_h = new_underlying_state->crtc_h * 1000 / underlying_src_h;
10840 if (cursor_scale_w != underlying_scale_w ||
10841 cursor_scale_h != underlying_scale_h) {
10842 drm_dbg_atomic(crtc->dev,
10843 "Cursor [PLANE:%d:%s] scaling doesn't match underlying [PLANE:%d:%s]\n",
10844 cursor->base.id, cursor->name, underlying->base.id, underlying->name);
10848 /* If this plane covers the whole CRTC, no need to check planes underneath */
10849 if (new_underlying_state->crtc_x <= 0 &&
10850 new_underlying_state->crtc_y <= 0 &&
10851 new_underlying_state->crtc_x + new_underlying_state->crtc_w >= new_crtc_state->mode.hdisplay &&
10852 new_underlying_state->crtc_y + new_underlying_state->crtc_h >= new_crtc_state->mode.vdisplay)
10859 #if defined(CONFIG_DRM_AMD_DC_DCN)
10860 static int add_affected_mst_dsc_crtcs(struct drm_atomic_state *state, struct drm_crtc *crtc)
10862 struct drm_connector *connector;
10863 struct drm_connector_state *conn_state;
10864 struct amdgpu_dm_connector *aconnector = NULL;
10866 for_each_new_connector_in_state(state, connector, conn_state, i) {
10867 if (conn_state->crtc != crtc)
10870 aconnector = to_amdgpu_dm_connector(connector);
10871 if (!aconnector->port || !aconnector->mst_port)
10880 return drm_dp_mst_add_affected_dsc_crtcs(state, &aconnector->mst_port->mst_mgr);
10885 * amdgpu_dm_atomic_check() - Atomic check implementation for AMDgpu DM.
10886 * @dev: The DRM device
10887 * @state: The atomic state to commit
10889 * Validate that the given atomic state is programmable by DC into hardware.
10890 * This involves constructing a &struct dc_state reflecting the new hardware
10891 * state we wish to commit, then querying DC to see if it is programmable. It's
10892 * important not to modify the existing DC state. Otherwise, atomic_check
10893 * may unexpectedly commit hardware changes.
10895 * When validating the DC state, it's important that the right locks are
10896 * acquired. For a full update, which removes/adds/updates streams on one
10897 * CRTC while flipping on another CRTC, acquiring the global lock guarantees
10898 * that any such full-update commit will wait for completion of any outstanding
10899 * flip using DRM's synchronization events.
10901 * Note that DM adds the affected connectors for all CRTCs in state, even if that
10902 * might not seem necessary. This is because DC stream creation requires the
10903 * DC sink, which is tied to the DRM connector state. Cleaning this up should
10904 * be possible but non-trivial - a possible TODO item.
10906 * Return: 0 on success, or a negative error code if validation failed.
10908 static int amdgpu_dm_atomic_check(struct drm_device *dev,
10909 struct drm_atomic_state *state)
10911 struct amdgpu_device *adev = drm_to_adev(dev);
10912 struct dm_atomic_state *dm_state = NULL;
10913 struct dc *dc = adev->dm.dc;
10914 struct drm_connector *connector;
10915 struct drm_connector_state *old_con_state, *new_con_state;
10916 struct drm_crtc *crtc;
10917 struct drm_crtc_state *old_crtc_state, *new_crtc_state;
10918 struct drm_plane *plane;
10919 struct drm_plane_state *old_plane_state, *new_plane_state;
10920 enum dc_status status;
10922 bool lock_and_validation_needed = false;
10923 struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
10924 #if defined(CONFIG_DRM_AMD_DC_DCN)
10925 struct dsc_mst_fairness_vars vars[MAX_PIPES];
10926 struct drm_dp_mst_topology_state *mst_state;
10927 struct drm_dp_mst_topology_mgr *mgr;
10930 trace_amdgpu_dm_atomic_check_begin(state);
10932 ret = drm_atomic_helper_check_modeset(dev, state);
10934 DRM_DEBUG_DRIVER("drm_atomic_helper_check_modeset() failed\n");
10938 /* Check connector changes */
10939 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
10940 struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
10941 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
10943 /* Skip connectors that are disabled or part of modeset already. */
10944 if (!old_con_state->crtc && !new_con_state->crtc)
10947 if (!new_con_state->crtc)
10950 new_crtc_state = drm_atomic_get_crtc_state(state, new_con_state->crtc);
10951 if (IS_ERR(new_crtc_state)) {
10952 DRM_DEBUG_DRIVER("drm_atomic_get_crtc_state() failed\n");
10953 ret = PTR_ERR(new_crtc_state);
10957 if (dm_old_con_state->abm_level !=
10958 dm_new_con_state->abm_level)
10959 new_crtc_state->connectors_changed = true;
10962 #if defined(CONFIG_DRM_AMD_DC_DCN)
10963 if (dc_resource_is_dsc_encoding_supported(dc)) {
10964 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
10965 if (drm_atomic_crtc_needs_modeset(new_crtc_state)) {
10966 ret = add_affected_mst_dsc_crtcs(state, crtc);
10968 DRM_DEBUG_DRIVER("add_affected_mst_dsc_crtcs() failed\n");
10975 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
10976 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
10978 if (!drm_atomic_crtc_needs_modeset(new_crtc_state) &&
10979 !new_crtc_state->color_mgmt_changed &&
10980 old_crtc_state->vrr_enabled == new_crtc_state->vrr_enabled &&
10981 dm_old_crtc_state->dsc_force_changed == false)
10984 ret = amdgpu_dm_verify_lut_sizes(new_crtc_state);
10986 DRM_DEBUG_DRIVER("amdgpu_dm_verify_lut_sizes() failed\n");
10990 if (!new_crtc_state->enable)
10993 ret = drm_atomic_add_affected_connectors(state, crtc);
10995 DRM_DEBUG_DRIVER("drm_atomic_add_affected_connectors() failed\n");
10999 ret = drm_atomic_add_affected_planes(state, crtc);
11001 DRM_DEBUG_DRIVER("drm_atomic_add_affected_planes() failed\n");
11005 if (dm_old_crtc_state->dsc_force_changed)
11006 new_crtc_state->mode_changed = true;
11010 * Add all primary and overlay planes on the CRTC to the state
11011 * whenever a plane is enabled to maintain correct z-ordering
11012 * and to enable fast surface updates.
11014 drm_for_each_crtc(crtc, dev) {
11015 bool modified = false;
11017 for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
11018 if (plane->type == DRM_PLANE_TYPE_CURSOR)
11021 if (new_plane_state->crtc == crtc ||
11022 old_plane_state->crtc == crtc) {
11031 drm_for_each_plane_mask(plane, state->dev, crtc->state->plane_mask) {
11032 if (plane->type == DRM_PLANE_TYPE_CURSOR)
11036 drm_atomic_get_plane_state(state, plane);
11038 if (IS_ERR(new_plane_state)) {
11039 ret = PTR_ERR(new_plane_state);
11040 DRM_DEBUG_DRIVER("new_plane_state is BAD\n");
11046 /* Remove existing planes if they are modified */
11047 for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
11048 ret = dm_update_plane_state(dc, state, plane,
11052 &lock_and_validation_needed);
11054 DRM_DEBUG_DRIVER("dm_update_plane_state() failed\n");
11059 /* Disable all crtcs which require disable */
11060 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
11061 ret = dm_update_crtc_state(&adev->dm, state, crtc,
11065 &lock_and_validation_needed);
11067 DRM_DEBUG_DRIVER("DISABLE: dm_update_crtc_state() failed\n");
11072 /* Enable all crtcs which require enable */
11073 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
11074 ret = dm_update_crtc_state(&adev->dm, state, crtc,
11078 &lock_and_validation_needed);
11080 DRM_DEBUG_DRIVER("ENABLE: dm_update_crtc_state() failed\n");
11085 /* Add new/modified planes */
11086 for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
11087 ret = dm_update_plane_state(dc, state, plane,
11091 &lock_and_validation_needed);
11093 DRM_DEBUG_DRIVER("dm_update_plane_state() failed\n");
11098 /* Run this here since we want to validate the streams we created */
11099 ret = drm_atomic_helper_check_planes(dev, state);
11101 DRM_DEBUG_DRIVER("drm_atomic_helper_check_planes() failed\n");
11105 for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
11106 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
11107 if (dm_new_crtc_state->mpo_requested)
11108 DRM_DEBUG_DRIVER("MPO enablement requested on crtc:[%p]\n", crtc);
11111 /* Check cursor planes scaling */
11112 for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
11113 ret = dm_check_crtc_cursor(state, crtc, new_crtc_state);
11115 DRM_DEBUG_DRIVER("dm_check_crtc_cursor() failed\n");
11120 if (state->legacy_cursor_update) {
11122 * This is a fast cursor update coming from the plane update
11123 * helper; check if it can be done asynchronously for better
11124 * performance.
11125 */
11126 state->async_update =
11127 !drm_atomic_helper_async_check(dev, state);
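/*
 * drm_atomic_helper_async_check() returns 0 when the commit can be applied
 * asynchronously, so the negation above yields true exactly for updates
 * that qualify for the async fast path.
 */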
11130 * Skip the remaining global validation if this is an async
11131 * update. Cursor updates can be done without affecting
11132 * state or bandwidth calcs and this avoids the performance
11133 * penalty of locking the private state object and
11134 * allocating a new dc_state.
11136 if (state->async_update)
11140 /* Check scaling and underscan changes */
11141 /* TODO: Removed scaling-changes validation due to inability to commit
11142 * a new stream into the context w/o causing a full reset. Need to
11143 * decide how to handle.
11145 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
11146 struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
11147 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
11148 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
11150 /* Skip any modesets/resets */
11151 if (!acrtc || drm_atomic_crtc_needs_modeset(
11152 drm_atomic_get_new_crtc_state(state, &acrtc->base)))
11155 /* Skip anything that is not a scaling or underscan change */
11156 if (!is_scaling_state_different(dm_new_con_state, dm_old_con_state))
11159 lock_and_validation_needed = true;
11162 #if defined(CONFIG_DRM_AMD_DC_DCN)
11163 /* set the slot info for each mst_state based on the link encoding format */
11164 for_each_new_mst_mgr_in_state(state, mgr, mst_state, i) {
11165 struct amdgpu_dm_connector *aconnector;
11166 struct drm_connector *connector;
11167 struct drm_connector_list_iter iter;
11168 u8 link_coding_cap;
11170 if (!mgr->mst_state)
11173 drm_connector_list_iter_begin(dev, &iter);
11174 drm_for_each_connector_iter(connector, &iter) {
11175 int id = connector->index;
11177 if (id == mst_state->mgr->conn_base_id) {
11178 aconnector = to_amdgpu_dm_connector(connector);
11179 link_coding_cap = dc_link_dp_mst_decide_link_encoding_format(aconnector->dc_link);
11180 drm_dp_mst_update_slots(mst_state, link_coding_cap);
11185 drm_connector_list_iter_end(&iter);
11190 * Streams and planes are reset when there are changes that affect
11191 * bandwidth. Anything that affects bandwidth needs to go through
11192 * DC global validation to ensure that the configuration can be applied
11193 * to hardware.
11194 *
11195 * We have to currently stall out here in atomic_check for outstanding
11196 * commits to finish in this case because our IRQ handlers reference
11197 * DRM state directly - we can end up disabling interrupts too early
11198 * otherwise.
11199 *
11200 * TODO: Remove this stall and drop DM state private objects.
11202 if (lock_and_validation_needed) {
11203 ret = dm_atomic_get_state(state, &dm_state);
11205 DRM_DEBUG_DRIVER("dm_atomic_get_state() failed\n");
11209 ret = do_aquire_global_lock(dev, state);
11211 DRM_DEBUG_DRIVER("do_aquire_global_lock() failed\n");
11215 #if defined(CONFIG_DRM_AMD_DC_DCN)
11216 if (!compute_mst_dsc_configs_for_state(state, dm_state->context, vars)) {
11217 DRM_DEBUG_DRIVER("compute_mst_dsc_configs_for_state() failed\n");
11221 ret = dm_update_mst_vcpi_slots_for_dsc(state, dm_state->context, vars);
11223 DRM_DEBUG_DRIVER("dm_update_mst_vcpi_slots_for_dsc() failed\n");
11229 * Perform validation of MST topology in the state:
11230 * We need to perform the MST atomic check before calling
11231 * dc_validate_global_state(), or we risk getting stuck
11232 * in an infinite loop and eventually hanging.
11234 ret = drm_dp_mst_atomic_check(state);
11236 DRM_DEBUG_DRIVER("drm_dp_mst_atomic_check() failed\n");
11239 status = dc_validate_global_state(dc, dm_state->context, true);
11240 if (status != DC_OK) {
11241 DRM_DEBUG_DRIVER("DC global validation failure: %s (%d)",
11242 dc_status_to_str(status), status);
11248 * The commit is a fast update. Fast updates shouldn't change
11249 * the DC context or affect global validation, and they can have
11250 * their commit work done in parallel with other commits not touching
11251 * the same resource. If we have a new DC context as part of
11252 * the DM atomic state from validation we need to free it and
11253 * retain the existing one instead.
11255 * Furthermore, since the DM atomic state only contains the DC
11256 * context and can safely be annulled, we can free the state
11257 * and clear the associated private object now to free
11258 * some memory and avoid a possible use-after-free later.
11261 for (i = 0; i < state->num_private_objs; i++) {
11262 struct drm_private_obj *obj = state->private_objs[i].ptr;
11264 if (obj->funcs == adev->dm.atomic_obj.funcs) {
11265 int j = state->num_private_objs-1;
11267 dm_atomic_destroy_state(obj,
11268 state->private_objs[i].state);
11270 /* If i is not at the end of the array then the
11271 * last element needs to be moved to where i was
11272 * before the array can safely be truncated.
11275 state->private_objs[i] =
11276 state->private_objs[j];
11278 state->private_objs[j].ptr = NULL;
11279 state->private_objs[j].state = NULL;
11280 state->private_objs[j].old_state = NULL;
11281 state->private_objs[j].new_state = NULL;
11283 state->num_private_objs = j;
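/*
 * This is a swap-remove: slot i is overwritten by the last element and the
 * array shrinks by one, avoiding a memmove of the tail. Only one DM private
 * object can be in the state, so no further matches need to be handled.
 */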
11289 /* Store the overall update type for use later in atomic check. */
11290 for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
11291 struct dm_crtc_state *dm_new_crtc_state =
11292 to_dm_crtc_state(new_crtc_state);
11294 dm_new_crtc_state->update_type = lock_and_validation_needed ?
11299 /* Must be success */
11302 trace_amdgpu_dm_atomic_check_finish(state, ret);
11307 if (ret == -EDEADLK)
11308 DRM_DEBUG_DRIVER("Atomic check stopped to avoid deadlock.\n");
11309 else if (ret == -EINTR || ret == -EAGAIN || ret == -ERESTARTSYS)
11310 DRM_DEBUG_DRIVER("Atomic check stopped due to signal.\n");
11312 DRM_DEBUG_DRIVER("Atomic check failed with err: %d \n", ret);
11314 trace_amdgpu_dm_atomic_check_finish(state, ret);
11319 static bool is_dp_capable_without_timing_msa(struct dc *dc,
11320 struct amdgpu_dm_connector *amdgpu_dm_connector)
11323 bool capable = false;
11325 if (amdgpu_dm_connector->dc_link &&
11326 dm_helpers_dp_read_dpcd(
11328 amdgpu_dm_connector->dc_link,
11329 DP_DOWN_STREAM_PORT_COUNT,
11331 sizeof(dpcd_data))) {
11332 capable = (dpcd_data & DP_MSA_TIMING_PAR_IGNORED) ? true : false;
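/*
 * DP_DOWN_STREAM_PORT_COUNT is DPCD register 0x007; bit 6
 * (DP_MSA_TIMING_PAR_IGNORED) advertises that the sink can ignore the MSA
 * timing parameters. Only when this bit is set does
 * amdgpu_dm_update_freesync_caps() go on to parse the EDID range
 * descriptor for DP/eDP sinks.
 */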
11338 static bool dm_edid_parser_send_cea(struct amdgpu_display_manager *dm,
11339 unsigned int offset,
11340 unsigned int total_length,
11342 unsigned int length,
11343 struct amdgpu_hdmi_vsdb_info *vsdb)
11346 union dmub_rb_cmd cmd;
11347 struct dmub_cmd_send_edid_cea *input;
11348 struct dmub_cmd_edid_cea_output *output;
11350 if (length > DMUB_EDID_CEA_DATA_CHUNK_BYTES)
11353 memset(&cmd, 0, sizeof(cmd));
11355 input = &cmd.edid_cea.data.input;
11357 cmd.edid_cea.header.type = DMUB_CMD__EDID_CEA;
11358 cmd.edid_cea.header.sub_type = 0;
11359 cmd.edid_cea.header.payload_bytes =
11360 sizeof(cmd.edid_cea) - sizeof(cmd.edid_cea.header);
11361 input->offset = offset;
11362 input->length = length;
11363 input->cea_total_length = total_length;
11364 memcpy(input->payload, data, length);
11366 res = dc_dmub_srv_cmd_with_reply_data(dm->dc->ctx->dmub_srv, &cmd);
11368 DRM_ERROR("EDID CEA parser failed\n");
11372 output = &cmd.edid_cea.data.output;
11374 if (output->type == DMUB_CMD__EDID_CEA_ACK) {
11375 if (!output->ack.success) {
11376 DRM_ERROR("EDID CEA ack failed at offset %d\n",
11377 output->ack.offset);
11379 } else if (output->type == DMUB_CMD__EDID_CEA_AMD_VSDB) {
11380 if (!output->amd_vsdb.vsdb_found)
11383 vsdb->freesync_supported = output->amd_vsdb.freesync_supported;
11384 vsdb->amd_vsdb_version = output->amd_vsdb.amd_vsdb_version;
11385 vsdb->min_refresh_rate_hz = output->amd_vsdb.min_frame_rate;
11386 vsdb->max_refresh_rate_hz = output->amd_vsdb.max_frame_rate;
11388 DRM_WARN("Unknown EDID CEA parser results\n");
11395 static bool parse_edid_cea_dmcu(struct amdgpu_display_manager *dm,
11396 uint8_t *edid_ext, int len,
11397 struct amdgpu_hdmi_vsdb_info *vsdb_info)
11401 /* send extension block to DMCU for parsing */
11402 for (i = 0; i < len; i += 8) {
11406 /* send 8 bytes at a time */
11407 if (!dc_edid_parser_send_cea(dm->dc, i, len, &edid_ext[i], 8))
11411 /* EDID block send completed; expect a result */
11412 int version, min_rate, max_rate;
11414 res = dc_edid_parser_recv_amd_vsdb(dm->dc, &version, &min_rate, &max_rate);
11416 /* amd vsdb found */
11417 vsdb_info->freesync_supported = 1;
11418 vsdb_info->amd_vsdb_version = version;
11419 vsdb_info->min_refresh_rate_hz = min_rate;
11420 vsdb_info->max_refresh_rate_hz = max_rate;
11428 res = dc_edid_parser_recv_cea_ack(dm->dc, &offset);
11436 static bool parse_edid_cea_dmub(struct amdgpu_display_manager *dm,
11437 uint8_t *edid_ext, int len,
11438 struct amdgpu_hdmi_vsdb_info *vsdb_info)
11442 /* send extension block to DMUB for parsing */
11443 for (i = 0; i < len; i += 8) {
11444 /* send 8 bytes at a time */
11445 if (!dm_edid_parser_send_cea(dm, i, len, &edid_ext[i], 8, vsdb_info))
11449 return vsdb_info->freesync_supported;
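/*
 * The CEA extension block is streamed to the DMUB firmware in 8-byte
 * chunks, so a standard 128-byte extension takes 16 calls; the firmware
 * acks intermediate chunks and, once it has seen the whole block, replies
 * with the parsed AMD VSDB contents (see dm_edid_parser_send_cea() above).
 */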
11452 static bool parse_edid_cea(struct amdgpu_dm_connector *aconnector,
11453 uint8_t *edid_ext, int len,
11454 struct amdgpu_hdmi_vsdb_info *vsdb_info)
11456 struct amdgpu_device *adev = drm_to_adev(aconnector->base.dev);
11458 if (adev->dm.dmub_srv)
11459 return parse_edid_cea_dmub(&adev->dm, edid_ext, len, vsdb_info);
11461 return parse_edid_cea_dmcu(&adev->dm, edid_ext, len, vsdb_info);
11464 static int parse_hdmi_amd_vsdb(struct amdgpu_dm_connector *aconnector,
11465 struct edid *edid, struct amdgpu_hdmi_vsdb_info *vsdb_info)
11467 uint8_t *edid_ext = NULL;
11469 bool valid_vsdb_found = false;
11471 /*----- drm_find_cea_extension() -----*/
11472 /* No EDID or EDID extensions */
11473 if (edid == NULL || edid->extensions == 0)
11476 /* Find CEA extension */
11477 for (i = 0; i < edid->extensions; i++) {
11478 edid_ext = (uint8_t *)edid + EDID_LENGTH * (i + 1);
11479 if (edid_ext[0] == CEA_EXT)
11483 if (i == edid->extensions)
11486 /*----- cea_db_offsets() -----*/
11487 if (edid_ext[0] != CEA_EXT)
11490 valid_vsdb_found = parse_edid_cea(aconnector, edid_ext, EDID_LENGTH, vsdb_info);
11492 return valid_vsdb_found ? i : -ENODEV;
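/*
 * EDID extension blocks are EDID_LENGTH (128) bytes each and follow the
 * 128-byte base block, so extension i lives at byte offset 128 * (i + 1);
 * a CEA extension is identified by tag byte 0x02 (CEA_EXT). On success the
 * index of the matching extension block is returned.
 */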
11495 void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,
11499 struct detailed_timing *timing;
11500 struct detailed_non_pixel *data;
11501 struct detailed_data_monitor_range *range;
11502 struct amdgpu_dm_connector *amdgpu_dm_connector =
11503 to_amdgpu_dm_connector(connector);
11504 struct dm_connector_state *dm_con_state = NULL;
11505 struct dc_sink *sink;
11507 struct drm_device *dev = connector->dev;
11508 struct amdgpu_device *adev = drm_to_adev(dev);
11509 bool freesync_capable = false;
11510 struct amdgpu_hdmi_vsdb_info vsdb_info = {0};
11512 if (!connector->state) {
11513 DRM_ERROR("%s - Connector has no state", __func__);
11517 sink = amdgpu_dm_connector->dc_sink ?
11518 amdgpu_dm_connector->dc_sink :
11519 amdgpu_dm_connector->dc_em_sink;
11521 if (!edid || !sink) {
11522 dm_con_state = to_dm_connector_state(connector->state);
11524 amdgpu_dm_connector->min_vfreq = 0;
11525 amdgpu_dm_connector->max_vfreq = 0;
11526 amdgpu_dm_connector->pixel_clock_mhz = 0;
11527 connector->display_info.monitor_range.min_vfreq = 0;
11528 connector->display_info.monitor_range.max_vfreq = 0;
11529 freesync_capable = false;
11534 dm_con_state = to_dm_connector_state(connector->state);
11536 if (!adev->dm.freesync_module)
11540 if (sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT
11541 || sink->sink_signal == SIGNAL_TYPE_EDP) {
11542 bool edid_check_required = false;
11545 edid_check_required = is_dp_capable_without_timing_msa(
11547 amdgpu_dm_connector);
11550 if (edid_check_required && (edid->version > 1 ||
11551 (edid->version == 1 && edid->revision > 1))) {
11552 for (i = 0; i < 4; i++) {
11554 timing = &edid->detailed_timings[i];
11555 data = &timing->data.other_data;
11556 range = &data->data.range;
11558 * Check if monitor has continuous frequency mode
11560 if (data->type != EDID_DETAIL_MONITOR_RANGE)
11563 * Check for the range-limits-only flag. If flag == 1 then
11564 * no additional timing information is provided.
11565 * Default GTF, GTF secondary curve and CVT are not
11566 * supported.
11567 */
11568 if (range->flags != 1)
11571 amdgpu_dm_connector->min_vfreq = range->min_vfreq;
11572 amdgpu_dm_connector->max_vfreq = range->max_vfreq;
11573 amdgpu_dm_connector->pixel_clock_mhz =
11574 range->pixel_clock_mhz * 10;
11576 connector->display_info.monitor_range.min_vfreq = range->min_vfreq;
11577 connector->display_info.monitor_range.max_vfreq = range->max_vfreq;
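/*
 * The EDID range-limit descriptor encodes the maximum pixel clock in
 * 10 MHz units, so a raw range->pixel_clock_mhz of 30 means 300 MHz -
 * hence the multiplication by 10 above.
 */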
11582 if (amdgpu_dm_connector->max_vfreq -
11583 amdgpu_dm_connector->min_vfreq > 10) {
11585 freesync_capable = true;
11588 } else if (edid && sink->sink_signal == SIGNAL_TYPE_HDMI_TYPE_A) {
11589 i = parse_hdmi_amd_vsdb(amdgpu_dm_connector, edid, &vsdb_info);
11590 if (i >= 0 && vsdb_info.freesync_supported) {
11591 timing = &edid->detailed_timings[i];
11592 data = &timing->data.other_data;
11594 amdgpu_dm_connector->min_vfreq = vsdb_info.min_refresh_rate_hz;
11595 amdgpu_dm_connector->max_vfreq = vsdb_info.max_refresh_rate_hz;
11596 if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10)
11597 freesync_capable = true;
11599 connector->display_info.monitor_range.min_vfreq = vsdb_info.min_refresh_rate_hz;
11600 connector->display_info.monitor_range.max_vfreq = vsdb_info.max_refresh_rate_hz;
11606 dm_con_state->freesync_capable = freesync_capable;
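/*
 * Note the "> 10" heuristic above: FreeSync is only advertised when the
 * usable refresh range spans more than 10 Hz, so e.g. an assumed 40-60 Hz
 * panel qualifies (20 Hz window) while a 58-60 Hz one does not.
 */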
11608 if (connector->vrr_capable_property)
11609 drm_connector_set_vrr_capable_property(connector,
11613 void amdgpu_dm_trigger_timing_sync(struct drm_device *dev)
11615 struct amdgpu_device *adev = drm_to_adev(dev);
11616 struct dc *dc = adev->dm.dc;
11619 mutex_lock(&adev->dm.dc_lock);
11620 if (dc->current_state) {
11621 for (i = 0; i < dc->current_state->stream_count; ++i)
11622 dc->current_state->streams[i]
11623 ->triggered_crtc_reset.enabled =
11624 adev->dm.force_timing_sync;
11626 dm_enable_per_frame_crtc_master_sync(dc->current_state);
11627 dc_trigger_sync(dc, dc->current_state);
11629 mutex_unlock(&adev->dm.dc_lock);
11632 void dm_write_reg_func(const struct dc_context *ctx, uint32_t address,
11633 uint32_t value, const char *func_name)
11635 #ifdef DM_CHECK_ADDR_0
11636 if (address == 0) {
11637 DC_ERR("invalid register write. address = 0");
11641 cgs_write_register(ctx->cgs_device, address, value);
11642 trace_amdgpu_dc_wreg(&ctx->perf_trace->write_count, address, value);
11645 uint32_t dm_read_reg_func(const struct dc_context *ctx, uint32_t address,
11646 const char *func_name)
11649 #ifdef DM_CHECK_ADDR_0
11650 if (address == 0) {
11651 DC_ERR("invalid register read; address = 0\n");
11656 if (ctx->dmub_srv &&
11657 ctx->dmub_srv->reg_helper_offload.gather_in_progress &&
11658 !ctx->dmub_srv->reg_helper_offload.should_burst_write) {
11663 value = cgs_read_register(ctx->cgs_device, address);
11665 trace_amdgpu_dc_rreg(&ctx->perf_trace->read_count, address, value);
11670 static int amdgpu_dm_set_dmub_async_sync_status(bool is_cmd_aux,
11671 struct dc_context *ctx,
11672 uint8_t status_type,
11673 uint32_t *operation_result)
11675 struct amdgpu_device *adev = ctx->driver_context;
11676 int return_status = -1;
11677 struct dmub_notification *p_notify = adev->dm.dmub_notify;
11680 if (status_type == DMUB_ASYNC_TO_SYNC_ACCESS_SUCCESS) {
11681 return_status = p_notify->aux_reply.length;
11682 *operation_result = p_notify->result;
11683 } else if (status_type == DMUB_ASYNC_TO_SYNC_ACCESS_TIMEOUT) {
11684 *operation_result = AUX_RET_ERROR_TIMEOUT;
11685 } else if (status_type == DMUB_ASYNC_TO_SYNC_ACCESS_FAIL) {
11686 *operation_result = AUX_RET_ERROR_ENGINE_ACQUIRE;
11688 *operation_result = AUX_RET_ERROR_UNKNOWN;
11691 if (status_type == DMUB_ASYNC_TO_SYNC_ACCESS_SUCCESS) {
11693 *operation_result = p_notify->sc_status;
11695 *operation_result = SET_CONFIG_UNKNOWN_ERROR;
11699 return return_status;
11702 int amdgpu_dm_process_dmub_aux_transfer_sync(bool is_cmd_aux, struct dc_context *ctx,
11703 unsigned int link_index, void *cmd_payload, void *operation_result)
11705 struct amdgpu_device *adev = ctx->driver_context;
11709 dc_process_dmub_aux_transfer_async(ctx->dc,
11710 link_index, (struct aux_payload *)cmd_payload);
11711 } else if (dc_process_dmub_set_config_async(ctx->dc, link_index,
11712 (struct set_config_cmd_payload *)cmd_payload,
11713 adev->dm.dmub_notify)) {
11714 return amdgpu_dm_set_dmub_async_sync_status(is_cmd_aux,
11715 ctx, DMUB_ASYNC_TO_SYNC_ACCESS_SUCCESS,
11716 (uint32_t *)operation_result);
11719 ret = wait_for_completion_timeout(&adev->dm.dmub_aux_transfer_done, 10 * HZ);
11721 DRM_ERROR("wait_for_completion_timeout timeout!");
11722 return amdgpu_dm_set_dmub_async_sync_status(is_cmd_aux,
11723 ctx, DMUB_ASYNC_TO_SYNC_ACCESS_TIMEOUT,
11724 (uint32_t *)operation_result);
11728 if (adev->dm.dmub_notify->result == AUX_RET_SUCCESS) {
11729 struct aux_payload *payload = (struct aux_payload *)cmd_payload;
11731 payload->reply[0] = adev->dm.dmub_notify->aux_reply.command;
11732 if (!payload->write && adev->dm.dmub_notify->aux_reply.length &&
11733 payload->reply[0] == AUX_TRANSACTION_REPLY_AUX_ACK) {
11734 memcpy(payload->data, adev->dm.dmub_notify->aux_reply.data,
11735 adev->dm.dmub_notify->aux_reply.length);
11740 return amdgpu_dm_set_dmub_async_sync_status(is_cmd_aux,
11741 ctx, DMUB_ASYNC_TO_SYNC_ACCESS_SUCCESS,
11742 (uint32_t *)operation_result);
11746 * Check whether seamless boot is supported.
11748 * So far we only support seamless boot on CHIP_VANGOGH.
11749 * If everything goes well, we may consider expanding
11750 * seamless boot to other ASICs.
11752 bool check_seamless_boot_capability(struct amdgpu_device *adev)
11754 switch (adev->asic_type) {
11756 if (!adev->mman.keep_stolen_vga_memory)