/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: AMD
 *
 */
/* The caprices of the preprocessor require that this be declared right here */
#define CREATE_TRACE_POINTS
29 #include "dm_services_types.h"
31 #include "dc_link_dp.h"
32 #include "link_enc_cfg.h"
33 #include "dc/inc/core_types.h"
34 #include "dal_asic_id.h"
35 #include "dmub/dmub_srv.h"
36 #include "dc/inc/hw/dmcu.h"
37 #include "dc/inc/hw/abm.h"
38 #include "dc/dc_dmub_srv.h"
39 #include "dc/dc_edid_parser.h"
40 #include "dc/dc_stat.h"
41 #include "amdgpu_dm_trace.h"
45 #include "amdgpu_display.h"
46 #include "amdgpu_ucode.h"
48 #include "amdgpu_dm.h"
#ifdef CONFIG_DRM_AMD_DC_HDCP
#include "amdgpu_dm_hdcp.h"
#include <drm/display/drm_hdcp_helper.h>
#endif
#include "amdgpu_pm.h"
54 #include "amdgpu_atombios.h"
56 #include "amd_shared.h"
57 #include "amdgpu_dm_irq.h"
58 #include "dm_helpers.h"
59 #include "amdgpu_dm_mst_types.h"
#if defined(CONFIG_DEBUG_FS)
#include "amdgpu_dm_debugfs.h"
#endif
#include "amdgpu_dm_psr.h"
65 #include "ivsrcid/ivsrcid_vislands30.h"
67 #include "i2caux_interface.h"
68 #include <linux/module.h>
69 #include <linux/moduleparam.h>
70 #include <linux/types.h>
71 #include <linux/pm_runtime.h>
72 #include <linux/pci.h>
73 #include <linux/firmware.h>
74 #include <linux/component.h>
75 #include <linux/dmi.h>
#include <drm/display/drm_dp_mst_helper.h>
#include <drm/display/drm_hdmi_helper.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_uapi.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_blend.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_edid.h>
#include <drm/drm_vblank.h>
#include <drm/drm_audio_component.h>
#include <drm/drm_gem_atomic_helper.h>
90 #include "ivsrcid/dcn/irqsrcs_dcn_1_0.h"
92 #include "dcn/dcn_1_0_offset.h"
93 #include "dcn/dcn_1_0_sh_mask.h"
94 #include "soc15_hw_ip.h"
95 #include "soc15_common.h"
96 #include "vega10_ip_offset.h"
98 #include "soc15_common.h"
100 #include "gc/gc_11_0_0_offset.h"
101 #include "gc/gc_11_0_0_sh_mask.h"
103 #include "modules/inc/mod_freesync.h"
104 #include "modules/power/power_helpers.h"
105 #include "modules/inc/mod_info_packet.h"
#define FIRMWARE_RENOIR_DMUB "amdgpu/renoir_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_RENOIR_DMUB);
#define FIRMWARE_SIENNA_CICHLID_DMUB "amdgpu/sienna_cichlid_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_SIENNA_CICHLID_DMUB);
#define FIRMWARE_NAVY_FLOUNDER_DMUB "amdgpu/navy_flounder_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_NAVY_FLOUNDER_DMUB);
#define FIRMWARE_GREEN_SARDINE_DMUB "amdgpu/green_sardine_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_GREEN_SARDINE_DMUB);
#define FIRMWARE_VANGOGH_DMUB "amdgpu/vangogh_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_VANGOGH_DMUB);
#define FIRMWARE_DIMGREY_CAVEFISH_DMUB "amdgpu/dimgrey_cavefish_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_DIMGREY_CAVEFISH_DMUB);
#define FIRMWARE_BEIGE_GOBY_DMUB "amdgpu/beige_goby_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_BEIGE_GOBY_DMUB);
#define FIRMWARE_YELLOW_CARP_DMUB "amdgpu/yellow_carp_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_YELLOW_CARP_DMUB);
#define FIRMWARE_DCN_314_DMUB "amdgpu/dcn_3_1_4_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_DCN_314_DMUB);
#define FIRMWARE_DCN_315_DMUB "amdgpu/dcn_3_1_5_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_DCN_315_DMUB);
#define FIRMWARE_DCN316_DMUB "amdgpu/dcn_3_1_6_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_DCN316_DMUB);

#define FIRMWARE_DCN_V3_2_0_DMCUB "amdgpu/dcn_3_2_0_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_DCN_V3_2_0_DMCUB);
#define FIRMWARE_DCN_V3_2_1_DMCUB "amdgpu/dcn_3_2_1_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_DCN_V3_2_1_DMCUB);
#define FIRMWARE_RAVEN_DMCU "amdgpu/raven_dmcu.bin"
MODULE_FIRMWARE(FIRMWARE_RAVEN_DMCU);

#define FIRMWARE_NAVI12_DMCU "amdgpu/navi12_dmcu.bin"
MODULE_FIRMWARE(FIRMWARE_NAVI12_DMCU);
/* Number of bytes in PSP header for firmware. */
#define PSP_HEADER_BYTES 0x100

/* Number of bytes in PSP footer for firmware. */
#define PSP_FOOTER_BYTES 0x100
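/*
 * These header/footer sizes matter when sizing the DMUB instruction region:
 * both dm_dmub_hw_init() and dm_dmub_sw_init() below subtract
 * PSP_HEADER_BYTES and PSP_FOOTER_BYTES from the firmware's inst_const_bytes
 * before copying it or reserving framebuffer memory for it.
 */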
/**
 * DOC: overview
 *
 * The AMDgpu display manager, **amdgpu_dm** (or even simpler,
 * **dm**) sits between DRM and DC. It acts as a liaison, converting DRM
 * requests into DC requests, and DC responses into DRM responses.
 *
 * The root control structure is &struct amdgpu_display_manager.
 */
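/*
 * At a high level, DRM hands atomic state to this driver through
 * amdgpu_dm_atomic_check() for validation and amdgpu_dm_atomic_commit_tail()
 * for hardware programming; both are declared below.
 */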
/* basic init/fini API */
static int amdgpu_dm_init(struct amdgpu_device *adev);
static void amdgpu_dm_fini(struct amdgpu_device *adev);
static bool is_freesync_video_mode(const struct drm_display_mode *mode,
				   struct amdgpu_dm_connector *aconnector);
static enum drm_mode_subconnector get_subconnector_type(struct dc_link *link)
{
	switch (link->dpcd_caps.dongle_type) {
	case DISPLAY_DONGLE_NONE:
		return DRM_MODE_SUBCONNECTOR_Native;
	case DISPLAY_DONGLE_DP_VGA_CONVERTER:
		return DRM_MODE_SUBCONNECTOR_VGA;
	case DISPLAY_DONGLE_DP_DVI_CONVERTER:
	case DISPLAY_DONGLE_DP_DVI_DONGLE:
		return DRM_MODE_SUBCONNECTOR_DVID;
	case DISPLAY_DONGLE_DP_HDMI_CONVERTER:
	case DISPLAY_DONGLE_DP_HDMI_DONGLE:
		return DRM_MODE_SUBCONNECTOR_HDMIA;
	case DISPLAY_DONGLE_DP_HDMI_MISMATCHED_DONGLE:
	default:
		return DRM_MODE_SUBCONNECTOR_Unknown;
	}
}
static void update_subconnector_property(struct amdgpu_dm_connector *aconnector)
{
	struct dc_link *link = aconnector->dc_link;
	struct drm_connector *connector = &aconnector->base;
	enum drm_mode_subconnector subconnector = DRM_MODE_SUBCONNECTOR_Unknown;

	if (connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort)
		return;

	if (aconnector->dc_sink)
		subconnector = get_subconnector_type(link);

	drm_object_property_set_value(&connector->base,
				      connector->dev->mode_config.dp_subconnector_property,
				      subconnector);
}
/*
 * Initializes drm_device display related structures, based on the information
 * provided by DAL. The drm structures are: drm_crtc, drm_connector,
 * drm_encoder, and drm_mode_config.
 *
 * Returns 0 on success
 */
static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev);

/* removes and deallocates the drm structures, created by the above function */
static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm);
static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
				struct drm_plane *plane,
				unsigned long possible_crtcs,
				const struct dc_plane_cap *plane_cap);
static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
			       struct drm_plane *plane,
			       uint32_t link_index);
static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
				    struct amdgpu_dm_connector *amdgpu_dm_connector,
				    uint32_t link_index,
				    struct amdgpu_encoder *amdgpu_encoder);
static int amdgpu_dm_encoder_init(struct drm_device *dev,
				  struct amdgpu_encoder *aencoder,
				  uint32_t link_index);

static int amdgpu_dm_connector_get_modes(struct drm_connector *connector);

static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state);

static int amdgpu_dm_atomic_check(struct drm_device *dev,
				  struct drm_atomic_state *state);

static void handle_cursor_update(struct drm_plane *plane,
				 struct drm_plane_state *old_plane_state);

static const struct drm_format_info *
amd_get_format_info(const struct drm_mode_fb_cmd2 *cmd);

static void handle_hpd_irq_helper(struct amdgpu_dm_connector *aconnector);
static void handle_hpd_rx_irq(void *param);

static bool
is_timing_unchanged_for_freesync(struct drm_crtc_state *old_crtc_state,
				 struct drm_crtc_state *new_crtc_state);
/**
 * dm_vblank_get_counter() - Get the vertical blank counter for a CRTC
 * @adev: [in] desired amdgpu device
 * @crtc: [in] index of the CRTC to get the counter from
 *
 * Return: counter for vertical blanks
 */
static u32 dm_vblank_get_counter(struct amdgpu_device *adev, int crtc)
{
	struct amdgpu_crtc *acrtc;

	if (crtc >= adev->mode_info.num_crtc)
		return 0;

	acrtc = adev->mode_info.crtcs[crtc];

	if (acrtc->dm_irq_params.stream == NULL) {
		DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n", crtc);
		return 0;
	}

	return dc_stream_get_vblank_counter(acrtc->dm_irq_params.stream);
}
static int dm_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
				  u32 *vbl, u32 *position)
{
	uint32_t v_blank_start, v_blank_end, h_position, v_position;
	struct amdgpu_crtc *acrtc;

	if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc))
		return -EINVAL;

	acrtc = adev->mode_info.crtcs[crtc];

	if (acrtc->dm_irq_params.stream == NULL) {
		DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n", crtc);
		return 0;
	}

	/*
	 * TODO rework base driver to use values directly.
	 * for now parse it back into reg-format
	 */
	dc_stream_get_scanoutpos(acrtc->dm_irq_params.stream,
				 &v_blank_start, &v_blank_end,
				 &h_position, &v_position);

	*position = v_position | (h_position << 16);
	*vbl = v_blank_start | (v_blank_end << 16);

	return 0;
}
306 static bool dm_is_idle(void *handle)
312 static int dm_wait_for_idle(void *handle)
318 static bool dm_check_soft_reset(void *handle)
323 static int dm_soft_reset(void *handle)
static struct amdgpu_crtc *
get_crtc_by_otg_inst(struct amdgpu_device *adev,
		     int otg_inst)
{
	struct drm_device *dev = adev_to_drm(adev);
	struct drm_crtc *crtc;
	struct amdgpu_crtc *amdgpu_crtc;

	if (WARN_ON(otg_inst == -1))
		return adev->mode_info.crtcs[0];

	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
		amdgpu_crtc = to_amdgpu_crtc(crtc);

		if (amdgpu_crtc->otg_inst == otg_inst)
			return amdgpu_crtc;
	}

	return NULL;
}
static inline bool amdgpu_dm_vrr_active_irq(struct amdgpu_crtc *acrtc)
{
	return acrtc->dm_irq_params.freesync_config.state ==
		       VRR_STATE_ACTIVE_VARIABLE ||
	       acrtc->dm_irq_params.freesync_config.state ==
		       VRR_STATE_ACTIVE_FIXED;
}

static inline bool amdgpu_dm_vrr_active(struct dm_crtc_state *dm_state)
{
	return dm_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE ||
	       dm_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED;
}
static inline bool is_dc_timing_adjust_needed(struct dm_crtc_state *old_state,
					      struct dm_crtc_state *new_state)
{
	if (new_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED)
		return true;
	else if (amdgpu_dm_vrr_active(old_state) != amdgpu_dm_vrr_active(new_state))
		return true;
	else
		return false;
}
/**
 * dm_pflip_high_irq() - Handle pageflip interrupt
 * @interrupt_params: ignored
 *
 * Handles the pageflip interrupt by notifying all interested parties
 * that the pageflip has been completed.
 */
382 static void dm_pflip_high_irq(void *interrupt_params)
384 struct amdgpu_crtc *amdgpu_crtc;
385 struct common_irq_params *irq_params = interrupt_params;
386 struct amdgpu_device *adev = irq_params->adev;
388 struct drm_pending_vblank_event *e;
389 uint32_t vpos, hpos, v_blank_start, v_blank_end;
392 amdgpu_crtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_PFLIP);
394 /* IRQ could occur when in initial stage */
395 /* TODO work and BO cleanup */
396 if (amdgpu_crtc == NULL) {
397 DC_LOG_PFLIP("CRTC is null, returning.\n");
401 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
403 if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED){
404 DC_LOG_PFLIP("amdgpu_crtc->pflip_status = %d !=AMDGPU_FLIP_SUBMITTED(%d) on crtc:%d[%p] \n",
405 amdgpu_crtc->pflip_status,
406 AMDGPU_FLIP_SUBMITTED,
407 amdgpu_crtc->crtc_id,
409 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
413 /* page flip completed. */
414 e = amdgpu_crtc->event;
415 amdgpu_crtc->event = NULL;
419 vrr_active = amdgpu_dm_vrr_active_irq(amdgpu_crtc);
421 /* Fixed refresh rate, or VRR scanout position outside front-porch? */
423 !dc_stream_get_scanoutpos(amdgpu_crtc->dm_irq_params.stream, &v_blank_start,
424 &v_blank_end, &hpos, &vpos) ||
425 (vpos < v_blank_start)) {
426 /* Update to correct count and vblank timestamp if racing with
427 * vblank irq. This also updates to the correct vblank timestamp
428 * even in VRR mode, as scanout is past the front-porch atm.
430 drm_crtc_accurate_vblank_count(&amdgpu_crtc->base);
432 /* Wake up userspace by sending the pageflip event with proper
433 * count and timestamp of vblank of flip completion.
436 drm_crtc_send_vblank_event(&amdgpu_crtc->base, e);
438 /* Event sent, so done with vblank for this flip */
439 drm_crtc_vblank_put(&amdgpu_crtc->base);
442 /* VRR active and inside front-porch: vblank count and
443 * timestamp for pageflip event will only be up to date after
444 * drm_crtc_handle_vblank() has been executed from late vblank
445 * irq handler after start of back-porch (vline 0). We queue the
446 * pageflip event for send-out by drm_crtc_handle_vblank() with
447 * updated timestamp and count, once it runs after us.
449 * We need to open-code this instead of using the helper
450 * drm_crtc_arm_vblank_event(), as that helper would
451 * call drm_crtc_accurate_vblank_count(), which we must
452 * not call in VRR mode while we are in front-porch!
455 /* sequence will be replaced by real count during send-out. */
456 e->sequence = drm_crtc_vblank_count(&amdgpu_crtc->base);
457 e->pipe = amdgpu_crtc->crtc_id;
459 list_add_tail(&e->base.link, &adev_to_drm(adev)->vblank_event_list);
	/* Keep track of vblank of this flip for flip throttling. We use the
	 * cooked hw counter, as that one is incremented at the start of this
	 * vblank of pageflip completion, so last_flip_vblank is the forbidden
	 * count for queueing new pageflips if vsync + VRR is enabled.
	 */
468 amdgpu_crtc->dm_irq_params.last_flip_vblank =
469 amdgpu_get_vblank_counter_kms(&amdgpu_crtc->base);
471 amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE;
472 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
474 DC_LOG_PFLIP("crtc:%d[%p], pflip_stat:AMDGPU_FLIP_NONE, vrr[%d]-fp %d\n",
475 amdgpu_crtc->crtc_id, amdgpu_crtc,
476 vrr_active, (int) !e);
static void dm_crtc_handle_vblank(struct amdgpu_crtc *acrtc)
{
	struct drm_crtc *crtc = &acrtc->base;
	struct drm_device *dev = crtc->dev;
	unsigned long flags;

	drm_crtc_handle_vblank(crtc);

	spin_lock_irqsave(&dev->event_lock, flags);

	/* Send completion event for cursor-only commits */
	if (acrtc->event && acrtc->pflip_status != AMDGPU_FLIP_SUBMITTED) {
		drm_crtc_send_vblank_event(crtc, acrtc->event);
		drm_crtc_vblank_put(crtc);
		acrtc->event = NULL;
	}

	spin_unlock_irqrestore(&dev->event_lock, flags);
}
499 static void dm_vupdate_high_irq(void *interrupt_params)
501 struct common_irq_params *irq_params = interrupt_params;
502 struct amdgpu_device *adev = irq_params->adev;
503 struct amdgpu_crtc *acrtc;
504 struct drm_device *drm_dev;
505 struct drm_vblank_crtc *vblank;
506 ktime_t frame_duration_ns, previous_timestamp;
510 acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VUPDATE);
513 vrr_active = amdgpu_dm_vrr_active_irq(acrtc);
514 drm_dev = acrtc->base.dev;
515 vblank = &drm_dev->vblank[acrtc->base.index];
516 previous_timestamp = atomic64_read(&irq_params->previous_timestamp);
517 frame_duration_ns = vblank->time - previous_timestamp;
519 if (frame_duration_ns > 0) {
520 trace_amdgpu_refresh_rate_track(acrtc->base.index,
522 ktime_divns(NSEC_PER_SEC, frame_duration_ns));
523 atomic64_set(&irq_params->previous_timestamp, vblank->time);
526 DC_LOG_VBLANK("crtc:%d, vupdate-vrr:%d\n",
530 /* Core vblank handling is done here after end of front-porch in
531 * vrr mode, as vblank timestamping will give valid results
532 * while now done after front-porch. This will also deliver
533 * page-flip completion events that have been queued to us
534 * if a pageflip happened inside front-porch.
537 dm_crtc_handle_vblank(acrtc);
539 /* BTR processing for pre-DCE12 ASICs */
540 if (acrtc->dm_irq_params.stream &&
541 adev->family < AMDGPU_FAMILY_AI) {
542 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
543 mod_freesync_handle_v_update(
544 adev->dm.freesync_module,
545 acrtc->dm_irq_params.stream,
546 &acrtc->dm_irq_params.vrr_params);
548 dc_stream_adjust_vmin_vmax(
550 acrtc->dm_irq_params.stream,
551 &acrtc->dm_irq_params.vrr_params.adjust);
552 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
/**
 * dm_crtc_high_irq() - Handles CRTC interrupt
 * @interrupt_params: used for determining the CRTC instance
 *
 * Handles the CRTC/VSYNC interrupt by notifying DRM's VBLANK
 * event handler.
 */
static void dm_crtc_high_irq(void *interrupt_params)
567 struct common_irq_params *irq_params = interrupt_params;
568 struct amdgpu_device *adev = irq_params->adev;
569 struct amdgpu_crtc *acrtc;
573 acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VBLANK);
577 vrr_active = amdgpu_dm_vrr_active_irq(acrtc);
579 DC_LOG_VBLANK("crtc:%d, vupdate-vrr:%d, planes:%d\n", acrtc->crtc_id,
580 vrr_active, acrtc->dm_irq_params.active_planes);
583 * Core vblank handling at start of front-porch is only possible
584 * in non-vrr mode, as only there vblank timestamping will give
585 * valid results while done in front-porch. Otherwise defer it
586 * to dm_vupdate_high_irq after end of front-porch.
589 dm_crtc_handle_vblank(acrtc);
	/*
	 * Following stuff must happen at start of vblank, for crc
	 * computation and below-the-range btr support in vrr mode.
	 */
	amdgpu_dm_crtc_handle_crc_irq(&acrtc->base);
597 /* BTR updates need to happen before VUPDATE on Vega and above. */
598 if (adev->family < AMDGPU_FAMILY_AI)
601 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
603 if (acrtc->dm_irq_params.stream &&
604 acrtc->dm_irq_params.vrr_params.supported &&
605 acrtc->dm_irq_params.freesync_config.state ==
606 VRR_STATE_ACTIVE_VARIABLE) {
607 mod_freesync_handle_v_update(adev->dm.freesync_module,
608 acrtc->dm_irq_params.stream,
609 &acrtc->dm_irq_params.vrr_params);
611 dc_stream_adjust_vmin_vmax(adev->dm.dc, acrtc->dm_irq_params.stream,
612 &acrtc->dm_irq_params.vrr_params.adjust);
	/*
	 * If there aren't any active_planes then DCH HUBP may be clock-gated.
	 * In that case, pageflip completion interrupts won't fire and pageflip
	 * completion events won't get delivered. Prevent this by sending
	 * pending pageflip events from here if a flip is still pending.
	 *
	 * If any planes are enabled, use dm_pflip_high_irq() instead, to
	 * avoid race conditions between flip programming and completion,
	 * which could cause too early flip completion events.
	 */
	if (adev->family >= AMDGPU_FAMILY_RV &&
	    acrtc->pflip_status == AMDGPU_FLIP_SUBMITTED &&
	    acrtc->dm_irq_params.active_planes == 0) {
		if (acrtc->event) {
			drm_crtc_send_vblank_event(&acrtc->base, acrtc->event);
			acrtc->event = NULL;
			drm_crtc_vblank_put(&acrtc->base);
		}
		acrtc->pflip_status = AMDGPU_FLIP_NONE;
	}

	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
}
639 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
/**
 * dm_dcn_vertical_interrupt0_high_irq() - Handles OTG Vertical interrupt0 for
 * DCN generation ASICs
 * @interrupt_params: interrupt parameters
 *
 * Used to set crc window/read out crc value at vertical line 0 position
 */
static void dm_dcn_vertical_interrupt0_high_irq(void *interrupt_params)
{
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	struct amdgpu_crtc *acrtc;

	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VLINE0);

	if (!acrtc)
		return;

	amdgpu_dm_crtc_handle_crc_window_irq(&acrtc->base);
}
660 #endif /* CONFIG_DRM_AMD_SECURE_DISPLAY */
/**
 * dmub_aux_setconfig_callback - Callback for AUX or SET_CONFIG command.
 * @adev: amdgpu_device pointer
 * @notify: dmub notification structure
 *
 * Dmub AUX or SET_CONFIG command completion processing callback
 * Copies dmub notification to DM which is to be read by AUX command
 * issuing thread and also signals the event to wake up the thread.
 */
static void dmub_aux_setconfig_callback(struct amdgpu_device *adev,
					struct dmub_notification *notify)
{
	if (adev->dm.dmub_notify)
		memcpy(adev->dm.dmub_notify, notify, sizeof(struct dmub_notification));
	if (notify->type == DMUB_NOTIFICATION_AUX_REPLY)
		complete(&adev->dm.dmub_aux_transfer_done);
}
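/*
 * The AUX-issuing thread is expected to wait on
 * adev->dm.dmub_aux_transfer_done (initialized in amdgpu_dm_init() below)
 * and then read the reply back out of adev->dm.dmub_notify once this
 * completion fires.
 */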
/**
 * dmub_hpd_callback - DMUB HPD interrupt processing callback.
 * @adev: amdgpu_device pointer
 * @notify: dmub notification structure
 *
 * Dmub Hpd interrupt processing callback. Gets display index through the
 * link index and calls helper to do the processing.
 */
688 static void dmub_hpd_callback(struct amdgpu_device *adev,
689 struct dmub_notification *notify)
691 struct amdgpu_dm_connector *aconnector;
692 struct amdgpu_dm_connector *hpd_aconnector = NULL;
693 struct drm_connector *connector;
694 struct drm_connector_list_iter iter;
695 struct dc_link *link;
696 uint8_t link_index = 0;
697 struct drm_device *dev;
	if (notify == NULL) {
		DRM_ERROR("DMUB HPD callback notification was NULL");
		return;
	}

	if (notify->link_index > adev->dm.dc->link_count) {
		DRM_ERROR("DMUB HPD index (%u) is abnormal", notify->link_index);
		return;
	}
712 link_index = notify->link_index;
713 link = adev->dm.dc->links[link_index];
716 drm_connector_list_iter_begin(dev, &iter);
717 drm_for_each_connector_iter(connector, &iter) {
718 aconnector = to_amdgpu_dm_connector(connector);
719 if (link && aconnector->dc_link == link) {
720 DRM_INFO("DMUB HPD callback: link_index=%u\n", link_index);
721 hpd_aconnector = aconnector;
725 drm_connector_list_iter_end(&iter);
727 if (hpd_aconnector) {
728 if (notify->type == DMUB_NOTIFICATION_HPD)
729 handle_hpd_irq_helper(hpd_aconnector);
730 else if (notify->type == DMUB_NOTIFICATION_HPD_IRQ)
731 handle_hpd_rx_irq(hpd_aconnector);
/**
 * register_dmub_notify_callback - Sets callback for DMUB notify
 * @adev: amdgpu_device pointer
 * @type: Type of dmub notification
 * @callback: Dmub interrupt callback function
 * @dmub_int_thread_offload: offload indicator
 *
 * API to register a dmub callback handler for a dmub notification
 * Also sets indicator whether callback processing is to be offloaded
 * to the dmub interrupt handling thread
 * Return: true if successfully registered, false if there is existing registration
 */
static bool register_dmub_notify_callback(struct amdgpu_device *adev,
					  enum dmub_notification_type type,
					  dmub_notify_interrupt_callback_t callback,
					  bool dmub_int_thread_offload)
{
	if (callback != NULL && type < ARRAY_SIZE(adev->dm.dmub_thread_offload)) {
		adev->dm.dmub_callback[type] = callback;
		adev->dm.dmub_thread_offload[type] = dmub_int_thread_offload;
	} else
		return false;

	return true;
}
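/*
 * amdgpu_dm_init() below registers DMUB_NOTIFICATION_AUX_REPLY with direct
 * (non-offloaded) handling, while DMUB_NOTIFICATION_HPD and
 * DMUB_NOTIFICATION_HPD_IRQ are offloaded to the delayed_hpd_wq worker.
 */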
static void dm_handle_hpd_work(struct work_struct *work)
{
	struct dmub_hpd_work *dmub_hpd_wrk;

	dmub_hpd_wrk = container_of(work, struct dmub_hpd_work, handle_hpd_work);

	if (!dmub_hpd_wrk->dmub_notify) {
		DRM_ERROR("dmub_hpd_wrk dmub_notify is NULL");
		return;
	}

	if (dmub_hpd_wrk->dmub_notify->type < ARRAY_SIZE(dmub_hpd_wrk->adev->dm.dmub_callback)) {
		dmub_hpd_wrk->adev->dm.dmub_callback[dmub_hpd_wrk->dmub_notify->type](dmub_hpd_wrk->adev,
										      dmub_hpd_wrk->dmub_notify);
	}

	kfree(dmub_hpd_wrk->dmub_notify);
	kfree(dmub_hpd_wrk);
}
#define DMUB_TRACE_MAX_READ 64
/**
 * dm_dmub_outbox1_low_irq() - Handles Outbox interrupt
 * @interrupt_params: used for determining the Outbox instance
 *
 * Handles the Outbox Interrupt
 * event handler.
 */
790 static void dm_dmub_outbox1_low_irq(void *interrupt_params)
792 struct dmub_notification notify;
793 struct common_irq_params *irq_params = interrupt_params;
794 struct amdgpu_device *adev = irq_params->adev;
795 struct amdgpu_display_manager *dm = &adev->dm;
796 struct dmcub_trace_buf_entry entry = { 0 };
798 struct dmub_hpd_work *dmub_hpd_wrk;
799 struct dc_link *plink = NULL;
801 if (dc_enable_dmub_notifications(adev->dm.dc) &&
802 irq_params->irq_src == DC_IRQ_SOURCE_DMCUB_OUTBOX) {
805 dc_stat_get_dmub_notification(adev->dm.dc, ¬ify);
806 if (notify.type >= ARRAY_SIZE(dm->dmub_thread_offload)) {
807 DRM_ERROR("DM: notify type %d invalid!", notify.type);
810 if (!dm->dmub_callback[notify.type]) {
811 DRM_DEBUG_DRIVER("DMUB notification skipped, no handler: type=%d\n", notify.type);
814 if (dm->dmub_thread_offload[notify.type] == true) {
815 dmub_hpd_wrk = kzalloc(sizeof(*dmub_hpd_wrk), GFP_ATOMIC);
817 DRM_ERROR("Failed to allocate dmub_hpd_wrk");
820 dmub_hpd_wrk->dmub_notify = kzalloc(sizeof(struct dmub_notification), GFP_ATOMIC);
821 if (!dmub_hpd_wrk->dmub_notify) {
823 DRM_ERROR("Failed to allocate dmub_hpd_wrk->dmub_notify");
826 INIT_WORK(&dmub_hpd_wrk->handle_hpd_work, dm_handle_hpd_work);
827 if (dmub_hpd_wrk->dmub_notify)
828 memcpy(dmub_hpd_wrk->dmub_notify, ¬ify, sizeof(struct dmub_notification));
829 dmub_hpd_wrk->adev = adev;
830 if (notify.type == DMUB_NOTIFICATION_HPD) {
831 plink = adev->dm.dc->links[notify.link_index];
834 notify.hpd_status == DP_HPD_PLUG;
837 queue_work(adev->dm.delayed_hpd_wq, &dmub_hpd_wrk->handle_hpd_work);
839 dm->dmub_callback[notify.type](adev, ¬ify);
841 } while (notify.pending_notification);
846 if (dc_dmub_srv_get_dmub_outbox0_msg(dm->dc, &entry)) {
847 trace_amdgpu_dmub_trace_high_irq(entry.trace_code, entry.tick_count,
848 entry.param0, entry.param1);
850 DRM_DEBUG_DRIVER("trace_code:%u, tick_count:%u, param0:%u, param1:%u\n",
851 entry.trace_code, entry.tick_count, entry.param0, entry.param1);
857 } while (count <= DMUB_TRACE_MAX_READ);
859 if (count > DMUB_TRACE_MAX_READ)
860 DRM_DEBUG_DRIVER("Warning : count > DMUB_TRACE_MAX_READ");
static int dm_set_clockgating_state(void *handle,
				    enum amd_clockgating_state state)
{
	return 0;
}

static int dm_set_powergating_state(void *handle,
				    enum amd_powergating_state state)
{
	return 0;
}
/* Prototypes of private functions */
static int dm_early_init(void *handle);
878 /* Allocate memory for FBC compressed data */
879 static void amdgpu_dm_fbc_init(struct drm_connector *connector)
881 struct drm_device *dev = connector->dev;
882 struct amdgpu_device *adev = drm_to_adev(dev);
883 struct dm_compressor_info *compressor = &adev->dm.compressor;
884 struct amdgpu_dm_connector *aconn = to_amdgpu_dm_connector(connector);
885 struct drm_display_mode *mode;
886 unsigned long max_size = 0;
888 if (adev->dm.dc->fbc_compressor == NULL)
891 if (aconn->dc_link->connector_signal != SIGNAL_TYPE_EDP)
894 if (compressor->bo_ptr)
898 list_for_each_entry(mode, &connector->modes, head) {
899 if (max_size < mode->htotal * mode->vtotal)
900 max_size = mode->htotal * mode->vtotal;
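	/*
	 * max_size now holds the largest htotal * vtotal among the reported
	 * modes; the FBC buffer allocated below is max_size * 4 bytes
	 * (4 bytes per pixel) placed in GTT.
	 */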
904 int r = amdgpu_bo_create_kernel(adev, max_size * 4, PAGE_SIZE,
905 AMDGPU_GEM_DOMAIN_GTT, &compressor->bo_ptr,
906 &compressor->gpu_addr, &compressor->cpu_addr);
909 DRM_ERROR("DM: Failed to initialize FBC\n");
911 adev->dm.dc->ctx->fbc_gpu_addr = compressor->gpu_addr;
912 DRM_INFO("DM: FBC alloc %lu\n", max_size*4);
919 static int amdgpu_dm_audio_component_get_eld(struct device *kdev, int port,
920 int pipe, bool *enabled,
921 unsigned char *buf, int max_bytes)
923 struct drm_device *dev = dev_get_drvdata(kdev);
924 struct amdgpu_device *adev = drm_to_adev(dev);
925 struct drm_connector *connector;
926 struct drm_connector_list_iter conn_iter;
927 struct amdgpu_dm_connector *aconnector;
932 mutex_lock(&adev->dm.audio_lock);
934 drm_connector_list_iter_begin(dev, &conn_iter);
935 drm_for_each_connector_iter(connector, &conn_iter) {
936 aconnector = to_amdgpu_dm_connector(connector);
937 if (aconnector->audio_inst != port)
941 ret = drm_eld_size(connector->eld);
942 memcpy(buf, connector->eld, min(max_bytes, ret));
946 drm_connector_list_iter_end(&conn_iter);
948 mutex_unlock(&adev->dm.audio_lock);
950 DRM_DEBUG_KMS("Get ELD : idx=%d ret=%d en=%d\n", port, ret, *enabled);
static const struct drm_audio_component_ops amdgpu_dm_audio_component_ops = {
	.get_eld = amdgpu_dm_audio_component_get_eld,
};
static int amdgpu_dm_audio_component_bind(struct device *kdev,
					  struct device *hda_kdev, void *data)
{
	struct drm_device *dev = dev_get_drvdata(kdev);
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct drm_audio_component *acomp = data;

	acomp->ops = &amdgpu_dm_audio_component_ops;
	acomp->dev = kdev;
	adev->dm.audio_component = acomp;

	return 0;
}
static void amdgpu_dm_audio_component_unbind(struct device *kdev,
					     struct device *hda_kdev, void *data)
{
	struct drm_device *dev = dev_get_drvdata(kdev);
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct drm_audio_component *acomp = data;

	acomp->ops = NULL;
	acomp->dev = NULL;
	adev->dm.audio_component = NULL;
}
static const struct component_ops amdgpu_dm_audio_component_bind_ops = {
	.bind = amdgpu_dm_audio_component_bind,
	.unbind = amdgpu_dm_audio_component_unbind,
};
990 static int amdgpu_dm_audio_init(struct amdgpu_device *adev)
997 adev->mode_info.audio.enabled = true;
999 adev->mode_info.audio.num_pins = adev->dm.dc->res_pool->audio_count;
1001 for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
1002 adev->mode_info.audio.pin[i].channels = -1;
1003 adev->mode_info.audio.pin[i].rate = -1;
1004 adev->mode_info.audio.pin[i].bits_per_sample = -1;
1005 adev->mode_info.audio.pin[i].status_bits = 0;
1006 adev->mode_info.audio.pin[i].category_code = 0;
1007 adev->mode_info.audio.pin[i].connected = false;
1008 adev->mode_info.audio.pin[i].id =
1009 adev->dm.dc->res_pool->audios[i]->inst;
1010 adev->mode_info.audio.pin[i].offset = 0;
1013 ret = component_add(adev->dev, &amdgpu_dm_audio_component_bind_ops);
1017 adev->dm.audio_registered = true;
static void amdgpu_dm_audio_fini(struct amdgpu_device *adev)
{
	if (!adev->mode_info.audio.enabled)
		return;

	if (adev->dm.audio_registered) {
		component_del(adev->dev, &amdgpu_dm_audio_component_bind_ops);
		adev->dm.audio_registered = false;
	}

	/* TODO: Disable audio? */

	adev->mode_info.audio.enabled = false;
}
static void amdgpu_dm_audio_eld_notify(struct amdgpu_device *adev, int pin)
{
	struct drm_audio_component *acomp = adev->dm.audio_component;

	if (acomp && acomp->audio_ops && acomp->audio_ops->pin_eld_notify) {
		DRM_DEBUG_KMS("Notify ELD: %d\n", pin);

		acomp->audio_ops->pin_eld_notify(acomp->audio_ops->audio_ptr,
						 pin, -1);
	}
}
1052 static int dm_dmub_hw_init(struct amdgpu_device *adev)
1054 const struct dmcub_firmware_header_v1_0 *hdr;
1055 struct dmub_srv *dmub_srv = adev->dm.dmub_srv;
1056 struct dmub_srv_fb_info *fb_info = adev->dm.dmub_fb_info;
1057 const struct firmware *dmub_fw = adev->dm.dmub_fw;
1058 struct dmcu *dmcu = adev->dm.dc->res_pool->dmcu;
1059 struct abm *abm = adev->dm.dc->res_pool->abm;
1060 struct dmub_srv_hw_params hw_params;
1061 enum dmub_status status;
1062 const unsigned char *fw_inst_const, *fw_bss_data;
1063 uint32_t i, fw_inst_const_size, fw_bss_data_size;
1064 bool has_hw_support;
1067 /* DMUB isn't supported on the ASIC. */
1071 DRM_ERROR("No framebuffer info for DMUB service.\n");
1076 /* Firmware required for DMUB support. */
1077 DRM_ERROR("No firmware provided for DMUB.\n");
1081 status = dmub_srv_has_hw_support(dmub_srv, &has_hw_support);
1082 if (status != DMUB_STATUS_OK) {
1083 DRM_ERROR("Error checking HW support for DMUB: %d\n", status);
1087 if (!has_hw_support) {
1088 DRM_INFO("DMUB unsupported on ASIC\n");
1092 /* Reset DMCUB if it was previously running - before we overwrite its memory. */
1093 status = dmub_srv_hw_reset(dmub_srv);
1094 if (status != DMUB_STATUS_OK)
1095 DRM_WARN("Error resetting DMUB HW: %d\n", status);
1097 hdr = (const struct dmcub_firmware_header_v1_0 *)dmub_fw->data;
1099 fw_inst_const = dmub_fw->data +
1100 le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
1103 fw_bss_data = dmub_fw->data +
1104 le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
1105 le32_to_cpu(hdr->inst_const_bytes);
1107 /* Copy firmware and bios info into FB memory. */
1108 fw_inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
1109 PSP_HEADER_BYTES - PSP_FOOTER_BYTES;
1111 fw_bss_data_size = le32_to_cpu(hdr->bss_data_bytes);
1113 /* if adev->firmware.load_type == AMDGPU_FW_LOAD_PSP,
1114 * amdgpu_ucode_init_single_fw will load dmub firmware
1115 * fw_inst_const part to cw0; otherwise, the firmware back door load
1116 * will be done by dm_dmub_hw_init
1118 if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
1119 memcpy(fb_info->fb[DMUB_WINDOW_0_INST_CONST].cpu_addr, fw_inst_const,
1120 fw_inst_const_size);
1123 if (fw_bss_data_size)
1124 memcpy(fb_info->fb[DMUB_WINDOW_2_BSS_DATA].cpu_addr,
1125 fw_bss_data, fw_bss_data_size);
1127 /* Copy firmware bios info into FB memory. */
1128 memcpy(fb_info->fb[DMUB_WINDOW_3_VBIOS].cpu_addr, adev->bios,
1131 /* Reset regions that need to be reset. */
1132 memset(fb_info->fb[DMUB_WINDOW_4_MAILBOX].cpu_addr, 0,
1133 fb_info->fb[DMUB_WINDOW_4_MAILBOX].size);
1135 memset(fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].cpu_addr, 0,
1136 fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].size);
1138 memset(fb_info->fb[DMUB_WINDOW_6_FW_STATE].cpu_addr, 0,
1139 fb_info->fb[DMUB_WINDOW_6_FW_STATE].size);
1141 /* Initialize hardware. */
1142 memset(&hw_params, 0, sizeof(hw_params));
1143 hw_params.fb_base = adev->gmc.fb_start;
1144 hw_params.fb_offset = adev->gmc.aper_base;
1146 /* backdoor load firmware and trigger dmub running */
1147 if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
1148 hw_params.load_inst_const = true;
1151 hw_params.psp_version = dmcu->psp_version;
1153 for (i = 0; i < fb_info->num_fb; ++i)
1154 hw_params.fb[i] = &fb_info->fb[i];
1156 switch (adev->ip_versions[DCE_HWIP][0]) {
1157 case IP_VERSION(3, 1, 3): /* Only for this asic hw internal rev B0 */
1158 hw_params.dpia_supported = true;
1159 hw_params.disable_dpia = adev->dm.dc->debug.dpia_debug.bits.disable_dpia;
1165 status = dmub_srv_hw_init(dmub_srv, &hw_params);
1166 if (status != DMUB_STATUS_OK) {
1167 DRM_ERROR("Error initializing DMUB HW: %d\n", status);
1171 /* Wait for firmware load to finish. */
1172 status = dmub_srv_wait_for_auto_load(dmub_srv, 100000);
1173 if (status != DMUB_STATUS_OK)
1174 DRM_WARN("Wait for DMUB auto-load failed: %d\n", status);
1176 /* Init DMCU and ABM if available. */
1178 dmcu->funcs->dmcu_init(dmcu);
1179 abm->dmcu_is_running = dmcu->funcs->is_dmcu_initialized(dmcu);
1182 if (!adev->dm.dc->ctx->dmub_srv)
1183 adev->dm.dc->ctx->dmub_srv = dc_dmub_srv_create(adev->dm.dc, dmub_srv);
1184 if (!adev->dm.dc->ctx->dmub_srv) {
1185 DRM_ERROR("Couldn't allocate DC DMUB server!\n");
1189 DRM_INFO("DMUB hardware initialized: version=0x%08X\n",
1190 adev->dm.dmcub_fw_version);
static void dm_dmub_hw_resume(struct amdgpu_device *adev)
{
	struct dmub_srv *dmub_srv = adev->dm.dmub_srv;
	enum dmub_status status;
	bool init;

	if (!dmub_srv)
		/* DMUB isn't supported on the ASIC. */
		return;

	status = dmub_srv_is_hw_init(dmub_srv, &init);
	if (status != DMUB_STATUS_OK)
		DRM_WARN("DMUB hardware init check failed: %d\n", status);

	if (status == DMUB_STATUS_OK && init) {
		/* Wait for firmware load to finish. */
		status = dmub_srv_wait_for_auto_load(dmub_srv, 100000);
		if (status != DMUB_STATUS_OK)
			DRM_WARN("Wait for DMUB auto-load failed: %d\n", status);
	} else {
		/* Perform the full hardware initialization. */
		dm_dmub_hw_init(adev);
	}
}
1221 static void mmhub_read_system_context(struct amdgpu_device *adev, struct dc_phy_addr_space_config *pa_config)
1224 uint32_t logical_addr_low;
1225 uint32_t logical_addr_high;
1226 uint32_t agp_base, agp_bot, agp_top;
1227 PHYSICAL_ADDRESS_LOC page_table_start, page_table_end, page_table_base;
1229 memset(pa_config, 0, sizeof(*pa_config));
1231 logical_addr_low = min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18;
1232 pt_base = amdgpu_gmc_pd_addr(adev->gart.bo);
	if (adev->apu_flags & AMD_APU_IS_RAVEN2)
		/*
		 * Raven2 has a HW issue that it is unable to use the vram which
		 * is out of MC_VM_SYSTEM_APERTURE_HIGH_ADDR. So here is the
		 * workaround that increases the system aperture high address
		 * (add 1) to get rid of the VM fault and hardware hang.
		 */
		logical_addr_high = max((adev->gmc.fb_end >> 18) + 0x1, adev->gmc.agp_end >> 18);
	else
		logical_addr_high = max(adev->gmc.fb_end, adev->gmc.agp_end) >> 18;
1246 agp_bot = adev->gmc.agp_start >> 24;
1247 agp_top = adev->gmc.agp_end >> 24;
1250 page_table_start.high_part = (u32)(adev->gmc.gart_start >> 44) & 0xF;
1251 page_table_start.low_part = (u32)(adev->gmc.gart_start >> 12);
1252 page_table_end.high_part = (u32)(adev->gmc.gart_end >> 44) & 0xF;
1253 page_table_end.low_part = (u32)(adev->gmc.gart_end >> 12);
1254 page_table_base.high_part = upper_32_bits(pt_base) & 0xF;
1255 page_table_base.low_part = lower_32_bits(pt_base);
1257 pa_config->system_aperture.start_addr = (uint64_t)logical_addr_low << 18;
1258 pa_config->system_aperture.end_addr = (uint64_t)logical_addr_high << 18;
1260 pa_config->system_aperture.agp_base = (uint64_t)agp_base << 24 ;
1261 pa_config->system_aperture.agp_bot = (uint64_t)agp_bot << 24;
1262 pa_config->system_aperture.agp_top = (uint64_t)agp_top << 24;
1264 pa_config->system_aperture.fb_base = adev->gmc.fb_start;
1265 pa_config->system_aperture.fb_offset = adev->gmc.aper_base;
1266 pa_config->system_aperture.fb_top = adev->gmc.fb_end;
1268 pa_config->gart_config.page_table_start_addr = page_table_start.quad_part << 12;
1269 pa_config->gart_config.page_table_end_addr = page_table_end.quad_part << 12;
1270 pa_config->gart_config.page_table_base_addr = page_table_base.quad_part;
1272 pa_config->is_hvm_enabled = 0;
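	/*
	 * Granularity note for the programming above: the system aperture
	 * start/end addresses are expressed in 256KB units (the >> 18 / << 18
	 * shifts), the AGP window in 16MB units (>> 24 / << 24), and the GART
	 * page table addresses are 4KB aligned (>> 12 / << 12).
	 */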
1276 static void vblank_control_worker(struct work_struct *work)
1278 struct vblank_control_work *vblank_work =
1279 container_of(work, struct vblank_control_work, work);
1280 struct amdgpu_display_manager *dm = vblank_work->dm;
1282 mutex_lock(&dm->dc_lock);
	if (vblank_work->enable)
		dm->active_vblank_irq_count++;
	else if (dm->active_vblank_irq_count)
		dm->active_vblank_irq_count--;

	dc_allow_idle_optimizations(dm->dc, dm->active_vblank_irq_count == 0);
1291 DRM_DEBUG_KMS("Allow idle optimizations (MALL): %d\n", dm->active_vblank_irq_count == 0);
	/*
	 * Control PSR based on vblank requirements from OS
	 *
	 * If panel supports PSR SU, there's no need to disable PSR when OS is
	 * submitting fast atomic commits (we infer this by whether the OS
	 * requests vblank events). Fast atomic commits will simply trigger a
	 * full-frame-update (FFU); a specific case of selective-update (SU)
	 * where the SU region is the full hactive*vactive region. See
	 * fill_dc_dirty_rects().
	 */
1303 if (vblank_work->stream && vblank_work->stream->link) {
1304 if (vblank_work->enable) {
1305 if (vblank_work->stream->link->psr_settings.psr_version < DC_PSR_VERSION_SU_1 &&
1306 vblank_work->stream->link->psr_settings.psr_allow_active)
1307 amdgpu_dm_psr_disable(vblank_work->stream);
1308 } else if (vblank_work->stream->link->psr_settings.psr_feature_enabled &&
1309 !vblank_work->stream->link->psr_settings.psr_allow_active &&
1310 vblank_work->acrtc->dm_irq_params.allow_psr_entry) {
1311 amdgpu_dm_psr_enable(vblank_work->stream);
1315 mutex_unlock(&dm->dc_lock);
1317 dc_stream_release(vblank_work->stream);
1322 static void dm_handle_hpd_rx_offload_work(struct work_struct *work)
1324 struct hpd_rx_irq_offload_work *offload_work;
1325 struct amdgpu_dm_connector *aconnector;
1326 struct dc_link *dc_link;
1327 struct amdgpu_device *adev;
1328 enum dc_connection_type new_connection_type = dc_connection_none;
1329 unsigned long flags;
1331 offload_work = container_of(work, struct hpd_rx_irq_offload_work, work);
1332 aconnector = offload_work->offload_wq->aconnector;
1335 DRM_ERROR("Can't retrieve aconnector in hpd_rx_irq_offload_work");
1339 adev = drm_to_adev(aconnector->base.dev);
1340 dc_link = aconnector->dc_link;
1342 mutex_lock(&aconnector->hpd_lock);
1343 if (!dc_link_detect_sink(dc_link, &new_connection_type))
1344 DRM_ERROR("KMS: Failed to detect connector\n");
1345 mutex_unlock(&aconnector->hpd_lock);
1347 if (new_connection_type == dc_connection_none)
1350 if (amdgpu_in_reset(adev))
1353 mutex_lock(&adev->dm.dc_lock);
1354 if (offload_work->data.bytes.device_service_irq.bits.AUTOMATED_TEST)
1355 dc_link_dp_handle_automated_test(dc_link);
1356 else if ((dc_link->connector_signal != SIGNAL_TYPE_EDP) &&
1357 hpd_rx_irq_check_link_loss_status(dc_link, &offload_work->data) &&
1358 dc_link_dp_allow_hpd_rx_irq(dc_link)) {
1359 dc_link_dp_handle_link_loss(dc_link);
1360 spin_lock_irqsave(&offload_work->offload_wq->offload_lock, flags);
1361 offload_work->offload_wq->is_handling_link_loss = false;
1362 spin_unlock_irqrestore(&offload_work->offload_wq->offload_lock, flags);
1364 mutex_unlock(&adev->dm.dc_lock);
1367 kfree(offload_work);
1371 static struct hpd_rx_irq_offload_work_queue *hpd_rx_irq_create_workqueue(struct dc *dc)
1373 int max_caps = dc->caps.max_links;
1375 struct hpd_rx_irq_offload_work_queue *hpd_rx_offload_wq = NULL;
1377 hpd_rx_offload_wq = kcalloc(max_caps, sizeof(*hpd_rx_offload_wq), GFP_KERNEL);
1379 if (!hpd_rx_offload_wq)
1383 for (i = 0; i < max_caps; i++) {
1384 hpd_rx_offload_wq[i].wq =
1385 create_singlethread_workqueue("amdgpu_dm_hpd_rx_offload_wq");
1387 if (hpd_rx_offload_wq[i].wq == NULL) {
1388 DRM_ERROR("create amdgpu_dm_hpd_rx_offload_wq fail!");
1392 spin_lock_init(&hpd_rx_offload_wq[i].offload_lock);
1395 return hpd_rx_offload_wq;
struct amdgpu_stutter_quirk {
	u16 chip_vendor;
	u16 chip_device;
	u16 subsys_vendor;
	u16 subsys_device;
	u8 revision;
};

static const struct amdgpu_stutter_quirk amdgpu_stutter_quirk_list[] = {
	/* https://bugzilla.kernel.org/show_bug.cgi?id=214417 */
	{ 0x1002, 0x15dd, 0x1002, 0x15dd, 0xc8 },
	{ 0, 0, 0, 0, 0 },
};
static bool dm_should_disable_stutter(struct pci_dev *pdev)
{
	const struct amdgpu_stutter_quirk *p = amdgpu_stutter_quirk_list;

	while (p && p->chip_device != 0) {
		if (pdev->vendor == p->chip_vendor &&
		    pdev->device == p->chip_device &&
		    pdev->subsystem_vendor == p->subsys_vendor &&
		    pdev->subsystem_device == p->subsys_device &&
		    pdev->revision == p->revision) {
			return true;
		}
		++p;
	}

	return false;
}
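/*
 * dm_should_disable_stutter() is consulted from amdgpu_dm_init() below; a
 * match on the quirk table forces adev->dm.dc->debug.disable_stutter.
 */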
static const struct dmi_system_id hpd_disconnect_quirk_table[] = {
	{
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
			DMI_MATCH(DMI_PRODUCT_NAME, "Precision 3660"),
		},
	},
	{
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
			DMI_MATCH(DMI_PRODUCT_NAME, "Precision 3260"),
		},
	},
	{
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
			DMI_MATCH(DMI_PRODUCT_NAME, "Precision 3460"),
		},
	},
	{}
};
static void retrieve_dmi_info(struct amdgpu_display_manager *dm)
{
	const struct dmi_system_id *dmi_id;

	dm->aux_hpd_discon_quirk = false;

	dmi_id = dmi_first_match(hpd_disconnect_quirk_table);

	if (dmi_id) {
		dm->aux_hpd_discon_quirk = true;
		DRM_INFO("aux_hpd_discon_quirk attached\n");
	}
}
1464 static int amdgpu_dm_init(struct amdgpu_device *adev)
1466 struct dc_init_data init_data;
1467 #ifdef CONFIG_DRM_AMD_DC_HDCP
1468 struct dc_callback_init init_params;
1472 adev->dm.ddev = adev_to_drm(adev);
1473 adev->dm.adev = adev;
1475 /* Zero all the fields */
1476 memset(&init_data, 0, sizeof(init_data));
1477 #ifdef CONFIG_DRM_AMD_DC_HDCP
1478 memset(&init_params, 0, sizeof(init_params));
1481 mutex_init(&adev->dm.dc_lock);
1482 mutex_init(&adev->dm.audio_lock);
1483 spin_lock_init(&adev->dm.vblank_lock);
	if (amdgpu_dm_irq_init(adev)) {
		DRM_ERROR("amdgpu: failed to initialize DM IRQ support.\n");
1490 init_data.asic_id.chip_family = adev->family;
1492 init_data.asic_id.pci_revision_id = adev->pdev->revision;
1493 init_data.asic_id.hw_internal_rev = adev->external_rev_id;
1494 init_data.asic_id.chip_id = adev->pdev->device;
1496 init_data.asic_id.vram_width = adev->gmc.vram_width;
1497 /* TODO: initialize init_data.asic_id.vram_type here!!!! */
1498 init_data.asic_id.atombios_base_address =
1499 adev->mode_info.atom_context->bios;
1501 init_data.driver = adev;
1503 adev->dm.cgs_device = amdgpu_cgs_create_device(adev);
1505 if (!adev->dm.cgs_device) {
1506 DRM_ERROR("amdgpu: failed to create cgs device.\n");
1510 init_data.cgs_device = adev->dm.cgs_device;
1512 init_data.dce_environment = DCE_ENV_PRODUCTION_DRV;
1514 switch (adev->ip_versions[DCE_HWIP][0]) {
1515 case IP_VERSION(2, 1, 0):
1516 switch (adev->dm.dmcub_fw_version) {
1517 case 0: /* development */
1518 case 0x1: /* linux-firmware.git hash 6d9f399 */
1519 case 0x01000000: /* linux-firmware.git hash 9a0b0f4 */
1520 init_data.flags.disable_dmcu = false;
1523 init_data.flags.disable_dmcu = true;
1526 case IP_VERSION(2, 0, 3):
1527 init_data.flags.disable_dmcu = true;
1533 switch (adev->asic_type) {
1536 init_data.flags.gpu_vm_support = true;
1539 switch (adev->ip_versions[DCE_HWIP][0]) {
1540 case IP_VERSION(1, 0, 0):
1541 case IP_VERSION(1, 0, 1):
1542 /* enable S/G on PCO and RV2 */
1543 if ((adev->apu_flags & AMD_APU_IS_RAVEN2) ||
1544 (adev->apu_flags & AMD_APU_IS_PICASSO))
1545 init_data.flags.gpu_vm_support = true;
1547 case IP_VERSION(2, 1, 0):
1548 case IP_VERSION(3, 0, 1):
1549 case IP_VERSION(3, 1, 2):
1550 case IP_VERSION(3, 1, 3):
1551 case IP_VERSION(3, 1, 5):
1552 case IP_VERSION(3, 1, 6):
1553 init_data.flags.gpu_vm_support = true;
1561 if (init_data.flags.gpu_vm_support)
1562 adev->mode_info.gpu_vm_support = true;
1564 if (amdgpu_dc_feature_mask & DC_FBC_MASK)
1565 init_data.flags.fbc_support = true;
1567 if (amdgpu_dc_feature_mask & DC_MULTI_MON_PP_MCLK_SWITCH_MASK)
1568 init_data.flags.multi_mon_pp_mclk_switch = true;
1570 if (amdgpu_dc_feature_mask & DC_DISABLE_FRACTIONAL_PWM_MASK)
1571 init_data.flags.disable_fractional_pwm = true;
1573 if (amdgpu_dc_feature_mask & DC_EDP_NO_POWER_SEQUENCING)
1574 init_data.flags.edp_no_power_sequencing = true;
1576 if (amdgpu_dc_feature_mask & DC_DISABLE_LTTPR_DP1_4A)
1577 init_data.flags.allow_lttpr_non_transparent_mode.bits.DP1_4A = true;
1578 if (amdgpu_dc_feature_mask & DC_DISABLE_LTTPR_DP2_0)
1579 init_data.flags.allow_lttpr_non_transparent_mode.bits.DP2_0 = true;
1581 init_data.flags.seamless_boot_edp_requested = false;
1583 if (check_seamless_boot_capability(adev)) {
1584 init_data.flags.seamless_boot_edp_requested = true;
1585 init_data.flags.allow_seamless_boot_optimization = true;
1586 DRM_INFO("Seamless boot condition check passed\n");
1589 init_data.flags.enable_mipi_converter_optimization = true;
1591 init_data.dcn_reg_offsets = adev->reg_offset[DCE_HWIP][0];
1592 init_data.nbio_reg_offsets = adev->reg_offset[NBIO_HWIP][0];
1594 INIT_LIST_HEAD(&adev->dm.da_list);
1596 retrieve_dmi_info(&adev->dm);
1598 /* Display Core create. */
1599 adev->dm.dc = dc_create(&init_data);
1602 DRM_INFO("Display Core initialized with v%s!\n", DC_VER);
1604 DRM_INFO("Display Core failed to initialize with v%s!\n", DC_VER);
1608 if (amdgpu_dc_debug_mask & DC_DISABLE_PIPE_SPLIT) {
1609 adev->dm.dc->debug.force_single_disp_pipe_split = false;
1610 adev->dm.dc->debug.pipe_split_policy = MPC_SPLIT_AVOID;
1613 if (adev->asic_type != CHIP_CARRIZO && adev->asic_type != CHIP_STONEY)
1614 adev->dm.dc->debug.disable_stutter = amdgpu_pp_feature_mask & PP_STUTTER_MODE ? false : true;
1615 if (dm_should_disable_stutter(adev->pdev))
1616 adev->dm.dc->debug.disable_stutter = true;
1618 if (amdgpu_dc_debug_mask & DC_DISABLE_STUTTER)
1619 adev->dm.dc->debug.disable_stutter = true;
1621 if (amdgpu_dc_debug_mask & DC_DISABLE_DSC) {
1622 adev->dm.dc->debug.disable_dsc = true;
1623 adev->dm.dc->debug.disable_dsc_edp = true;
1626 if (amdgpu_dc_debug_mask & DC_DISABLE_CLOCK_GATING)
1627 adev->dm.dc->debug.disable_clock_gate = true;
1629 if (amdgpu_dc_debug_mask & DC_FORCE_SUBVP_MCLK_SWITCH)
1630 adev->dm.dc->debug.force_subvp_mclk_switch = true;
1632 r = dm_dmub_hw_init(adev);
1634 DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
1638 dc_hardware_init(adev->dm.dc);
1640 adev->dm.hpd_rx_offload_wq = hpd_rx_irq_create_workqueue(adev->dm.dc);
1641 if (!adev->dm.hpd_rx_offload_wq) {
1642 DRM_ERROR("amdgpu: failed to create hpd rx offload workqueue.\n");
1646 if ((adev->flags & AMD_IS_APU) && (adev->asic_type >= CHIP_CARRIZO)) {
1647 struct dc_phy_addr_space_config pa_config;
1649 mmhub_read_system_context(adev, &pa_config);
1651 // Call the DC init_memory func
1652 dc_setup_system_context(adev->dm.dc, &pa_config);
1655 adev->dm.freesync_module = mod_freesync_create(adev->dm.dc);
1656 if (!adev->dm.freesync_module) {
1658 "amdgpu: failed to initialize freesync_module.\n");
1660 DRM_DEBUG_DRIVER("amdgpu: freesync_module init done %p.\n",
1661 adev->dm.freesync_module);
1663 amdgpu_dm_init_color_mod();
1665 if (adev->dm.dc->caps.max_links > 0) {
1666 adev->dm.vblank_control_workqueue =
1667 create_singlethread_workqueue("dm_vblank_control_workqueue");
1668 if (!adev->dm.vblank_control_workqueue)
1669 DRM_ERROR("amdgpu: failed to initialize vblank_workqueue.\n");
1672 #ifdef CONFIG_DRM_AMD_DC_HDCP
1673 if (adev->dm.dc->caps.max_links > 0 && adev->family >= AMDGPU_FAMILY_RV) {
1674 adev->dm.hdcp_workqueue = hdcp_create_workqueue(adev, &init_params.cp_psp, adev->dm.dc);
1676 if (!adev->dm.hdcp_workqueue)
1677 DRM_ERROR("amdgpu: failed to initialize hdcp_workqueue.\n");
1679 DRM_DEBUG_DRIVER("amdgpu: hdcp_workqueue init done %p.\n", adev->dm.hdcp_workqueue);
1681 dc_init_callbacks(adev->dm.dc, &init_params);
1684 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
1685 adev->dm.crc_rd_wrk = amdgpu_dm_crtc_secure_display_create_work();
1687 if (dc_is_dmub_outbox_supported(adev->dm.dc)) {
1688 init_completion(&adev->dm.dmub_aux_transfer_done);
1689 adev->dm.dmub_notify = kzalloc(sizeof(struct dmub_notification), GFP_KERNEL);
1690 if (!adev->dm.dmub_notify) {
1691 DRM_INFO("amdgpu: fail to allocate adev->dm.dmub_notify");
1695 adev->dm.delayed_hpd_wq = create_singlethread_workqueue("amdgpu_dm_hpd_wq");
1696 if (!adev->dm.delayed_hpd_wq) {
1697 DRM_ERROR("amdgpu: failed to create hpd offload workqueue.\n");
1701 amdgpu_dm_outbox_init(adev);
1702 if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_AUX_REPLY,
1703 dmub_aux_setconfig_callback, false)) {
1704 DRM_ERROR("amdgpu: fail to register dmub aux callback");
1707 if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_HPD, dmub_hpd_callback, true)) {
1708 DRM_ERROR("amdgpu: fail to register dmub hpd callback");
1711 if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_HPD_IRQ, dmub_hpd_callback, true)) {
1712 DRM_ERROR("amdgpu: fail to register dmub hpd callback");
1717 if (amdgpu_dm_initialize_drm_device(adev)) {
1719 "amdgpu: failed to initialize sw for display support.\n");
1723 /* Enable outbox notification only after IRQ handlers are registered and DMUB is alive.
1724 * It is expected that DMUB will resend any pending notifications at this point, for
1725 * example HPD from DPIA.
1727 if (dc_is_dmub_outbox_supported(adev->dm.dc))
1728 dc_enable_dmub_outbox(adev->dm.dc);
1730 /* create fake encoders for MST */
1731 dm_dp_create_fake_mst_encoders(adev);
1733 /* TODO: Add_display_info? */
1735 /* TODO use dynamic cursor width */
1736 adev_to_drm(adev)->mode_config.cursor_width = adev->dm.dc->caps.max_cursor_size;
1737 adev_to_drm(adev)->mode_config.cursor_height = adev->dm.dc->caps.max_cursor_size;
1739 if (drm_vblank_init(adev_to_drm(adev), adev->dm.display_indexes_num)) {
1741 "amdgpu: failed to initialize sw for display support.\n");
	DRM_DEBUG_DRIVER("KMS initialized.\n");

	return 0;

error:
	amdgpu_dm_fini(adev);

	return -EINVAL;
}
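/*
 * Every failure path in amdgpu_dm_init() funnels through the error label
 * above, so amdgpu_dm_fini() is written to tolerate partially initialized
 * state (note the pointer checks before each teardown step below).
 */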
static int amdgpu_dm_early_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	amdgpu_dm_audio_fini(adev);

	return 0;
}
1764 static void amdgpu_dm_fini(struct amdgpu_device *adev)
1768 if (adev->dm.vblank_control_workqueue) {
1769 destroy_workqueue(adev->dm.vblank_control_workqueue);
1770 adev->dm.vblank_control_workqueue = NULL;
1773 for (i = 0; i < adev->dm.display_indexes_num; i++) {
1774 drm_encoder_cleanup(&adev->dm.mst_encoders[i].base);
1777 amdgpu_dm_destroy_drm_device(&adev->dm);
1779 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
1780 if (adev->dm.crc_rd_wrk) {
1781 flush_work(&adev->dm.crc_rd_wrk->notify_ta_work);
1782 kfree(adev->dm.crc_rd_wrk);
1783 adev->dm.crc_rd_wrk = NULL;
1786 #ifdef CONFIG_DRM_AMD_DC_HDCP
1787 if (adev->dm.hdcp_workqueue) {
1788 hdcp_destroy(&adev->dev->kobj, adev->dm.hdcp_workqueue);
1789 adev->dm.hdcp_workqueue = NULL;
1793 dc_deinit_callbacks(adev->dm.dc);
1796 dc_dmub_srv_destroy(&adev->dm.dc->ctx->dmub_srv);
1798 if (dc_enable_dmub_notifications(adev->dm.dc)) {
1799 kfree(adev->dm.dmub_notify);
1800 adev->dm.dmub_notify = NULL;
1801 destroy_workqueue(adev->dm.delayed_hpd_wq);
1802 adev->dm.delayed_hpd_wq = NULL;
1805 if (adev->dm.dmub_bo)
1806 amdgpu_bo_free_kernel(&adev->dm.dmub_bo,
1807 &adev->dm.dmub_bo_gpu_addr,
1808 &adev->dm.dmub_bo_cpu_addr);
1810 if (adev->dm.hpd_rx_offload_wq) {
1811 for (i = 0; i < adev->dm.dc->caps.max_links; i++) {
1812 if (adev->dm.hpd_rx_offload_wq[i].wq) {
1813 destroy_workqueue(adev->dm.hpd_rx_offload_wq[i].wq);
1814 adev->dm.hpd_rx_offload_wq[i].wq = NULL;
1818 kfree(adev->dm.hpd_rx_offload_wq);
1819 adev->dm.hpd_rx_offload_wq = NULL;
1822 /* DC Destroy TODO: Replace destroy DAL */
1824 dc_destroy(&adev->dm.dc);
1826 * TODO: pageflip, vlank interrupt
1828 * amdgpu_dm_irq_fini(adev);
1831 if (adev->dm.cgs_device) {
1832 amdgpu_cgs_destroy_device(adev->dm.cgs_device);
1833 adev->dm.cgs_device = NULL;
1835 if (adev->dm.freesync_module) {
1836 mod_freesync_destroy(adev->dm.freesync_module);
1837 adev->dm.freesync_module = NULL;
1840 mutex_destroy(&adev->dm.audio_lock);
1841 mutex_destroy(&adev->dm.dc_lock);
1846 static int load_dmcu_fw(struct amdgpu_device *adev)
1848 const char *fw_name_dmcu = NULL;
1850 const struct dmcu_firmware_header_v1_0 *hdr;
1852 switch(adev->asic_type) {
1853 #if defined(CONFIG_DRM_AMD_DC_SI)
1868 case CHIP_POLARIS11:
1869 case CHIP_POLARIS10:
1870 case CHIP_POLARIS12:
1877 fw_name_dmcu = FIRMWARE_NAVI12_DMCU;
1880 if (ASICREV_IS_PICASSO(adev->external_rev_id))
1881 fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
1882 else if (ASICREV_IS_RAVEN2(adev->external_rev_id))
1883 fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
1888 switch (adev->ip_versions[DCE_HWIP][0]) {
1889 case IP_VERSION(2, 0, 2):
1890 case IP_VERSION(2, 0, 3):
1891 case IP_VERSION(2, 0, 0):
1892 case IP_VERSION(2, 1, 0):
1893 case IP_VERSION(3, 0, 0):
1894 case IP_VERSION(3, 0, 2):
1895 case IP_VERSION(3, 0, 3):
1896 case IP_VERSION(3, 0, 1):
1897 case IP_VERSION(3, 1, 2):
1898 case IP_VERSION(3, 1, 3):
1899 case IP_VERSION(3, 1, 5):
1900 case IP_VERSION(3, 1, 6):
1901 case IP_VERSION(3, 2, 0):
1902 case IP_VERSION(3, 2, 1):
1907 DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
1911 if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
1912 DRM_DEBUG_KMS("dm: DMCU firmware not supported on direct or SMU loading\n");
1916 r = request_firmware_direct(&adev->dm.fw_dmcu, fw_name_dmcu, adev->dev);
1918 /* DMCU firmware is not necessary, so don't raise a fuss if it's missing */
1919 DRM_DEBUG_KMS("dm: DMCU firmware not found\n");
1920 adev->dm.fw_dmcu = NULL;
1924 dev_err(adev->dev, "amdgpu_dm: Can't load firmware \"%s\"\n",
1929 r = amdgpu_ucode_validate(adev->dm.fw_dmcu);
1931 dev_err(adev->dev, "amdgpu_dm: Can't validate firmware \"%s\"\n",
1933 release_firmware(adev->dm.fw_dmcu);
1934 adev->dm.fw_dmcu = NULL;
1938 hdr = (const struct dmcu_firmware_header_v1_0 *)adev->dm.fw_dmcu->data;
1939 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].ucode_id = AMDGPU_UCODE_ID_DMCU_ERAM;
1940 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].fw = adev->dm.fw_dmcu;
1941 adev->firmware.fw_size +=
1942 ALIGN(le32_to_cpu(hdr->header.ucode_size_bytes) - le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);
1944 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].ucode_id = AMDGPU_UCODE_ID_DMCU_INTV;
1945 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].fw = adev->dm.fw_dmcu;
1946 adev->firmware.fw_size +=
1947 ALIGN(le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);
1949 adev->dm.dmcu_fw_version = le32_to_cpu(hdr->header.ucode_version);
1951 DRM_DEBUG_KMS("PSP loading DMCU firmware\n");
static uint32_t amdgpu_dm_dmub_reg_read(void *ctx, uint32_t address)
{
	struct amdgpu_device *adev = ctx;

	return dm_read_reg(adev->dm.dc->ctx, address);
}

static void amdgpu_dm_dmub_reg_write(void *ctx, uint32_t address,
				     uint32_t value)
{
	struct amdgpu_device *adev = ctx;

	return dm_write_reg(adev->dm.dc->ctx, address, value);
}
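/*
 * These two helpers are the register access callbacks handed to the DMUB
 * service via create_params.funcs.reg_read / reg_write in dm_dmub_sw_init()
 * below.
 */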
1971 static int dm_dmub_sw_init(struct amdgpu_device *adev)
1973 struct dmub_srv_create_params create_params;
1974 struct dmub_srv_region_params region_params;
1975 struct dmub_srv_region_info region_info;
1976 struct dmub_srv_fb_params fb_params;
1977 struct dmub_srv_fb_info *fb_info;
1978 struct dmub_srv *dmub_srv;
1979 const struct dmcub_firmware_header_v1_0 *hdr;
1980 const char *fw_name_dmub;
1981 enum dmub_asic dmub_asic;
1982 enum dmub_status status;
1985 switch (adev->ip_versions[DCE_HWIP][0]) {
1986 case IP_VERSION(2, 1, 0):
1987 dmub_asic = DMUB_ASIC_DCN21;
1988 fw_name_dmub = FIRMWARE_RENOIR_DMUB;
1989 if (ASICREV_IS_GREEN_SARDINE(adev->external_rev_id))
1990 fw_name_dmub = FIRMWARE_GREEN_SARDINE_DMUB;
1992 case IP_VERSION(3, 0, 0):
1993 if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(10, 3, 0)) {
1994 dmub_asic = DMUB_ASIC_DCN30;
1995 fw_name_dmub = FIRMWARE_SIENNA_CICHLID_DMUB;
1997 dmub_asic = DMUB_ASIC_DCN30;
1998 fw_name_dmub = FIRMWARE_NAVY_FLOUNDER_DMUB;
2001 case IP_VERSION(3, 0, 1):
2002 dmub_asic = DMUB_ASIC_DCN301;
2003 fw_name_dmub = FIRMWARE_VANGOGH_DMUB;
2005 case IP_VERSION(3, 0, 2):
2006 dmub_asic = DMUB_ASIC_DCN302;
2007 fw_name_dmub = FIRMWARE_DIMGREY_CAVEFISH_DMUB;
2009 case IP_VERSION(3, 0, 3):
2010 dmub_asic = DMUB_ASIC_DCN303;
2011 fw_name_dmub = FIRMWARE_BEIGE_GOBY_DMUB;
2013 case IP_VERSION(3, 1, 2):
2014 case IP_VERSION(3, 1, 3):
2015 dmub_asic = (adev->external_rev_id == YELLOW_CARP_B0) ? DMUB_ASIC_DCN31B : DMUB_ASIC_DCN31;
2016 fw_name_dmub = FIRMWARE_YELLOW_CARP_DMUB;
2018 case IP_VERSION(3, 1, 4):
2019 dmub_asic = DMUB_ASIC_DCN314;
2020 fw_name_dmub = FIRMWARE_DCN_314_DMUB;
2022 case IP_VERSION(3, 1, 5):
2023 dmub_asic = DMUB_ASIC_DCN315;
2024 fw_name_dmub = FIRMWARE_DCN_315_DMUB;
2026 case IP_VERSION(3, 1, 6):
2027 dmub_asic = DMUB_ASIC_DCN316;
2028 fw_name_dmub = FIRMWARE_DCN316_DMUB;
2030 case IP_VERSION(3, 2, 0):
2031 dmub_asic = DMUB_ASIC_DCN32;
2032 fw_name_dmub = FIRMWARE_DCN_V3_2_0_DMCUB;
2034 case IP_VERSION(3, 2, 1):
2035 dmub_asic = DMUB_ASIC_DCN321;
2036 fw_name_dmub = FIRMWARE_DCN_V3_2_1_DMCUB;
2039 /* ASIC doesn't support DMUB. */
2043 r = request_firmware_direct(&adev->dm.dmub_fw, fw_name_dmub, adev->dev);
2045 DRM_ERROR("DMUB firmware loading failed: %d\n", r);
2049 r = amdgpu_ucode_validate(adev->dm.dmub_fw);
2051 DRM_ERROR("Couldn't validate DMUB firmware: %d\n", r);
2055 hdr = (const struct dmcub_firmware_header_v1_0 *)adev->dm.dmub_fw->data;
2056 adev->dm.dmcub_fw_version = le32_to_cpu(hdr->header.ucode_version);
2058 if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
2059 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].ucode_id =
2060 AMDGPU_UCODE_ID_DMCUB;
2061 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].fw =
2063 adev->firmware.fw_size +=
2064 ALIGN(le32_to_cpu(hdr->inst_const_bytes), PAGE_SIZE);
2066 DRM_INFO("Loading DMUB firmware via PSP: version=0x%08X\n",
2067 adev->dm.dmcub_fw_version);
2071 adev->dm.dmub_srv = kzalloc(sizeof(*adev->dm.dmub_srv), GFP_KERNEL);
2072 dmub_srv = adev->dm.dmub_srv;
2075 DRM_ERROR("Failed to allocate DMUB service!\n");
2079 memset(&create_params, 0, sizeof(create_params));
2080 create_params.user_ctx = adev;
2081 create_params.funcs.reg_read = amdgpu_dm_dmub_reg_read;
2082 create_params.funcs.reg_write = amdgpu_dm_dmub_reg_write;
2083 create_params.asic = dmub_asic;
2085 /* Create the DMUB service. */
2086 status = dmub_srv_create(dmub_srv, &create_params);
2087 if (status != DMUB_STATUS_OK) {
2088 DRM_ERROR("Error creating DMUB service: %d\n", status);
2092 /* Calculate the size of all the regions for the DMUB service. */
2093 memset(&region_params, 0, sizeof(region_params));
2095 region_params.inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
2096 PSP_HEADER_BYTES - PSP_FOOTER_BYTES;
2097 region_params.bss_data_size = le32_to_cpu(hdr->bss_data_bytes);
2098 region_params.vbios_size = adev->bios_size;
2099 region_params.fw_bss_data = region_params.bss_data_size ?
2100 adev->dm.dmub_fw->data +
2101 le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
2102 le32_to_cpu(hdr->inst_const_bytes) : NULL;
2103 region_params.fw_inst_const =
2104 adev->dm.dmub_fw->data +
2105 le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
2108 status = dmub_srv_calc_region_info(dmub_srv, &region_params,
2111 if (status != DMUB_STATUS_OK) {
2112 DRM_ERROR("Error calculating DMUB region info: %d\n", status);
2117 * Allocate a framebuffer based on the total size of all the regions.
2118 * TODO: Move this into GART.
2120 r = amdgpu_bo_create_kernel(adev, region_info.fb_size, PAGE_SIZE,
2121 AMDGPU_GEM_DOMAIN_VRAM, &adev->dm.dmub_bo,
2122 &adev->dm.dmub_bo_gpu_addr,
2123 &adev->dm.dmub_bo_cpu_addr);
2127 /* Rebase the regions on the framebuffer address. */
2128 memset(&fb_params, 0, sizeof(fb_params));
2129 fb_params.cpu_addr = adev->dm.dmub_bo_cpu_addr;
2130 fb_params.gpu_addr = adev->dm.dmub_bo_gpu_addr;
2131 fb_params.region_info = &region_info;
2133 adev->dm.dmub_fb_info =
2134 kzalloc(sizeof(*adev->dm.dmub_fb_info), GFP_KERNEL);
2135 fb_info = adev->dm.dmub_fb_info;
2139 "Failed to allocate framebuffer info for DMUB service!\n");
2143 status = dmub_srv_calc_fb_info(dmub_srv, &fb_params, fb_info);
2144 if (status != DMUB_STATUS_OK) {
2145 DRM_ERROR("Error calculating DMUB FB info: %d\n", status);
2152 static int dm_sw_init(void *handle)
2154 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2157 r = dm_dmub_sw_init(adev);
2161 return load_dmcu_fw(adev);
2164 static int dm_sw_fini(void *handle)
2166 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2168 kfree(adev->dm.dmub_fb_info);
2169 adev->dm.dmub_fb_info = NULL;
2171 if (adev->dm.dmub_srv) {
2172 dmub_srv_destroy(adev->dm.dmub_srv);
2173 adev->dm.dmub_srv = NULL;
2176 release_firmware(adev->dm.dmub_fw);
2177 adev->dm.dmub_fw = NULL;
2179 release_firmware(adev->dm.fw_dmcu);
2180 adev->dm.fw_dmcu = NULL;
2185 static int detect_mst_link_for_all_connectors(struct drm_device *dev)
2187 struct amdgpu_dm_connector *aconnector;
2188 struct drm_connector *connector;
2189 struct drm_connector_list_iter iter;
2192 drm_connector_list_iter_begin(dev, &iter);
2193 drm_for_each_connector_iter(connector, &iter) {
2194 aconnector = to_amdgpu_dm_connector(connector);
2195 if (aconnector->dc_link->type == dc_connection_mst_branch &&
2196 aconnector->mst_mgr.aux) {
2197 DRM_DEBUG_DRIVER("DM_MST: starting TM on aconnector: %p [id: %d]\n",
2199 aconnector->base.base.id);
2201 ret = drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, true);
2203 DRM_ERROR("DM_MST: Failed to start MST\n");
2204 aconnector->dc_link->type =
2205 dc_connection_single;
2210 drm_connector_list_iter_end(&iter);
2215 static int dm_late_init(void *handle)
2217 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2219 struct dmcu_iram_parameters params;
2220 unsigned int linear_lut[16];
2222 struct dmcu *dmcu = NULL;
2224 dmcu = adev->dm.dc->res_pool->dmcu;
2226 for (i = 0; i < 16; i++)
2227 linear_lut[i] = 0xFFFF * i / 15;
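/* For illustration, the ramp built above spans the full 16-bit range:
 * linear_lut[0] = 0x0000, linear_lut[8] = 0x8888, linear_lut[15] = 0xFFFF.
 */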
2230 params.backlight_ramping_override = false;
2231 params.backlight_ramping_start = 0xCCCC;
2232 params.backlight_ramping_reduction = 0xCCCCCCCC;
2233 params.backlight_lut_array_size = 16;
2234 params.backlight_lut_array = linear_lut;
2236 /* Min backlight level after ABM reduction; don't allow below 1%:
2237 * 0xFFFF * 0.01 = 0x28F
2239 params.min_abm_backlight = 0x28F;
2240 /* In the case where ABM is implemented on dmcub,
2241 * the dmcu object will be NULL.
2242 * ABM 2.4 and up are implemented on dmcub.
2245 if (!dmcu_load_iram(dmcu, params))
2247 } else if (adev->dm.dc->ctx->dmub_srv) {
2248 struct dc_link *edp_links[MAX_NUM_EDP];
2251 get_edp_links(adev->dm.dc, edp_links, &edp_num);
2252 for (i = 0; i < edp_num; i++) {
2253 if (!dmub_init_abm_config(adev->dm.dc->res_pool, params, i))
2258 return detect_mst_link_for_all_connectors(adev_to_drm(adev));
2261 static void s3_handle_mst(struct drm_device *dev, bool suspend)
2263 struct amdgpu_dm_connector *aconnector;
2264 struct drm_connector *connector;
2265 struct drm_connector_list_iter iter;
2266 struct drm_dp_mst_topology_mgr *mgr;
2268 bool need_hotplug = false;
2270 drm_connector_list_iter_begin(dev, &iter);
2271 drm_for_each_connector_iter(connector, &iter) {
2272 aconnector = to_amdgpu_dm_connector(connector);
2273 if (aconnector->dc_link->type != dc_connection_mst_branch ||
2274 aconnector->mst_port)
2277 mgr = &aconnector->mst_mgr;
2280 drm_dp_mst_topology_mgr_suspend(mgr);
2282 ret = drm_dp_mst_topology_mgr_resume(mgr, true);
2284 dm_helpers_dp_mst_stop_top_mgr(aconnector->dc_link->ctx,
2285 aconnector->dc_link);
2286 need_hotplug = true;
2290 drm_connector_list_iter_end(&iter);
2293 drm_kms_helper_hotplug_event(dev);
2296 static int amdgpu_dm_smu_write_watermarks_table(struct amdgpu_device *adev)
2300 /* This interface is for dGPU Navi1x. The Linux dc-pplib interface depends
2301 * on the Windows driver dc implementation.
2302 * For Navi1x, clock settings of dcn watermarks are fixed. The settings
2303 * should be passed to smu during boot up and resume from s3.
2304 * boot up: dc calculate dcn watermark clock settings within dc_create,
2305 * dcn20_resource_construct
2306 * then call pplib functions below to pass the settings to smu:
2307 * smu_set_watermarks_for_clock_ranges
2308 * smu_set_watermarks_table
2309 * navi10_set_watermarks_table
2310 * smu_write_watermarks_table
2312 * For Renoir, clock settings of dcn watermarks are also fixed values.
2313 * dc has implemented a different flow for the Windows driver:
2314 * dc_hardware_init / dc_set_power_state
2319 * smu_set_watermarks_for_clock_ranges
2320 * renoir_set_watermarks_table
2321 * smu_write_watermarks_table
2324 * dc_hardware_init -> amdgpu_dm_init
2325 * dc_set_power_state --> dm_resume
2327 * Therefore, this function applies to navi10/12/14 but not Renoir.
2330 switch (adev->ip_versions[DCE_HWIP][0]) {
2331 case IP_VERSION(2, 0, 2):
2332 case IP_VERSION(2, 0, 0):
2338 ret = amdgpu_dpm_write_watermarks_table(adev);
2340 DRM_ERROR("Failed to update WMTABLE!\n");
2348 * dm_hw_init() - Initialize DC device
2349 * @handle: The base driver device containing the amdgpu_dm device.
2351 * Initialize the &struct amdgpu_display_manager device. This involves calling
2352 * the initializers of each DM component, then populating the struct with them.
2354 * Although the function implies hardware initialization, both hardware and
2355 * software are initialized here. Splitting them out to their relevant init
2356 * hooks is a future TODO item.
2358 * Some notable things that are initialized here:
2360 * - Display Core, both software and hardware
2361 * - DC modules that we need (freesync and color management)
2362 * - DRM software states
2363 * - Interrupt sources and handlers
2365 * - Debug FS entries, if enabled
2367 static int dm_hw_init(void *handle)
2369 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2370 /* Create DAL display manager */
2371 amdgpu_dm_init(adev);
2372 amdgpu_dm_hpd_init(adev);
2378 * dm_hw_fini() - Teardown DC device
2379 * @handle: The base driver device containing the amdgpu_dm device.
2381 * Teardown components within &struct amdgpu_display_manager that require
2382 * cleanup. This involves cleaning up the DRM device, DC, and any modules that
2383 * were loaded. Also flush IRQ workqueues and disable them.
2385 static int dm_hw_fini(void *handle)
2387 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2389 amdgpu_dm_hpd_fini(adev);
2391 amdgpu_dm_irq_fini(adev);
2392 amdgpu_dm_fini(adev);
2397 static int dm_enable_vblank(struct drm_crtc *crtc);
2398 static void dm_disable_vblank(struct drm_crtc *crtc);
2400 static void dm_gpureset_toggle_interrupts(struct amdgpu_device *adev,
2401 struct dc_state *state, bool enable)
2403 enum dc_irq_source irq_source;
2404 struct amdgpu_crtc *acrtc;
2408 for (i = 0; i < state->stream_count; i++) {
2409 acrtc = get_crtc_by_otg_inst(
2410 adev, state->stream_status[i].primary_otg_inst);
2412 if (acrtc && state->stream_status[i].plane_count != 0) {
2413 irq_source = IRQ_TYPE_PFLIP + acrtc->otg_inst;
2414 rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
2415 DRM_DEBUG_VBL("crtc %d - pflip irq %sabling: r=%d\n",
2416 acrtc->crtc_id, enable ? "en" : "dis", rc);
2418 DRM_WARN("Failed to %s pflip interrupts\n",
2419 enable ? "enable" : "disable");
2422 rc = dm_enable_vblank(&acrtc->base);
2424 DRM_WARN("Failed to enable vblank interrupts\n");
2426 dm_disable_vblank(&acrtc->base);
2434 static enum dc_status amdgpu_dm_commit_zero_streams(struct dc *dc)
2436 struct dc_state *context = NULL;
2437 enum dc_status res = DC_ERROR_UNEXPECTED;
2439 struct dc_stream_state *del_streams[MAX_PIPES];
2440 int del_streams_count = 0;
2442 memset(del_streams, 0, sizeof(del_streams));
2444 context = dc_create_state(dc);
2445 if (context == NULL)
2446 goto context_alloc_fail;
2448 dc_resource_state_copy_construct_current(dc, context);
2450 /* First remove all streams from the context */
2451 for (i = 0; i < context->stream_count; i++) {
2452 struct dc_stream_state *stream = context->streams[i];
2454 del_streams[del_streams_count++] = stream;
2457 /* Remove all planes for removed streams and then remove the streams */
2458 for (i = 0; i < del_streams_count; i++) {
2459 if (!dc_rem_all_planes_for_stream(dc, del_streams[i], context)) {
2460 res = DC_FAIL_DETACH_SURFACES;
2464 res = dc_remove_stream_from_ctx(dc, context, del_streams[i]);
2469 res = dc_commit_state(dc, context);
2472 dc_release_state(context);
2478 static void hpd_rx_irq_work_suspend(struct amdgpu_display_manager *dm)
2482 if (dm->hpd_rx_offload_wq) {
2483 for (i = 0; i < dm->dc->caps.max_links; i++)
2484 flush_workqueue(dm->hpd_rx_offload_wq[i].wq);
2488 static int dm_suspend(void *handle)
2490 struct amdgpu_device *adev = handle;
2491 struct amdgpu_display_manager *dm = &adev->dm;
2494 if (amdgpu_in_reset(adev)) {
2495 mutex_lock(&dm->dc_lock);
2497 dc_allow_idle_optimizations(adev->dm.dc, false);
2499 dm->cached_dc_state = dc_copy_state(dm->dc->current_state);
2501 dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, false);
2503 amdgpu_dm_commit_zero_streams(dm->dc);
2505 amdgpu_dm_irq_suspend(adev);
2507 hpd_rx_irq_work_suspend(dm);
2512 WARN_ON(adev->dm.cached_state);
2513 adev->dm.cached_state = drm_atomic_helper_suspend(adev_to_drm(adev));
2515 s3_handle_mst(adev_to_drm(adev), true);
2517 amdgpu_dm_irq_suspend(adev);
2519 hpd_rx_irq_work_suspend(dm);
2521 dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D3);
2526 struct amdgpu_dm_connector *
2527 amdgpu_dm_find_first_crtc_matching_connector(struct drm_atomic_state *state,
2528 struct drm_crtc *crtc)
2531 struct drm_connector_state *new_con_state;
2532 struct drm_connector *connector;
2533 struct drm_crtc *crtc_from_state;
2535 for_each_new_connector_in_state(state, connector, new_con_state, i) {
2536 crtc_from_state = new_con_state->crtc;
2538 if (crtc_from_state == crtc)
2539 return to_amdgpu_dm_connector(connector);
2545 static void emulated_link_detect(struct dc_link *link)
2547 struct dc_sink_init_data sink_init_data = { 0 };
2548 struct display_sink_capability sink_caps = { 0 };
2549 enum dc_edid_status edid_status;
2550 struct dc_context *dc_ctx = link->ctx;
2551 struct dc_sink *sink = NULL;
2552 struct dc_sink *prev_sink = NULL;
2554 link->type = dc_connection_none;
2555 prev_sink = link->local_sink;
2558 dc_sink_release(prev_sink);
2560 switch (link->connector_signal) {
2561 case SIGNAL_TYPE_HDMI_TYPE_A: {
2562 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2563 sink_caps.signal = SIGNAL_TYPE_HDMI_TYPE_A;
2567 case SIGNAL_TYPE_DVI_SINGLE_LINK: {
2568 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2569 sink_caps.signal = SIGNAL_TYPE_DVI_SINGLE_LINK;
2573 case SIGNAL_TYPE_DVI_DUAL_LINK: {
2574 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2575 sink_caps.signal = SIGNAL_TYPE_DVI_DUAL_LINK;
2579 case SIGNAL_TYPE_LVDS: {
2580 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2581 sink_caps.signal = SIGNAL_TYPE_LVDS;
2585 case SIGNAL_TYPE_EDP: {
2586 sink_caps.transaction_type =
2587 DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
2588 sink_caps.signal = SIGNAL_TYPE_EDP;
2592 case SIGNAL_TYPE_DISPLAY_PORT: {
2593 sink_caps.transaction_type =
2594 DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
2595 sink_caps.signal = SIGNAL_TYPE_VIRTUAL;
2600 DC_ERROR("Invalid connector type! signal:%d\n",
2601 link->connector_signal);
2605 sink_init_data.link = link;
2606 sink_init_data.sink_signal = sink_caps.signal;
2608 sink = dc_sink_create(&sink_init_data);
2610 DC_ERROR("Failed to create sink!\n");
2614 /* dc_sink_create returns a new reference */
2615 link->local_sink = sink;
2617 edid_status = dm_helpers_read_local_edid(
2622 if (edid_status != EDID_OK)
2623 DC_ERROR("Failed to read EDID");
2627 static void dm_gpureset_commit_state(struct dc_state *dc_state,
2628 struct amdgpu_display_manager *dm)
2631 struct dc_surface_update surface_updates[MAX_SURFACES];
2632 struct dc_plane_info plane_infos[MAX_SURFACES];
2633 struct dc_scaling_info scaling_infos[MAX_SURFACES];
2634 struct dc_flip_addrs flip_addrs[MAX_SURFACES];
2635 struct dc_stream_update stream_update;
2639 bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
2642 dm_error("Failed to allocate update bundle\n");
2646 for (k = 0; k < dc_state->stream_count; k++) {
2647 bundle->stream_update.stream = dc_state->streams[k];
2649 for (m = 0; m < dc_state->stream_status->plane_count; m++) {
2650 bundle->surface_updates[m].surface =
2651 dc_state->stream_status->plane_states[m];
2652 bundle->surface_updates[m].surface->force_full_update =
2655 dc_commit_updates_for_stream(
2656 dm->dc, bundle->surface_updates,
2657 dc_state->stream_status->plane_count,
2658 dc_state->streams[k], &bundle->stream_update, dc_state);
2667 static int dm_resume(void *handle)
2669 struct amdgpu_device *adev = handle;
2670 struct drm_device *ddev = adev_to_drm(adev);
2671 struct amdgpu_display_manager *dm = &adev->dm;
2672 struct amdgpu_dm_connector *aconnector;
2673 struct drm_connector *connector;
2674 struct drm_connector_list_iter iter;
2675 struct drm_crtc *crtc;
2676 struct drm_crtc_state *new_crtc_state;
2677 struct dm_crtc_state *dm_new_crtc_state;
2678 struct drm_plane *plane;
2679 struct drm_plane_state *new_plane_state;
2680 struct dm_plane_state *dm_new_plane_state;
2681 struct dm_atomic_state *dm_state = to_dm_atomic_state(dm->atomic_obj.state);
2682 enum dc_connection_type new_connection_type = dc_connection_none;
2683 struct dc_state *dc_state;
2686 if (amdgpu_in_reset(adev)) {
2687 dc_state = dm->cached_dc_state;
2690 * The dc->current_state is backed up into dm->cached_dc_state
2691 * before we commit 0 streams.
2693 * DC will clear link encoder assignments on the real state
2694 * but the changes won't propagate over to the copy we made
2695 * before the 0 streams commit.
2697 * DC expects that link encoder assignments are *not* valid
2698 * when committing a state, so as a workaround we can copy
2699 * off of the current state.
2701 * We lose the previous assignments, but we had already
2702 * committed 0 streams anyway.
2704 link_enc_cfg_copy(adev->dm.dc->current_state, dc_state);
2706 r = dm_dmub_hw_init(adev);
2708 DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
2710 dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
2713 amdgpu_dm_irq_resume_early(adev);
2715 for (i = 0; i < dc_state->stream_count; i++) {
2716 dc_state->streams[i]->mode_changed = true;
2717 for (j = 0; j < dc_state->stream_status[i].plane_count; j++) {
2718 dc_state->stream_status[i].plane_states[j]->update_flags.raw
2723 if (dc_is_dmub_outbox_supported(adev->dm.dc)) {
2724 amdgpu_dm_outbox_init(adev);
2725 dc_enable_dmub_outbox(adev->dm.dc);
2728 WARN_ON(!dc_commit_state(dm->dc, dc_state));
2730 dm_gpureset_commit_state(dm->cached_dc_state, dm);
2732 dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, true);
2734 dc_release_state(dm->cached_dc_state);
2735 dm->cached_dc_state = NULL;
2737 amdgpu_dm_irq_resume_late(adev);
2739 mutex_unlock(&dm->dc_lock);
2743 /* Recreate dc_state - DC invalidates it when setting power state to S3. */
2744 dc_release_state(dm_state->context);
2745 dm_state->context = dc_create_state(dm->dc);
2746 /* TODO: Remove dc_state->dccg, use dc->dccg directly. */
2747 dc_resource_state_construct(dm->dc, dm_state->context);
2749 /* Before powering on DC we need to re-initialize DMUB. */
2750 dm_dmub_hw_resume(adev);
2752 /* Re-enable outbox interrupts for DPIA. */
2753 if (dc_is_dmub_outbox_supported(adev->dm.dc)) {
2754 amdgpu_dm_outbox_init(adev);
2755 dc_enable_dmub_outbox(adev->dm.dc);
2758 /* power on hardware */
2759 dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
2761 /* program HPD filter */
2765 * Early enable HPD Rx IRQ; this should be done before set mode, as short
2766 * pulse interrupts are used for MST
2768 amdgpu_dm_irq_resume_early(adev);
2770 /* On resume we need to rewrite the MSTM control bits to enable MST */
2771 s3_handle_mst(ddev, false);
2774 drm_connector_list_iter_begin(ddev, &iter);
2775 drm_for_each_connector_iter(connector, &iter) {
2776 aconnector = to_amdgpu_dm_connector(connector);
2779 * This is the case when traversing through already created
2780 * MST connectors; they should be skipped
2782 if (aconnector->dc_link &&
2783 aconnector->dc_link->type == dc_connection_mst_branch)
2786 mutex_lock(&aconnector->hpd_lock);
2787 if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
2788 DRM_ERROR("KMS: Failed to detect connector\n");
2790 if (aconnector->base.force && new_connection_type == dc_connection_none) {
2791 emulated_link_detect(aconnector->dc_link);
2793 mutex_lock(&dm->dc_lock);
2794 dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD);
2795 mutex_unlock(&dm->dc_lock);
2798 if (aconnector->fake_enable && aconnector->dc_link->local_sink)
2799 aconnector->fake_enable = false;
2801 if (aconnector->dc_sink)
2802 dc_sink_release(aconnector->dc_sink);
2803 aconnector->dc_sink = NULL;
2804 amdgpu_dm_update_connector_after_detect(aconnector);
2805 mutex_unlock(&aconnector->hpd_lock);
2807 drm_connector_list_iter_end(&iter);
2809 /* Force mode set in atomic commit */
2810 for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i)
2811 new_crtc_state->active_changed = true;
2814 * atomic_check is expected to create the dc states. We need to release
2815 * them here, since they were duplicated as part of the suspend
2818 for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i) {
2819 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
2820 if (dm_new_crtc_state->stream) {
2821 WARN_ON(kref_read(&dm_new_crtc_state->stream->refcount) > 1);
2822 dc_stream_release(dm_new_crtc_state->stream);
2823 dm_new_crtc_state->stream = NULL;
2827 for_each_new_plane_in_state(dm->cached_state, plane, new_plane_state, i) {
2828 dm_new_plane_state = to_dm_plane_state(new_plane_state);
2829 if (dm_new_plane_state->dc_state) {
2830 WARN_ON(kref_read(&dm_new_plane_state->dc_state->refcount) > 1);
2831 dc_plane_state_release(dm_new_plane_state->dc_state);
2832 dm_new_plane_state->dc_state = NULL;
2836 drm_atomic_helper_resume(ddev, dm->cached_state);
2838 dm->cached_state = NULL;
2840 amdgpu_dm_irq_resume_late(adev);
2842 amdgpu_dm_smu_write_watermarks_table(adev);
2850 * DM (and consequently DC) is registered in the amdgpu base driver as an IP
2851 * block. When CONFIG_DRM_AMD_DC is enabled, the DM device IP block is added to
2852 * the base driver's device list to be initialized and torn down accordingly.
2854 * The functions to do so are provided as hooks in &struct amd_ip_funcs.
2857 static const struct amd_ip_funcs amdgpu_dm_funcs = {
2859 .early_init = dm_early_init,
2860 .late_init = dm_late_init,
2861 .sw_init = dm_sw_init,
2862 .sw_fini = dm_sw_fini,
2863 .early_fini = amdgpu_dm_early_fini,
2864 .hw_init = dm_hw_init,
2865 .hw_fini = dm_hw_fini,
2866 .suspend = dm_suspend,
2867 .resume = dm_resume,
2868 .is_idle = dm_is_idle,
2869 .wait_for_idle = dm_wait_for_idle,
2870 .check_soft_reset = dm_check_soft_reset,
2871 .soft_reset = dm_soft_reset,
2872 .set_clockgating_state = dm_set_clockgating_state,
2873 .set_powergating_state = dm_set_powergating_state,
2876 const struct amdgpu_ip_block_version dm_ip_block =
2878 .type = AMD_IP_BLOCK_TYPE_DCE,
2882 .funcs = &amdgpu_dm_funcs,
2892 static const struct drm_mode_config_funcs amdgpu_dm_mode_funcs = {
2893 .fb_create = amdgpu_display_user_framebuffer_create,
2894 .get_format_info = amd_get_format_info,
2895 .output_poll_changed = drm_fb_helper_output_poll_changed,
2896 .atomic_check = amdgpu_dm_atomic_check,
2897 .atomic_commit = drm_atomic_helper_commit,
2900 static struct drm_mode_config_helper_funcs amdgpu_dm_mode_config_helperfuncs = {
2901 .atomic_commit_tail = amdgpu_dm_atomic_commit_tail
2904 static void update_connector_ext_caps(struct amdgpu_dm_connector *aconnector)
2906 u32 max_avg, min_cll, max, min, q, r;
2907 struct amdgpu_dm_backlight_caps *caps;
2908 struct amdgpu_display_manager *dm;
2909 struct drm_connector *conn_base;
2910 struct amdgpu_device *adev;
2911 struct dc_link *link = NULL;
2912 static const u8 pre_computed_values[] = {
2913 50, 51, 52, 53, 55, 56, 57, 58, 59, 61, 62, 63, 65, 66, 68, 69,
2914 71, 72, 74, 75, 77, 79, 81, 82, 84, 86, 88, 90, 92, 94, 96, 98};
2917 if (!aconnector || !aconnector->dc_link)
2920 link = aconnector->dc_link;
2921 if (link->connector_signal != SIGNAL_TYPE_EDP)
2924 conn_base = &aconnector->base;
2925 adev = drm_to_adev(conn_base->dev);
2927 for (i = 0; i < dm->num_of_edps; i++) {
2928 if (link == dm->backlight_link[i])
2931 if (i >= dm->num_of_edps)
2933 caps = &dm->backlight_caps[i];
2934 caps->ext_caps = &aconnector->dc_link->dpcd_sink_ext_caps;
2935 caps->aux_support = false;
2936 max_avg = conn_base->hdr_sink_metadata.hdmi_type1.max_fall;
2937 min_cll = conn_base->hdr_sink_metadata.hdmi_type1.min_cll;
2939 if (caps->ext_caps->bits.oled == 1 /*||
2940 caps->ext_caps->bits.sdr_aux_backlight_control == 1 ||
2941 caps->ext_caps->bits.hdr_aux_backlight_control == 1*/)
2942 caps->aux_support = true;
2944 if (amdgpu_backlight == 0)
2945 caps->aux_support = false;
2946 else if (amdgpu_backlight == 1)
2947 caps->aux_support = true;
2949 /* From the specification (CTA-861-G), for calculating the maximum
2950 * luminance we need to use:
2951 * Luminance = 50*2**(CV/32)
2952 * Where CV is a one-byte value.
2953 * For calculating this expression we may need floating point precision;
2954 * to avoid this complexity level, we take advantage that CV is divided
2955 * by a constant. From Euclid's division algorithm, we know that CV
2956 * can be written as: CV = 32*q + r. Next, we replace CV in the
2957 * Luminance expression and get 50*(2**q)*(2**(r/32)), hence we just
2958 * need to pre-compute the values of 50*(2**(r/32)). For pre-computing
2959 * these values we just used the following Ruby line:
2960 * (0...32).each {|cv| puts (50*2**(cv/32.0)).round}
2961 * The results of the above expressions can be verified at
2962 * pre_computed_values.
2966 max = (1 << q) * pre_computed_values[r];
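/* Worked example (illustrative values only): for CV = max_fall = 100 we get
 * q = 3 and r = 4, so max = (1 << 3) * pre_computed_values[4] = 8 * 55 = 440,
 * which matches 50 * 2**(100/32) ~= 436 up to the rounding of the table entry.
 */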
2968 // min luminance: maxLum * (CV/255)^2 / 100
2969 q = DIV_ROUND_CLOSEST(min_cll, 255);
2970 min = max * DIV_ROUND_CLOSEST((q * q), 100);
2972 caps->aux_max_input_signal = max;
2973 caps->aux_min_input_signal = min;
2976 void amdgpu_dm_update_connector_after_detect(
2977 struct amdgpu_dm_connector *aconnector)
2979 struct drm_connector *connector = &aconnector->base;
2980 struct drm_device *dev = connector->dev;
2981 struct dc_sink *sink;
2983 /* MST handled by drm_mst framework */
2984 if (aconnector->mst_mgr.mst_state == true)
2987 sink = aconnector->dc_link->local_sink;
2989 dc_sink_retain(sink);
2992 * EDID mgmt connector gets its first update only in the mode_valid hook, and then
2993 * the connector sink is set to either a fake or a physical sink depending on link status.
2994 * Skip if already done during boot.
2996 if (aconnector->base.force != DRM_FORCE_UNSPECIFIED
2997 && aconnector->dc_em_sink) {
3000 * For S3 resume with a headless setup, use the emulated sink (dc_em_sink) to fake the stream,
3001 * because on resume connector->sink is set to NULL
3003 mutex_lock(&dev->mode_config.mutex);
3006 if (aconnector->dc_sink) {
3007 amdgpu_dm_update_freesync_caps(connector, NULL);
3009 * The retain and release below are used to
3010 * bump up the refcount for the sink because the link doesn't point
3011 * to it anymore after disconnect, so on the next crtc-to-connector
3012 * reshuffle by UMD we would otherwise get an unwanted dc_sink release
3014 dc_sink_release(aconnector->dc_sink);
3016 aconnector->dc_sink = sink;
3017 dc_sink_retain(aconnector->dc_sink);
3018 amdgpu_dm_update_freesync_caps(connector,
3021 amdgpu_dm_update_freesync_caps(connector, NULL);
3022 if (!aconnector->dc_sink) {
3023 aconnector->dc_sink = aconnector->dc_em_sink;
3024 dc_sink_retain(aconnector->dc_sink);
3028 mutex_unlock(&dev->mode_config.mutex);
3031 dc_sink_release(sink);
3036 * TODO: temporary guard to look for a proper fix;
3037 * if this sink is an MST sink, we should not do anything
3039 if (sink && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
3040 dc_sink_release(sink);
3044 if (aconnector->dc_sink == sink) {
3046 * We got a DP short pulse (Link Loss, DP CTS, etc...).
3049 DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: dc_sink didn't change.\n",
3050 aconnector->connector_id);
3052 dc_sink_release(sink);
3056 DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: Old sink=%p New sink=%p\n",
3057 aconnector->connector_id, aconnector->dc_sink, sink);
3059 mutex_lock(&dev->mode_config.mutex);
3062 * 1. Update status of the drm connector
3063 * 2. Send an event and let userspace tell us what to do
3067 * TODO: check if we still need the S3 mode update workaround.
3068 * If yes, put it here.
3070 if (aconnector->dc_sink) {
3071 amdgpu_dm_update_freesync_caps(connector, NULL);
3072 dc_sink_release(aconnector->dc_sink);
3075 aconnector->dc_sink = sink;
3076 dc_sink_retain(aconnector->dc_sink);
3077 if (sink->dc_edid.length == 0) {
3078 aconnector->edid = NULL;
3079 if (aconnector->dc_link->aux_mode) {
3080 drm_dp_cec_unset_edid(
3081 &aconnector->dm_dp_aux.aux);
3085 (struct edid *)sink->dc_edid.raw_edid;
3087 if (aconnector->dc_link->aux_mode)
3088 drm_dp_cec_set_edid(&aconnector->dm_dp_aux.aux,
3092 drm_connector_update_edid_property(connector, aconnector->edid);
3093 amdgpu_dm_update_freesync_caps(connector, aconnector->edid);
3094 update_connector_ext_caps(aconnector);
3096 drm_dp_cec_unset_edid(&aconnector->dm_dp_aux.aux);
3097 amdgpu_dm_update_freesync_caps(connector, NULL);
3098 drm_connector_update_edid_property(connector, NULL);
3099 aconnector->num_modes = 0;
3100 dc_sink_release(aconnector->dc_sink);
3101 aconnector->dc_sink = NULL;
3102 aconnector->edid = NULL;
3103 #ifdef CONFIG_DRM_AMD_DC_HDCP
3104 /* Set CP to DESIRED if it was ENABLED, so we can re-enable it again on hotplug */
3105 if (connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
3106 connector->state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
3110 mutex_unlock(&dev->mode_config.mutex);
3112 update_subconnector_property(aconnector);
3115 dc_sink_release(sink);
3118 static void handle_hpd_irq_helper(struct amdgpu_dm_connector *aconnector)
3120 struct drm_connector *connector = &aconnector->base;
3121 struct drm_device *dev = connector->dev;
3122 enum dc_connection_type new_connection_type = dc_connection_none;
3123 struct amdgpu_device *adev = drm_to_adev(dev);
3124 #ifdef CONFIG_DRM_AMD_DC_HDCP
3125 struct dm_connector_state *dm_con_state = to_dm_connector_state(connector->state);
3129 if (adev->dm.disable_hpd_irq)
3133 * In case of failure or MST there is no need to update the connector status or notify the OS,
3134 * since (in the MST case) MST does this in its own context.
3136 mutex_lock(&aconnector->hpd_lock);
3138 #ifdef CONFIG_DRM_AMD_DC_HDCP
3139 if (adev->dm.hdcp_workqueue) {
3140 hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
3141 dm_con_state->update_hdcp = true;
3144 if (aconnector->fake_enable)
3145 aconnector->fake_enable = false;
3147 if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
3148 DRM_ERROR("KMS: Failed to detect connector\n");
3150 if (aconnector->base.force && new_connection_type == dc_connection_none) {
3151 emulated_link_detect(aconnector->dc_link);
3153 drm_modeset_lock_all(dev);
3154 dm_restore_drm_connector_state(dev, connector);
3155 drm_modeset_unlock_all(dev);
3157 if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
3158 drm_kms_helper_connector_hotplug_event(connector);
3160 mutex_lock(&adev->dm.dc_lock);
3161 ret = dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD);
3162 mutex_unlock(&adev->dm.dc_lock);
3164 amdgpu_dm_update_connector_after_detect(aconnector);
3166 drm_modeset_lock_all(dev);
3167 dm_restore_drm_connector_state(dev, connector);
3168 drm_modeset_unlock_all(dev);
3170 if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
3171 drm_kms_helper_connector_hotplug_event(connector);
3174 mutex_unlock(&aconnector->hpd_lock);
3178 static void handle_hpd_irq(void *param)
3180 struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
3182 handle_hpd_irq_helper(aconnector);
3186 static void dm_handle_mst_sideband_msg(struct amdgpu_dm_connector *aconnector)
3188 uint8_t esi[DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI] = { 0 };
3190 bool new_irq_handled = false;
3192 int dpcd_bytes_to_read;
3194 const int max_process_count = 30;
3195 int process_count = 0;
3197 const struct dc_link_status *link_status = dc_link_get_status(aconnector->dc_link);
3199 if (link_status->dpcd_caps->dpcd_rev.raw < 0x12) {
3200 dpcd_bytes_to_read = DP_LANE0_1_STATUS - DP_SINK_COUNT;
3201 /* DPCD 0x200 - 0x201 for downstream IRQ */
3202 dpcd_addr = DP_SINK_COUNT;
3204 dpcd_bytes_to_read = DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI;
3205 /* DPCD 0x2002 - 0x2005 for downstream IRQ */
3206 dpcd_addr = DP_SINK_COUNT_ESI;
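/* In other words: sinks with DPCD rev >= 1.2 are polled through the 4-byte
 * ESI block starting at 0x2002, older sinks through the 2-byte block at
 * 0x200; the read sizes follow from the register offsets subtracted above.
 */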
3209 dret = drm_dp_dpcd_read(
3210 &aconnector->dm_dp_aux.aux,
3213 dpcd_bytes_to_read);
3215 while (dret == dpcd_bytes_to_read &&
3216 process_count < max_process_count) {
3222 DRM_DEBUG_DRIVER("ESI %02x %02x %02x\n", esi[0], esi[1], esi[2]);
3223 /* handle HPD short pulse irq */
3224 if (aconnector->mst_mgr.mst_state)
3226 &aconnector->mst_mgr,
3230 if (new_irq_handled) {
3231 /* ACK at DPCD to notify downstream */
3232 const int ack_dpcd_bytes_to_write =
3233 dpcd_bytes_to_read - 1;
3235 for (retry = 0; retry < 3; retry++) {
3238 wret = drm_dp_dpcd_write(
3239 &aconnector->dm_dp_aux.aux,
3242 ack_dpcd_bytes_to_write);
3243 if (wret == ack_dpcd_bytes_to_write)
3247 /* check if there is new irq to be handled */
3248 dret = drm_dp_dpcd_read(
3249 &aconnector->dm_dp_aux.aux,
3252 dpcd_bytes_to_read);
3254 new_irq_handled = false;
3260 if (process_count == max_process_count)
3261 DRM_DEBUG_DRIVER("Loop exceeded max iterations\n");
3264 static void schedule_hpd_rx_offload_work(struct hpd_rx_irq_offload_work_queue *offload_wq,
3265 union hpd_irq_data hpd_irq_data)
3267 struct hpd_rx_irq_offload_work *offload_work =
3268 kzalloc(sizeof(*offload_work), GFP_KERNEL);
3270 if (!offload_work) {
3271 DRM_ERROR("Failed to allocate hpd_rx_irq_offload_work.\n");
3275 INIT_WORK(&offload_work->work, dm_handle_hpd_rx_offload_work);
3276 offload_work->data = hpd_irq_data;
3277 offload_work->offload_wq = offload_wq;
3279 queue_work(offload_wq->wq, &offload_work->work);
3280 DRM_DEBUG_KMS("queue work to handle hpd_rx offload work");
3283 static void handle_hpd_rx_irq(void *param)
3285 struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
3286 struct drm_connector *connector = &aconnector->base;
3287 struct drm_device *dev = connector->dev;
3288 struct dc_link *dc_link = aconnector->dc_link;
3289 bool is_mst_root_connector = aconnector->mst_mgr.mst_state;
3290 bool result = false;
3291 enum dc_connection_type new_connection_type = dc_connection_none;
3292 struct amdgpu_device *adev = drm_to_adev(dev);
3293 union hpd_irq_data hpd_irq_data;
3294 bool link_loss = false;
3295 bool has_left_work = false;
3296 int idx = aconnector->base.index;
3297 struct hpd_rx_irq_offload_work_queue *offload_wq = &adev->dm.hpd_rx_offload_wq[idx];
3299 memset(&hpd_irq_data, 0, sizeof(hpd_irq_data));
3301 if (adev->dm.disable_hpd_irq)
3305 * TODO: Temporarily add a mutex to protect the hpd interrupt from a gpio
3306 * conflict; after the i2c helper is implemented, this mutex should be
3309 mutex_lock(&aconnector->hpd_lock);
3311 result = dc_link_handle_hpd_rx_irq(dc_link, &hpd_irq_data,
3312 &link_loss, true, &has_left_work);
3317 if (hpd_irq_data.bytes.device_service_irq.bits.AUTOMATED_TEST) {
3318 schedule_hpd_rx_offload_work(offload_wq, hpd_irq_data);
3322 if (dc_link_dp_allow_hpd_rx_irq(dc_link)) {
3323 if (hpd_irq_data.bytes.device_service_irq.bits.UP_REQ_MSG_RDY ||
3324 hpd_irq_data.bytes.device_service_irq.bits.DOWN_REP_MSG_RDY) {
3325 dm_handle_mst_sideband_msg(aconnector);
3332 spin_lock(&offload_wq->offload_lock);
3333 skip = offload_wq->is_handling_link_loss;
3336 offload_wq->is_handling_link_loss = true;
3338 spin_unlock(&offload_wq->offload_lock);
3341 schedule_hpd_rx_offload_work(offload_wq, hpd_irq_data);
3348 if (result && !is_mst_root_connector) {
3349 /* Downstream Port status changed. */
3350 if (!dc_link_detect_sink(dc_link, &new_connection_type))
3351 DRM_ERROR("KMS: Failed to detect connector\n");
3353 if (aconnector->base.force && new_connection_type == dc_connection_none) {
3354 emulated_link_detect(dc_link);
3356 if (aconnector->fake_enable)
3357 aconnector->fake_enable = false;
3359 amdgpu_dm_update_connector_after_detect(aconnector);
3362 drm_modeset_lock_all(dev);
3363 dm_restore_drm_connector_state(dev, connector);
3364 drm_modeset_unlock_all(dev);
3366 drm_kms_helper_connector_hotplug_event(connector);
3370 mutex_lock(&adev->dm.dc_lock);
3371 ret = dc_link_detect(dc_link, DETECT_REASON_HPDRX);
3372 mutex_unlock(&adev->dm.dc_lock);
3375 if (aconnector->fake_enable)
3376 aconnector->fake_enable = false;
3378 amdgpu_dm_update_connector_after_detect(aconnector);
3380 drm_modeset_lock_all(dev);
3381 dm_restore_drm_connector_state(dev, connector);
3382 drm_modeset_unlock_all(dev);
3384 drm_kms_helper_connector_hotplug_event(connector);
3388 #ifdef CONFIG_DRM_AMD_DC_HDCP
3389 if (hpd_irq_data.bytes.device_service_irq.bits.CP_IRQ) {
3390 if (adev->dm.hdcp_workqueue)
3391 hdcp_handle_cpirq(adev->dm.hdcp_workqueue, aconnector->base.index);
3395 if (dc_link->type != dc_connection_mst_branch)
3396 drm_dp_cec_irq(&aconnector->dm_dp_aux.aux);
3398 mutex_unlock(&aconnector->hpd_lock);
3401 static void register_hpd_handlers(struct amdgpu_device *adev)
3403 struct drm_device *dev = adev_to_drm(adev);
3404 struct drm_connector *connector;
3405 struct amdgpu_dm_connector *aconnector;
3406 const struct dc_link *dc_link;
3407 struct dc_interrupt_params int_params = {0};
3409 int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3410 int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3412 list_for_each_entry(connector,
3413 &dev->mode_config.connector_list, head) {
3415 aconnector = to_amdgpu_dm_connector(connector);
3416 dc_link = aconnector->dc_link;
3418 if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd) {
3419 int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
3420 int_params.irq_source = dc_link->irq_source_hpd;
3422 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3424 (void *) aconnector);
3427 if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd_rx) {
3429 /* Also register for DP short pulse (hpd_rx). */
3430 int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
3431 int_params.irq_source = dc_link->irq_source_hpd_rx;
3433 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3435 (void *) aconnector);
3437 if (adev->dm.hpd_rx_offload_wq)
3438 adev->dm.hpd_rx_offload_wq[connector->index].aconnector =
3444 #if defined(CONFIG_DRM_AMD_DC_SI)
3445 /* Register IRQ sources and initialize IRQ callbacks */
3446 static int dce60_register_irq_handlers(struct amdgpu_device *adev)
3448 struct dc *dc = adev->dm.dc;
3449 struct common_irq_params *c_irq_params;
3450 struct dc_interrupt_params int_params = {0};
3453 unsigned client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
3455 int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3456 int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3459 * Actions of amdgpu_irq_add_id():
3460 * 1. Register a set() function with base driver.
3461 * Base driver will call set() function to enable/disable an
3462 * interrupt in DC hardware.
3463 * 2. Register amdgpu_dm_irq_handler().
3464 * Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
3465 * coming from DC hardware.
3466 * amdgpu_dm_irq_handler() will re-direct the interrupt to DC
3467 * for acknowledging and handling. */
3469 /* Use VBLANK interrupt */
3470 for (i = 0; i < adev->mode_info.num_crtc; i++) {
3471 r = amdgpu_irq_add_id(adev, client_id, i + 1, &adev->crtc_irq);
3473 DRM_ERROR("Failed to add crtc irq id!\n");
3477 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3478 int_params.irq_source =
3479 dc_interrupt_to_irq_source(dc, i + 1, 0);
3481 c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
3483 c_irq_params->adev = adev;
3484 c_irq_params->irq_src = int_params.irq_source;
3486 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3487 dm_crtc_high_irq, c_irq_params);
3490 /* Use GRPH_PFLIP interrupt */
3491 for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
3492 i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
3493 r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
3495 DRM_ERROR("Failed to add page flip irq id!\n");
3499 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3500 int_params.irq_source =
3501 dc_interrupt_to_irq_source(dc, i, 0);
3503 c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
3505 c_irq_params->adev = adev;
3506 c_irq_params->irq_src = int_params.irq_source;
3508 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3509 dm_pflip_high_irq, c_irq_params);
3514 r = amdgpu_irq_add_id(adev, client_id,
3515 VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
3517 DRM_ERROR("Failed to add hpd irq id!\n");
3521 register_hpd_handlers(adev);
3527 /* Register IRQ sources and initialize IRQ callbacks */
3528 static int dce110_register_irq_handlers(struct amdgpu_device *adev)
3530 struct dc *dc = adev->dm.dc;
3531 struct common_irq_params *c_irq_params;
3532 struct dc_interrupt_params int_params = {0};
3535 unsigned client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
3537 if (adev->family >= AMDGPU_FAMILY_AI)
3538 client_id = SOC15_IH_CLIENTID_DCE;
3540 int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3541 int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3544 * Actions of amdgpu_irq_add_id():
3545 * 1. Register a set() function with base driver.
3546 * Base driver will call set() function to enable/disable an
3547 * interrupt in DC hardware.
3548 * 2. Register amdgpu_dm_irq_handler().
3549 * Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
3550 * coming from DC hardware.
3551 * amdgpu_dm_irq_handler() will re-direct the interrupt to DC
3552 * for acknowledging and handling. */
3554 /* Use VBLANK interrupt */
3555 for (i = VISLANDS30_IV_SRCID_D1_VERTICAL_INTERRUPT0; i <= VISLANDS30_IV_SRCID_D6_VERTICAL_INTERRUPT0; i++) {
3556 r = amdgpu_irq_add_id(adev, client_id, i, &adev->crtc_irq);
3558 DRM_ERROR("Failed to add crtc irq id!\n");
3562 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3563 int_params.irq_source =
3564 dc_interrupt_to_irq_source(dc, i, 0);
3566 c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
3568 c_irq_params->adev = adev;
3569 c_irq_params->irq_src = int_params.irq_source;
3571 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3572 dm_crtc_high_irq, c_irq_params);
3575 /* Use VUPDATE interrupt */
3576 for (i = VISLANDS30_IV_SRCID_D1_V_UPDATE_INT; i <= VISLANDS30_IV_SRCID_D6_V_UPDATE_INT; i += 2) {
3577 r = amdgpu_irq_add_id(adev, client_id, i, &adev->vupdate_irq);
3579 DRM_ERROR("Failed to add vupdate irq id!\n");
3583 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3584 int_params.irq_source =
3585 dc_interrupt_to_irq_source(dc, i, 0);
3587 c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
3589 c_irq_params->adev = adev;
3590 c_irq_params->irq_src = int_params.irq_source;
3592 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3593 dm_vupdate_high_irq, c_irq_params);
3596 /* Use GRPH_PFLIP interrupt */
3597 for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
3598 i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
3599 r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
3601 DRM_ERROR("Failed to add page flip irq id!\n");
3605 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3606 int_params.irq_source =
3607 dc_interrupt_to_irq_source(dc, i, 0);
3609 c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
3611 c_irq_params->adev = adev;
3612 c_irq_params->irq_src = int_params.irq_source;
3614 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3615 dm_pflip_high_irq, c_irq_params);
3620 r = amdgpu_irq_add_id(adev, client_id,
3621 VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
3623 DRM_ERROR("Failed to add hpd irq id!\n");
3627 register_hpd_handlers(adev);
3632 /* Register IRQ sources and initialize IRQ callbacks */
3633 static int dcn10_register_irq_handlers(struct amdgpu_device *adev)
3635 struct dc *dc = adev->dm.dc;
3636 struct common_irq_params *c_irq_params;
3637 struct dc_interrupt_params int_params = {0};
3640 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
3641 static const unsigned int vrtl_int_srcid[] = {
3642 DCN_1_0__SRCID__OTG1_VERTICAL_INTERRUPT0_CONTROL,
3643 DCN_1_0__SRCID__OTG2_VERTICAL_INTERRUPT0_CONTROL,
3644 DCN_1_0__SRCID__OTG3_VERTICAL_INTERRUPT0_CONTROL,
3645 DCN_1_0__SRCID__OTG4_VERTICAL_INTERRUPT0_CONTROL,
3646 DCN_1_0__SRCID__OTG5_VERTICAL_INTERRUPT0_CONTROL,
3647 DCN_1_0__SRCID__OTG6_VERTICAL_INTERRUPT0_CONTROL
3651 int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3652 int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3655 * Actions of amdgpu_irq_add_id():
3656 * 1. Register a set() function with base driver.
3657 * Base driver will call set() function to enable/disable an
3658 * interrupt in DC hardware.
3659 * 2. Register amdgpu_dm_irq_handler().
3660 * Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
3661 * coming from DC hardware.
3662 * amdgpu_dm_irq_handler() will re-direct the interrupt to DC
3663 * for acknowledging and handling.
3666 /* Use VSTARTUP interrupt */
3667 for (i = DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP;
3668 i <= DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP + adev->mode_info.num_crtc - 1;
3670 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->crtc_irq);
3673 DRM_ERROR("Failed to add crtc irq id!\n");
3677 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3678 int_params.irq_source =
3679 dc_interrupt_to_irq_source(dc, i, 0);
3681 c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
3683 c_irq_params->adev = adev;
3684 c_irq_params->irq_src = int_params.irq_source;
3686 amdgpu_dm_irq_register_interrupt(
3687 adev, &int_params, dm_crtc_high_irq, c_irq_params);
3690 /* Use otg vertical line interrupt */
3691 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
3692 for (i = 0; i <= adev->mode_info.num_crtc - 1; i++) {
3693 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE,
3694 vrtl_int_srcid[i], &adev->vline0_irq);
3697 DRM_ERROR("Failed to add vline0 irq id!\n");
3701 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3702 int_params.irq_source =
3703 dc_interrupt_to_irq_source(dc, vrtl_int_srcid[i], 0);
3705 if (int_params.irq_source == DC_IRQ_SOURCE_INVALID) {
3706 DRM_ERROR("Failed to register vline0 irq %d!\n", vrtl_int_srcid[i]);
3710 c_irq_params = &adev->dm.vline0_params[int_params.irq_source
3711 - DC_IRQ_SOURCE_DC1_VLINE0];
3713 c_irq_params->adev = adev;
3714 c_irq_params->irq_src = int_params.irq_source;
3716 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3717 dm_dcn_vertical_interrupt0_high_irq, c_irq_params);
3721 /* Use VUPDATE_NO_LOCK interrupt on DCN, which seems to correspond to
3722 * the regular VUPDATE interrupt on DCE. We want DC_IRQ_SOURCE_VUPDATEx
3723 * to trigger at end of each vblank, regardless of state of the lock,
3724 * matching DCE behaviour.
3726 for (i = DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT;
3727 i <= DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT + adev->mode_info.num_crtc - 1;
3729 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->vupdate_irq);
3732 DRM_ERROR("Failed to add vupdate irq id!\n");
3736 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3737 int_params.irq_source =
3738 dc_interrupt_to_irq_source(dc, i, 0);
3740 c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
3742 c_irq_params->adev = adev;
3743 c_irq_params->irq_src = int_params.irq_source;
3745 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3746 dm_vupdate_high_irq, c_irq_params);
3749 /* Use GRPH_PFLIP interrupt */
3750 for (i = DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT;
3751 i <= DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT + dc->caps.max_otg_num - 1;
3753 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->pageflip_irq);
3755 DRM_ERROR("Failed to add page flip irq id!\n");
3759 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3760 int_params.irq_source =
3761 dc_interrupt_to_irq_source(dc, i, 0);
3763 c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
3765 c_irq_params->adev = adev;
3766 c_irq_params->irq_src = int_params.irq_source;
3768 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3769 dm_pflip_high_irq, c_irq_params);
3774 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DC_HPD1_INT,
3777 DRM_ERROR("Failed to add hpd irq id!\n");
3781 register_hpd_handlers(adev);
3785 /* Register Outbox IRQ sources and initialize IRQ callbacks */
3786 static int register_outbox_irq_handlers(struct amdgpu_device *adev)
3788 struct dc *dc = adev->dm.dc;
3789 struct common_irq_params *c_irq_params;
3790 struct dc_interrupt_params int_params = {0};
3793 int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3794 int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3796 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DMCUB_OUTBOX_LOW_PRIORITY_READY_INT,
3797 &adev->dmub_outbox_irq);
3799 DRM_ERROR("Failed to add outbox irq id!\n");
3803 if (dc->ctx->dmub_srv) {
3804 i = DCN_1_0__SRCID__DMCUB_OUTBOX_LOW_PRIORITY_READY_INT;
3805 int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
3806 int_params.irq_source =
3807 dc_interrupt_to_irq_source(dc, i, 0);
3809 c_irq_params = &adev->dm.dmub_outbox_params[0];
3811 c_irq_params->adev = adev;
3812 c_irq_params->irq_src = int_params.irq_source;
3814 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3815 dm_dmub_outbox1_low_irq, c_irq_params);
3822 * Acquires the lock for the atomic state object and returns
3823 * the new atomic state.
3825 * This should only be called during atomic check.
3827 int dm_atomic_get_state(struct drm_atomic_state *state,
3828 struct dm_atomic_state **dm_state)
3830 struct drm_device *dev = state->dev;
3831 struct amdgpu_device *adev = drm_to_adev(dev);
3832 struct amdgpu_display_manager *dm = &adev->dm;
3833 struct drm_private_state *priv_state;
3838 priv_state = drm_atomic_get_private_obj_state(state, &dm->atomic_obj);
3839 if (IS_ERR(priv_state))
3840 return PTR_ERR(priv_state);
3842 *dm_state = to_dm_atomic_state(priv_state);
3847 static struct dm_atomic_state *
3848 dm_atomic_get_new_state(struct drm_atomic_state *state)
3850 struct drm_device *dev = state->dev;
3851 struct amdgpu_device *adev = drm_to_adev(dev);
3852 struct amdgpu_display_manager *dm = &adev->dm;
3853 struct drm_private_obj *obj;
3854 struct drm_private_state *new_obj_state;
3857 for_each_new_private_obj_in_state(state, obj, new_obj_state, i) {
3858 if (obj->funcs == dm->atomic_obj.funcs)
3859 return to_dm_atomic_state(new_obj_state);
3865 static struct drm_private_state *
3866 dm_atomic_duplicate_state(struct drm_private_obj *obj)
3868 struct dm_atomic_state *old_state, *new_state;
3870 new_state = kzalloc(sizeof(*new_state), GFP_KERNEL);
3874 __drm_atomic_helper_private_obj_duplicate_state(obj, &new_state->base);
3876 old_state = to_dm_atomic_state(obj->state);
3878 if (old_state && old_state->context)
3879 new_state->context = dc_copy_state(old_state->context);
3881 if (!new_state->context) {
3886 return &new_state->base;
3889 static void dm_atomic_destroy_state(struct drm_private_obj *obj,
3890 struct drm_private_state *state)
3892 struct dm_atomic_state *dm_state = to_dm_atomic_state(state);
3894 if (dm_state && dm_state->context)
3895 dc_release_state(dm_state->context);
3900 static struct drm_private_state_funcs dm_atomic_state_funcs = {
3901 .atomic_duplicate_state = dm_atomic_duplicate_state,
3902 .atomic_destroy_state = dm_atomic_destroy_state,
3905 static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev)
3907 struct dm_atomic_state *state;
3910 adev->mode_info.mode_config_initialized = true;
3912 adev_to_drm(adev)->mode_config.funcs = (void *)&amdgpu_dm_mode_funcs;
3913 adev_to_drm(adev)->mode_config.helper_private = &amdgpu_dm_mode_config_helperfuncs;
3915 adev_to_drm(adev)->mode_config.max_width = 16384;
3916 adev_to_drm(adev)->mode_config.max_height = 16384;
3918 adev_to_drm(adev)->mode_config.preferred_depth = 24;
3919 /* disable prefer shadow for now due to hibernation issues */
3920 adev_to_drm(adev)->mode_config.prefer_shadow = 0;
3921 /* indicates support for immediate flip */
3922 adev_to_drm(adev)->mode_config.async_page_flip = true;
3924 adev_to_drm(adev)->mode_config.fb_base = adev->gmc.aper_base;
3926 state = kzalloc(sizeof(*state), GFP_KERNEL);
3930 state->context = dc_create_state(adev->dm.dc);
3931 if (!state->context) {
3936 dc_resource_state_copy_construct_current(adev->dm.dc, state->context);
3938 drm_atomic_private_obj_init(adev_to_drm(adev),
3939 &adev->dm.atomic_obj,
3941 &dm_atomic_state_funcs);
3943 r = amdgpu_display_modeset_create_props(adev);
3945 dc_release_state(state->context);
3950 r = amdgpu_dm_audio_init(adev);
3952 dc_release_state(state->context);
3960 #define AMDGPU_DM_DEFAULT_MIN_BACKLIGHT 12
3961 #define AMDGPU_DM_DEFAULT_MAX_BACKLIGHT 255
3962 #define AUX_BL_DEFAULT_TRANSITION_TIME_MS 50
3964 static void amdgpu_dm_update_backlight_caps(struct amdgpu_display_manager *dm,
3967 #if defined(CONFIG_ACPI)
3968 struct amdgpu_dm_backlight_caps caps;
3970 memset(&caps, 0, sizeof(caps));
3972 if (dm->backlight_caps[bl_idx].caps_valid)
3975 amdgpu_acpi_get_backlight_caps(&caps);
3976 if (caps.caps_valid) {
3977 dm->backlight_caps[bl_idx].caps_valid = true;
3978 if (caps.aux_support)
3980 dm->backlight_caps[bl_idx].min_input_signal = caps.min_input_signal;
3981 dm->backlight_caps[bl_idx].max_input_signal = caps.max_input_signal;
3983 dm->backlight_caps[bl_idx].min_input_signal =
3984 AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
3985 dm->backlight_caps[bl_idx].max_input_signal =
3986 AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
3989 if (dm->backlight_caps[bl_idx].aux_support)
3992 dm->backlight_caps[bl_idx].min_input_signal = AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
3993 dm->backlight_caps[bl_idx].max_input_signal = AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
3997 static int get_brightness_range(const struct amdgpu_dm_backlight_caps *caps,
3998 unsigned *min, unsigned *max)
4003 if (caps->aux_support) {
4004 // Firmware limits are in nits, DC API wants millinits.
4005 *max = 1000 * caps->aux_max_input_signal;
4006 *min = 1000 * caps->aux_min_input_signal;
4008 // Firmware limits are 8-bit, PWM control is 16-bit.
4009 *max = 0x101 * caps->max_input_signal;
4010 *min = 0x101 * caps->min_input_signal;
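/* For example, with the default PWM caps (min_input_signal = 12,
 * max_input_signal = 255) this gives min = 0x101 * 12 = 3084 and
 * max = 0x101 * 255 = 65535 (illustrative numbers only).
 */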
4015 static u32 convert_brightness_from_user(const struct amdgpu_dm_backlight_caps *caps,
4016 uint32_t brightness)
4020 if (!get_brightness_range(caps, &min, &max))
4023 // Rescale 0..255 to min..max
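// Illustrative arithmetic, assuming AMDGPU_MAX_BL_LEVEL == 255: with the
// 3084..65535 PWM range above, a user brightness of 128 maps to roughly
// 3084 + (65535 - 3084) * 128 / 255 ~= 34432.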
4024 return min + DIV_ROUND_CLOSEST((max - min) * brightness,
4025 AMDGPU_MAX_BL_LEVEL);
4028 static u32 convert_brightness_to_user(const struct amdgpu_dm_backlight_caps *caps,
4029 uint32_t brightness)
4033 if (!get_brightness_range(caps, &min, &max))
4036 if (brightness < min)
4038 // Rescale min..max to 0..255
4039 return DIV_ROUND_CLOSEST(AMDGPU_MAX_BL_LEVEL * (brightness - min),
4043 static void amdgpu_dm_backlight_set_level(struct amdgpu_display_manager *dm,
4045 u32 user_brightness)
4047 struct amdgpu_dm_backlight_caps caps;
4048 struct dc_link *link;
4052 amdgpu_dm_update_backlight_caps(dm, bl_idx);
4053 caps = dm->backlight_caps[bl_idx];
4055 dm->brightness[bl_idx] = user_brightness;
4056 /* update scratch register */
4058 amdgpu_atombios_scratch_regs_set_backlight_level(dm->adev, dm->brightness[bl_idx]);
4059 brightness = convert_brightness_from_user(&caps, dm->brightness[bl_idx]);
4060 link = (struct dc_link *)dm->backlight_link[bl_idx];
4062 /* Change brightness based on AUX property */
4063 if (caps.aux_support) {
4064 rc = dc_link_set_backlight_level_nits(link, true, brightness,
4065 AUX_BL_DEFAULT_TRANSITION_TIME_MS);
4067 DRM_DEBUG("DM: Failed to update backlight via AUX on eDP[%d]\n", bl_idx);
4069 rc = dc_link_set_backlight_level(link, brightness, 0);
4071 DRM_DEBUG("DM: Failed to update backlight on eDP[%d]\n", bl_idx);
4075 dm->actual_brightness[bl_idx] = user_brightness;
4078 static int amdgpu_dm_backlight_update_status(struct backlight_device *bd)
4080 struct amdgpu_display_manager *dm = bl_get_data(bd);
4083 for (i = 0; i < dm->num_of_edps; i++) {
4084 if (bd == dm->backlight_dev[i])
4087 if (i >= AMDGPU_DM_MAX_NUM_EDP)
4089 amdgpu_dm_backlight_set_level(dm, i, bd->props.brightness);
4094 static u32 amdgpu_dm_backlight_get_level(struct amdgpu_display_manager *dm,
4097 struct amdgpu_dm_backlight_caps caps;
4098 struct dc_link *link = (struct dc_link *)dm->backlight_link[bl_idx];
4100 amdgpu_dm_update_backlight_caps(dm, bl_idx);
4101 caps = dm->backlight_caps[bl_idx];
4103 if (caps.aux_support) {
4107 rc = dc_link_get_backlight_level_nits(link, &avg, &peak);
4109 return dm->brightness[bl_idx];
4110 return convert_brightness_to_user(&caps, avg);
4112 int ret = dc_link_get_backlight_level(link);
4114 if (ret == DC_ERROR_UNEXPECTED)
4115 return dm->brightness[bl_idx];
4116 return convert_brightness_to_user(&caps, ret);
4120 static int amdgpu_dm_backlight_get_brightness(struct backlight_device *bd)
4122 struct amdgpu_display_manager *dm = bl_get_data(bd);
4125 for (i = 0; i < dm->num_of_edps; i++) {
4126 if (bd == dm->backlight_dev[i])
4129 if (i >= AMDGPU_DM_MAX_NUM_EDP)
4131 return amdgpu_dm_backlight_get_level(dm, i);
4134 static const struct backlight_ops amdgpu_dm_backlight_ops = {
4135 .options = BL_CORE_SUSPENDRESUME,
4136 .get_brightness = amdgpu_dm_backlight_get_brightness,
4137 .update_status = amdgpu_dm_backlight_update_status,
4141 amdgpu_dm_register_backlight_device(struct amdgpu_display_manager *dm)
4144 struct backlight_properties props = { 0 };
4146 amdgpu_dm_update_backlight_caps(dm, dm->num_of_edps);
4147 dm->brightness[dm->num_of_edps] = AMDGPU_MAX_BL_LEVEL;
4149 props.max_brightness = AMDGPU_MAX_BL_LEVEL;
4150 props.brightness = AMDGPU_MAX_BL_LEVEL;
4151 props.type = BACKLIGHT_RAW;
4153 snprintf(bl_name, sizeof(bl_name), "amdgpu_bl%d",
4154 adev_to_drm(dm->adev)->primary->index + dm->num_of_edps);
4156 dm->backlight_dev[dm->num_of_edps] = backlight_device_register(bl_name,
4157 adev_to_drm(dm->adev)->dev,
4159 &amdgpu_dm_backlight_ops,
4162 if (IS_ERR(dm->backlight_dev[dm->num_of_edps]))
4163 DRM_ERROR("DM: Backlight registration failed!\n");
4165 DRM_DEBUG_DRIVER("DM: Registered Backlight device: %s\n", bl_name);
4168 static int initialize_plane(struct amdgpu_display_manager *dm,
4169 struct amdgpu_mode_info *mode_info, int plane_id,
4170 enum drm_plane_type plane_type,
4171 const struct dc_plane_cap *plane_cap)
4173 struct drm_plane *plane;
4174 unsigned long possible_crtcs;
4177 plane = kzalloc(sizeof(struct drm_plane), GFP_KERNEL);
4179 DRM_ERROR("KMS: Failed to allocate plane\n");
4182 plane->type = plane_type;
4185 * HACK: IGT tests expect that the primary plane for a CRTC
4186 * can only have one possible CRTC. Only expose support for
4187 * any CRTC for planes that are not going to be used as a primary
4188 * plane for a CRTC - like overlay or underlay planes.
4190 possible_crtcs = 1 << plane_id;
4191 if (plane_id >= dm->dc->caps.max_streams)
4192 possible_crtcs = 0xff;
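/*
 * Illustrative note: possible_crtcs is a bitmask of CRTC indexes, so
 * plane_id 0 yields 1 << 0 = 0x1 and binds that primary plane to CRTC 0
 * only, while planes beyond max_streams (overlay/underlay) advertise
 * 0xff and may be assigned to any CRTC.
 */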
4194 ret = amdgpu_dm_plane_init(dm, plane, possible_crtcs, plane_cap);
4197 DRM_ERROR("KMS: Failed to initialize plane\n");
4203 mode_info->planes[plane_id] = plane;
4209 static void register_backlight_device(struct amdgpu_display_manager *dm,
4210 struct dc_link *link)
4212 if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
4213 link->type != dc_connection_none) {
4215 * Even if registration fails, we should continue with
4216 * DM initialization because not having a backlight control
4217 * is better than a black screen.
4219 if (!dm->backlight_dev[dm->num_of_edps])
4220 amdgpu_dm_register_backlight_device(dm);
4222 if (dm->backlight_dev[dm->num_of_edps]) {
4223 dm->backlight_link[dm->num_of_edps] = link;
4231 * In this architecture, the association
4232 * connector -> encoder -> crtc
4233 * is not really required. The crtc and connector will hold the
4234 * display_index as an abstraction to use with the DAL component
4236 * Returns 0 on success
4238 static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
4240 struct amdgpu_display_manager *dm = &adev->dm;
4242 struct amdgpu_dm_connector *aconnector = NULL;
4243 struct amdgpu_encoder *aencoder = NULL;
4244 struct amdgpu_mode_info *mode_info = &adev->mode_info;
4246 int32_t primary_planes;
4247 enum dc_connection_type new_connection_type = dc_connection_none;
4248 const struct dc_plane_cap *plane;
4249 bool psr_feature_enabled = false;
4251 dm->display_indexes_num = dm->dc->caps.max_streams;
4252 /* Update the actual number of crtcs used */
4253 adev->mode_info.num_crtc = adev->dm.display_indexes_num;
4255 link_cnt = dm->dc->caps.max_links;
4256 if (amdgpu_dm_mode_config_init(dm->adev)) {
4257 DRM_ERROR("DM: Failed to initialize mode config\n");
4261 /* There is one primary plane per CRTC */
4262 primary_planes = dm->dc->caps.max_streams;
4263 ASSERT(primary_planes <= AMDGPU_MAX_PLANES);
4266 * Initialize primary planes, implicit planes for legacy IOCTLs.
4267 * Order is reversed to match iteration order in atomic check.
4269 for (i = (primary_planes - 1); i >= 0; i--) {
4270 plane = &dm->dc->caps.planes[i];
4272 if (initialize_plane(dm, mode_info, i,
4273 DRM_PLANE_TYPE_PRIMARY, plane)) {
4274 DRM_ERROR("KMS: Failed to initialize primary plane\n");
4280 * Initialize overlay planes, index starting after primary planes.
4281 * These planes have a higher DRM index than the primary planes since
4282 * they should be considered as having a higher z-order.
4283 * Order is reversed to match iteration order in atomic check.
4285 * Only support DCN for now, and only expose one so we don't encourage
4286 * userspace to use up all the pipes.
4288 for (i = 0; i < dm->dc->caps.max_planes; ++i) {
4289 struct dc_plane_cap *plane = &dm->dc->caps.planes[i];
4291 if (plane->type != DC_PLANE_TYPE_DCN_UNIVERSAL)
4294 if (!plane->blends_with_above || !plane->blends_with_below)
4297 if (!plane->pixel_format_support.argb8888)
4300 if (initialize_plane(dm, NULL, primary_planes + i,
4301 DRM_PLANE_TYPE_OVERLAY, plane)) {
4302 DRM_ERROR("KMS: Failed to initialize overlay plane\n");
4306 /* Only create one overlay plane. */
4310 for (i = 0; i < dm->dc->caps.max_streams; i++)
4311 if (amdgpu_dm_crtc_init(dm, mode_info->planes[i], i)) {
4312 DRM_ERROR("KMS: Failed to initialize crtc\n");
4316 /* Use Outbox interrupt */
4317 switch (adev->ip_versions[DCE_HWIP][0]) {
4318 case IP_VERSION(3, 0, 0):
4319 case IP_VERSION(3, 1, 2):
4320 case IP_VERSION(3, 1, 3):
4321 case IP_VERSION(3, 1, 4):
4322 case IP_VERSION(3, 1, 5):
4323 case IP_VERSION(3, 1, 6):
4324 case IP_VERSION(3, 2, 0):
4325 case IP_VERSION(3, 2, 1):
4326 case IP_VERSION(2, 1, 0):
4327 if (register_outbox_irq_handlers(dm->adev)) {
4328 DRM_ERROR("DM: Failed to initialize IRQ\n");
4333 DRM_DEBUG_KMS("Unsupported DCN IP version for outbox: 0x%X\n",
4334 adev->ip_versions[DCE_HWIP][0]);
4337 /* Determine whether to enable PSR support by default. */
4338 if (!(amdgpu_dc_debug_mask & DC_DISABLE_PSR)) {
4339 switch (adev->ip_versions[DCE_HWIP][0]) {
4340 case IP_VERSION(3, 1, 2):
4341 case IP_VERSION(3, 1, 3):
4342 case IP_VERSION(3, 1, 4):
4343 case IP_VERSION(3, 1, 5):
4344 case IP_VERSION(3, 1, 6):
4345 case IP_VERSION(3, 2, 0):
4346 case IP_VERSION(3, 2, 1):
4347 psr_feature_enabled = true;
4350 psr_feature_enabled = amdgpu_dc_feature_mask & DC_PSR_MASK;
4355 /* loops over all connectors on the board */
4356 for (i = 0; i < link_cnt; i++) {
4357 struct dc_link *link = NULL;
4359 if (i > AMDGPU_DM_MAX_DISPLAY_INDEX) {
4361 "KMS: Cannot support more than %d display indexes\n",
4362 AMDGPU_DM_MAX_DISPLAY_INDEX);
4366 aconnector = kzalloc(sizeof(*aconnector), GFP_KERNEL);
4370 aencoder = kzalloc(sizeof(*aencoder), GFP_KERNEL);
4374 if (amdgpu_dm_encoder_init(dm->ddev, aencoder, i)) {
4375 DRM_ERROR("KMS: Failed to initialize encoder\n");
4379 if (amdgpu_dm_connector_init(dm, aconnector, i, aencoder)) {
4380 DRM_ERROR("KMS: Failed to initialize connector\n");
4384 link = dc_get_link_at_index(dm->dc, i);
4386 if (!dc_link_detect_sink(link, &new_connection_type))
4387 DRM_ERROR("KMS: Failed to detect connector\n");
4389 if (aconnector->base.force && new_connection_type == dc_connection_none) {
4390 emulated_link_detect(link);
4391 amdgpu_dm_update_connector_after_detect(aconnector);
4395 mutex_lock(&dm->dc_lock);
4396 ret = dc_link_detect(link, DETECT_REASON_BOOT);
4397 mutex_unlock(&dm->dc_lock);
4400 amdgpu_dm_update_connector_after_detect(aconnector);
4401 register_backlight_device(dm, link);
4403 if (dm->num_of_edps)
4404 update_connector_ext_caps(aconnector);
4406 if (psr_feature_enabled)
4407 amdgpu_dm_set_psr_caps(link);
4409 /* TODO: Fix vblank control helpers to delay PSR entry to allow this when
4410 * PSR is also supported.
4412 if (link->psr_settings.psr_feature_enabled)
4413 adev_to_drm(adev)->vblank_disable_immediate = false;
4418 /* Software is initialized. Now we can register interrupt handlers. */
4419 switch (adev->asic_type) {
4420 #if defined(CONFIG_DRM_AMD_DC_SI)
4425 if (dce60_register_irq_handlers(dm->adev)) {
4426 DRM_ERROR("DM: Failed to initialize IRQ\n");
4440 case CHIP_POLARIS11:
4441 case CHIP_POLARIS10:
4442 case CHIP_POLARIS12:
4447 if (dce110_register_irq_handlers(dm->adev)) {
4448 DRM_ERROR("DM: Failed to initialize IRQ\n");
4453 switch (adev->ip_versions[DCE_HWIP][0]) {
4454 case IP_VERSION(1, 0, 0):
4455 case IP_VERSION(1, 0, 1):
4456 case IP_VERSION(2, 0, 2):
4457 case IP_VERSION(2, 0, 3):
4458 case IP_VERSION(2, 0, 0):
4459 case IP_VERSION(2, 1, 0):
4460 case IP_VERSION(3, 0, 0):
4461 case IP_VERSION(3, 0, 2):
4462 case IP_VERSION(3, 0, 3):
4463 case IP_VERSION(3, 0, 1):
4464 case IP_VERSION(3, 1, 2):
4465 case IP_VERSION(3, 1, 3):
4466 case IP_VERSION(3, 1, 4):
4467 case IP_VERSION(3, 1, 5):
4468 case IP_VERSION(3, 1, 6):
4469 case IP_VERSION(3, 2, 0):
4470 case IP_VERSION(3, 2, 1):
4471 if (dcn10_register_irq_handlers(dm->adev)) {
4472 DRM_ERROR("DM: Failed to initialize IRQ\n");
4477 DRM_ERROR("Unsupported DCE IP versions: 0x%X\n",
4478 adev->ip_versions[DCE_HWIP][0]);
4492 static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm)
4494 drm_atomic_private_obj_fini(&dm->atomic_obj);
4498 /******************************************************************************
4499 * amdgpu_display_funcs functions
4500 *****************************************************************************/
4503 * dm_bandwidth_update - program display watermarks
4505 * @adev: amdgpu_device pointer
4507 * Calculate and program the display watermarks and line buffer allocation.
4509 static void dm_bandwidth_update(struct amdgpu_device *adev)
4511 /* TODO: implement later */
4514 static const struct amdgpu_display_funcs dm_display_funcs = {
4515 .bandwidth_update = dm_bandwidth_update, /* called unconditionally */
4516 .vblank_get_counter = dm_vblank_get_counter,/* called unconditionally */
4517 .backlight_set_level = NULL, /* never called for DC */
4518 .backlight_get_level = NULL, /* never called for DC */
4519 .hpd_sense = NULL,/* called unconditionally */
4520 .hpd_set_polarity = NULL, /* called unconditionally */
4521 .hpd_get_gpio_reg = NULL, /* VBIOS parsing. DAL does it. */
4522 .page_flip_get_scanoutpos =
4523 dm_crtc_get_scanoutpos,/* called unconditionally */
4524 .add_encoder = NULL, /* VBIOS parsing. DAL does it. */
4525 .add_connector = NULL, /* VBIOS parsing. DAL does it. */
4528 #if defined(CONFIG_DEBUG_KERNEL_DC)
4530 static ssize_t s3_debug_store(struct device *device,
4531 struct device_attribute *attr,
4537 struct drm_device *drm_dev = dev_get_drvdata(device);
4538 struct amdgpu_device *adev = drm_to_adev(drm_dev);
4540 ret = kstrtoint(buf, 0, &s3_state);
4545 drm_kms_helper_hotplug_event(adev_to_drm(adev));
4550 return ret == 0 ? count : 0;
4553 DEVICE_ATTR_WO(s3_debug);
4557 static int dm_early_init(void *handle)
4559 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
4561 switch (adev->asic_type) {
4562 #if defined(CONFIG_DRM_AMD_DC_SI)
4566 adev->mode_info.num_crtc = 6;
4567 adev->mode_info.num_hpd = 6;
4568 adev->mode_info.num_dig = 6;
4571 adev->mode_info.num_crtc = 2;
4572 adev->mode_info.num_hpd = 2;
4573 adev->mode_info.num_dig = 2;
4578 adev->mode_info.num_crtc = 6;
4579 adev->mode_info.num_hpd = 6;
4580 adev->mode_info.num_dig = 6;
4583 adev->mode_info.num_crtc = 4;
4584 adev->mode_info.num_hpd = 6;
4585 adev->mode_info.num_dig = 7;
4589 adev->mode_info.num_crtc = 2;
4590 adev->mode_info.num_hpd = 6;
4591 adev->mode_info.num_dig = 6;
4595 adev->mode_info.num_crtc = 6;
4596 adev->mode_info.num_hpd = 6;
4597 adev->mode_info.num_dig = 7;
4600 adev->mode_info.num_crtc = 3;
4601 adev->mode_info.num_hpd = 6;
4602 adev->mode_info.num_dig = 9;
4605 adev->mode_info.num_crtc = 2;
4606 adev->mode_info.num_hpd = 6;
4607 adev->mode_info.num_dig = 9;
4609 case CHIP_POLARIS11:
4610 case CHIP_POLARIS12:
4611 adev->mode_info.num_crtc = 5;
4612 adev->mode_info.num_hpd = 5;
4613 adev->mode_info.num_dig = 5;
4615 case CHIP_POLARIS10:
4617 adev->mode_info.num_crtc = 6;
4618 adev->mode_info.num_hpd = 6;
4619 adev->mode_info.num_dig = 6;
4624 adev->mode_info.num_crtc = 6;
4625 adev->mode_info.num_hpd = 6;
4626 adev->mode_info.num_dig = 6;
4630 switch (adev->ip_versions[DCE_HWIP][0]) {
4631 case IP_VERSION(2, 0, 2):
4632 case IP_VERSION(3, 0, 0):
4633 adev->mode_info.num_crtc = 6;
4634 adev->mode_info.num_hpd = 6;
4635 adev->mode_info.num_dig = 6;
4637 case IP_VERSION(2, 0, 0):
4638 case IP_VERSION(3, 0, 2):
4639 adev->mode_info.num_crtc = 5;
4640 adev->mode_info.num_hpd = 5;
4641 adev->mode_info.num_dig = 5;
4643 case IP_VERSION(2, 0, 3):
4644 case IP_VERSION(3, 0, 3):
4645 adev->mode_info.num_crtc = 2;
4646 adev->mode_info.num_hpd = 2;
4647 adev->mode_info.num_dig = 2;
4649 case IP_VERSION(1, 0, 0):
4650 case IP_VERSION(1, 0, 1):
4651 case IP_VERSION(3, 0, 1):
4652 case IP_VERSION(2, 1, 0):
4653 case IP_VERSION(3, 1, 2):
4654 case IP_VERSION(3, 1, 3):
4655 case IP_VERSION(3, 1, 4):
4656 case IP_VERSION(3, 1, 5):
4657 case IP_VERSION(3, 1, 6):
4658 case IP_VERSION(3, 2, 0):
4659 case IP_VERSION(3, 2, 1):
4660 adev->mode_info.num_crtc = 4;
4661 adev->mode_info.num_hpd = 4;
4662 adev->mode_info.num_dig = 4;
4665 DRM_ERROR("Unsupported DCE IP versions: 0x%x\n",
4666 adev->ip_versions[DCE_HWIP][0]);
4672 amdgpu_dm_set_irq_funcs(adev);
4674 if (adev->mode_info.funcs == NULL)
4675 adev->mode_info.funcs = &dm_display_funcs;
4678 * Note: Do NOT change adev->audio_endpt_rreg and
4679 * adev->audio_endpt_wreg because they are initialised in
4680 * amdgpu_device_init()
4682 #if defined(CONFIG_DEBUG_KERNEL_DC)
4684 adev_to_drm(adev)->dev,
4685 &dev_attr_s3_debug);
4691 static bool modeset_required(struct drm_crtc_state *crtc_state,
4692 struct dc_stream_state *new_stream,
4693 struct dc_stream_state *old_stream)
4695 return crtc_state->active && drm_atomic_crtc_needs_modeset(crtc_state);
4698 static bool modereset_required(struct drm_crtc_state *crtc_state)
4700 return !crtc_state->active && drm_atomic_crtc_needs_modeset(crtc_state);
4703 static void amdgpu_dm_encoder_destroy(struct drm_encoder *encoder)
4705 drm_encoder_cleanup(encoder);
4709 static const struct drm_encoder_funcs amdgpu_dm_encoder_funcs = {
4710 .destroy = amdgpu_dm_encoder_destroy,
4714 static void get_min_max_dc_plane_scaling(struct drm_device *dev,
4715 struct drm_framebuffer *fb,
4716 int *min_downscale, int *max_upscale)
4718 struct amdgpu_device *adev = drm_to_adev(dev);
4719 struct dc *dc = adev->dm.dc;
4720 /* Caps for all supported planes are the same on DCE and DCN 1 - 3 */
4721 struct dc_plane_cap *plane_cap = &dc->caps.planes[0];
4723 switch (fb->format->format) {
4724 case DRM_FORMAT_P010:
4725 case DRM_FORMAT_NV12:
4726 case DRM_FORMAT_NV21:
4727 *max_upscale = plane_cap->max_upscale_factor.nv12;
4728 *min_downscale = plane_cap->max_downscale_factor.nv12;
4731 case DRM_FORMAT_XRGB16161616F:
4732 case DRM_FORMAT_ARGB16161616F:
4733 case DRM_FORMAT_XBGR16161616F:
4734 case DRM_FORMAT_ABGR16161616F:
4735 *max_upscale = plane_cap->max_upscale_factor.fp16;
4736 *min_downscale = plane_cap->max_downscale_factor.fp16;
4740 *max_upscale = plane_cap->max_upscale_factor.argb8888;
4741 *min_downscale = plane_cap->max_downscale_factor.argb8888;
4746 * A factor of 1 in the plane_cap means scaling is not allowed, i.e. use a
4747 * scaling factor of 1.0 == 1000 units.
4749 if (*max_upscale == 1)
4750 *max_upscale = 1000;
4752 if (*min_downscale == 1)
4753 *min_downscale = 1000;
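/*
 * Worked example (illustrative only): the limits are in units of 0.001,
 * matching the scale_w/scale_h ratios computed in fill_dc_scaling_info()
 * below. A min_downscale of 250 allows shrinking to 1/4 of the source
 * size, a max_upscale of 16000 allows a 16x enlargement, and the
 * normalized value of 1000 means 1:1 only.
 */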
4757 static int fill_dc_scaling_info(struct amdgpu_device *adev,
4758 const struct drm_plane_state *state,
4759 struct dc_scaling_info *scaling_info)
4761 int scale_w, scale_h, min_downscale, max_upscale;
4763 memset(scaling_info, 0, sizeof(*scaling_info));
4765 /* Source is fixed 16.16 but we ignore mantissa for now... */
4766 scaling_info->src_rect.x = state->src_x >> 16;
4767 scaling_info->src_rect.y = state->src_y >> 16;
4770 * For reasons we don't (yet) fully understand a non-zero
4771 * src_y coordinate into an NV12 buffer can cause a
4772 * system hang on DCN1x.
4773 * To avoid hangs (and perhaps to be overly cautious)
4774 * let's reject both non-zero src_x and src_y.
4776 * We currently know of only one use-case to reproduce a
4777 * scenario with non-zero src_x and src_y for NV12, which
4778 * is to gesture the YouTube Android app into full screen
4781 if (((adev->ip_versions[DCE_HWIP][0] == IP_VERSION(1, 0, 0)) ||
4782 (adev->ip_versions[DCE_HWIP][0] == IP_VERSION(1, 0, 1))) &&
4783 (state->fb && state->fb->format->format == DRM_FORMAT_NV12 &&
4784 (scaling_info->src_rect.x != 0 || scaling_info->src_rect.y != 0)))
4787 scaling_info->src_rect.width = state->src_w >> 16;
4788 if (scaling_info->src_rect.width == 0)
4791 scaling_info->src_rect.height = state->src_h >> 16;
4792 if (scaling_info->src_rect.height == 0)
4795 scaling_info->dst_rect.x = state->crtc_x;
4796 scaling_info->dst_rect.y = state->crtc_y;
4798 if (state->crtc_w == 0)
4801 scaling_info->dst_rect.width = state->crtc_w;
4803 if (state->crtc_h == 0)
4806 scaling_info->dst_rect.height = state->crtc_h;
4808 /* DRM doesn't specify clipping on destination output. */
4809 scaling_info->clip_rect = scaling_info->dst_rect;
4811 /* Validate scaling per-format with DC plane caps */
4812 if (state->plane && state->plane->dev && state->fb) {
4813 get_min_max_dc_plane_scaling(state->plane->dev, state->fb,
4814 &min_downscale, &max_upscale);
4816 min_downscale = 250;
4817 max_upscale = 16000;
4820 scale_w = scaling_info->dst_rect.width * 1000 /
4821 scaling_info->src_rect.width;
4823 if (scale_w < min_downscale || scale_w > max_upscale)
4826 scale_h = scaling_info->dst_rect.height * 1000 /
4827 scaling_info->src_rect.height;
4829 if (scale_h < min_downscale || scale_h > max_upscale)
4833 * The "scaling_quality" can be ignored for now, quality = 0 has DC
4834 * assume reasonable defaults based on the format.
4841 fill_gfx8_tiling_info_from_flags(union dc_tiling_info *tiling_info,
4842 uint64_t tiling_flags)
4844 /* Fill GFX8 params */
4845 if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == DC_ARRAY_2D_TILED_THIN1) {
4846 unsigned int bankw, bankh, mtaspect, tile_split, num_banks;
4848 bankw = AMDGPU_TILING_GET(tiling_flags, BANK_WIDTH);
4849 bankh = AMDGPU_TILING_GET(tiling_flags, BANK_HEIGHT);
4850 mtaspect = AMDGPU_TILING_GET(tiling_flags, MACRO_TILE_ASPECT);
4851 tile_split = AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT);
4852 num_banks = AMDGPU_TILING_GET(tiling_flags, NUM_BANKS);
4854 /* XXX fix me for VI */
4855 tiling_info->gfx8.num_banks = num_banks;
4856 tiling_info->gfx8.array_mode =
4857 DC_ARRAY_2D_TILED_THIN1;
4858 tiling_info->gfx8.tile_split = tile_split;
4859 tiling_info->gfx8.bank_width = bankw;
4860 tiling_info->gfx8.bank_height = bankh;
4861 tiling_info->gfx8.tile_aspect = mtaspect;
4862 tiling_info->gfx8.tile_mode =
4863 DC_ADDR_SURF_MICRO_TILING_DISPLAY;
4864 } else if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE)
4865 == DC_ARRAY_1D_TILED_THIN1) {
4866 tiling_info->gfx8.array_mode = DC_ARRAY_1D_TILED_THIN1;
4869 tiling_info->gfx8.pipe_config =
4870 AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG);
4874 fill_gfx9_tiling_info_from_device(const struct amdgpu_device *adev,
4875 union dc_tiling_info *tiling_info)
4877 tiling_info->gfx9.num_pipes =
4878 adev->gfx.config.gb_addr_config_fields.num_pipes;
4879 tiling_info->gfx9.num_banks =
4880 adev->gfx.config.gb_addr_config_fields.num_banks;
4881 tiling_info->gfx9.pipe_interleave =
4882 adev->gfx.config.gb_addr_config_fields.pipe_interleave_size;
4883 tiling_info->gfx9.num_shader_engines =
4884 adev->gfx.config.gb_addr_config_fields.num_se;
4885 tiling_info->gfx9.max_compressed_frags =
4886 adev->gfx.config.gb_addr_config_fields.max_compress_frags;
4887 tiling_info->gfx9.num_rb_per_se =
4888 adev->gfx.config.gb_addr_config_fields.num_rb_per_se;
4889 tiling_info->gfx9.shaderEnable = 1;
4890 if (adev->ip_versions[GC_HWIP][0] >= IP_VERSION(10, 3, 0))
4891 tiling_info->gfx9.num_pkrs = adev->gfx.config.gb_addr_config_fields.num_pkrs;
4895 validate_dcc(struct amdgpu_device *adev,
4896 const enum surface_pixel_format format,
4897 const enum dc_rotation_angle rotation,
4898 const union dc_tiling_info *tiling_info,
4899 const struct dc_plane_dcc_param *dcc,
4900 const struct dc_plane_address *address,
4901 const struct plane_size *plane_size)
4903 struct dc *dc = adev->dm.dc;
4904 struct dc_dcc_surface_param input;
4905 struct dc_surface_dcc_cap output;
4907 memset(&input, 0, sizeof(input));
4908 memset(&output, 0, sizeof(output));
4913 if (format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN ||
4914 !dc->cap_funcs.get_dcc_compression_cap)
4917 input.format = format;
4918 input.surface_size.width = plane_size->surface_size.width;
4919 input.surface_size.height = plane_size->surface_size.height;
4920 input.swizzle_mode = tiling_info->gfx9.swizzle;
4922 if (rotation == ROTATION_ANGLE_0 || rotation == ROTATION_ANGLE_180)
4923 input.scan = SCAN_DIRECTION_HORIZONTAL;
4924 else if (rotation == ROTATION_ANGLE_90 || rotation == ROTATION_ANGLE_270)
4925 input.scan = SCAN_DIRECTION_VERTICAL;
4927 if (!dc->cap_funcs.get_dcc_compression_cap(dc, &input, &output))
4930 if (!output.capable)
4933 if (dcc->independent_64b_blks == 0 &&
4934 output.grph.rgb.independent_64b_blks != 0)
4941 modifier_has_dcc(uint64_t modifier)
4943 return IS_AMD_FMT_MOD(modifier) && AMD_FMT_MOD_GET(DCC, modifier);
4947 modifier_gfx9_swizzle_mode(uint64_t modifier)
4949 if (modifier == DRM_FORMAT_MOD_LINEAR)
4952 return AMD_FMT_MOD_GET(TILE, modifier);
4955 static const struct drm_format_info *
4956 amd_get_format_info(const struct drm_mode_fb_cmd2 *cmd)
4958 return amdgpu_lookup_format_info(cmd->pixel_format, cmd->modifier[0]);
4962 fill_gfx9_tiling_info_from_modifier(const struct amdgpu_device *adev,
4963 union dc_tiling_info *tiling_info,
4966 unsigned int mod_bank_xor_bits = AMD_FMT_MOD_GET(BANK_XOR_BITS, modifier);
4967 unsigned int mod_pipe_xor_bits = AMD_FMT_MOD_GET(PIPE_XOR_BITS, modifier);
4968 unsigned int pkrs_log2 = AMD_FMT_MOD_GET(PACKERS, modifier);
4969 unsigned int pipes_log2;
4971 pipes_log2 = min(5u, mod_pipe_xor_bits);
4973 fill_gfx9_tiling_info_from_device(adev, tiling_info);
4975 if (!IS_AMD_FMT_MOD(modifier))
4978 tiling_info->gfx9.num_pipes = 1u << pipes_log2;
4979 tiling_info->gfx9.num_shader_engines = 1u << (mod_pipe_xor_bits - pipes_log2);
4981 if (adev->family >= AMDGPU_FAMILY_NV) {
4982 tiling_info->gfx9.num_pkrs = 1u << pkrs_log2;
4984 tiling_info->gfx9.num_banks = 1u << mod_bank_xor_bits;
4986 /* for DCC we know it isn't rb aligned, so rb_per_se doesn't matter. */
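/*
 * Worked example (illustrative only): a modifier with PIPE_XOR_BITS = 6
 * has pipes_log2 capped at 5, so num_pipes = 1 << 5 = 32 and
 * num_shader_engines = 1 << (6 - 5) = 2; for PIPE_XOR_BITS <= 5 the
 * shader engine count collapses to 1.
 */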
4990 enum dm_micro_swizzle {
4991 MICRO_SWIZZLE_Z = 0,
4992 MICRO_SWIZZLE_S = 1,
4993 MICRO_SWIZZLE_D = 2,
4997 static bool dm_plane_format_mod_supported(struct drm_plane *plane,
5001 struct amdgpu_device *adev = drm_to_adev(plane->dev);
5002 const struct drm_format_info *info = drm_format_info(format);
5005 enum dm_micro_swizzle microtile = modifier_gfx9_swizzle_mode(modifier) & 3;
5011 * We always have to allow these modifiers:
5012 * 1. Core DRM checks for LINEAR support if userspace does not provide modifiers.
5013 * 2. Not passing any modifiers is the same as explicitly passing INVALID.
5015 if (modifier == DRM_FORMAT_MOD_LINEAR ||
5016 modifier == DRM_FORMAT_MOD_INVALID) {
5020 /* Check that the modifier is on the list of the plane's supported modifiers. */
5021 for (i = 0; i < plane->modifier_count; i++) {
5022 if (modifier == plane->modifiers[i])
5025 if (i == plane->modifier_count)
5029 * For D swizzle the canonical modifier depends on the bpp, so check
5032 if (AMD_FMT_MOD_GET(TILE_VERSION, modifier) == AMD_FMT_MOD_TILE_VER_GFX9 &&
5033 adev->family >= AMDGPU_FAMILY_NV) {
5034 if (microtile == MICRO_SWIZZLE_D && info->cpp[0] == 4)
5038 if (adev->family >= AMDGPU_FAMILY_RV && microtile == MICRO_SWIZZLE_D &&
5042 if (modifier_has_dcc(modifier)) {
5043 /* Per radeonsi comments 16/64 bpp are more complicated. */
5044 if (info->cpp[0] != 4)
5046 /* We support multi-planar formats, but not when combined with
5047 * additional DCC metadata planes. */
5048 if (info->num_planes > 1)
5056 add_modifier(uint64_t **mods, uint64_t *size, uint64_t *cap, uint64_t mod)
5061 if (*cap - *size < 1) {
5062 uint64_t new_cap = *cap * 2;
5063 uint64_t *new_mods = kmalloc(new_cap * sizeof(uint64_t), GFP_KERNEL);
5071 memcpy(new_mods, *mods, sizeof(uint64_t) * *size);
5077 (*mods)[*size] = mod;
5082 add_gfx9_modifiers(const struct amdgpu_device *adev,
5083 uint64_t **mods, uint64_t *size, uint64_t *capacity)
5085 int pipes = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
5086 int pipe_xor_bits = min(8, pipes +
5087 ilog2(adev->gfx.config.gb_addr_config_fields.num_se));
5088 int bank_xor_bits = min(8 - pipe_xor_bits,
5089 ilog2(adev->gfx.config.gb_addr_config_fields.num_banks));
5090 int rb = ilog2(adev->gfx.config.gb_addr_config_fields.num_se) +
5091 ilog2(adev->gfx.config.gb_addr_config_fields.num_rb_per_se);
5094 if (adev->family == AMDGPU_FAMILY_RV) {
5095 /* Raven2 and later */
5096 bool has_constant_encode = adev->asic_type > CHIP_RAVEN || adev->external_rev_id >= 0x81;
5099 * No _D DCC swizzles yet because we only allow 32bpp, which
5100 * doesn't support _D on DCN
5103 if (has_constant_encode) {
5104 add_modifier(mods, size, capacity, AMD_FMT_MOD |
5105 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
5106 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
5107 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5108 AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
5109 AMD_FMT_MOD_SET(DCC, 1) |
5110 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
5111 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
5112 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1));
5115 add_modifier(mods, size, capacity, AMD_FMT_MOD |
5116 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
5117 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
5118 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5119 AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
5120 AMD_FMT_MOD_SET(DCC, 1) |
5121 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
5122 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
5123 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 0));
5125 if (has_constant_encode) {
5126 add_modifier(mods, size, capacity, AMD_FMT_MOD |
5127 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
5128 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
5129 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5130 AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
5131 AMD_FMT_MOD_SET(DCC, 1) |
5132 AMD_FMT_MOD_SET(DCC_RETILE, 1) |
5133 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
5134 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
5136 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
5137 AMD_FMT_MOD_SET(RB, rb) |
5138 AMD_FMT_MOD_SET(PIPE, pipes));
5141 add_modifier(mods, size, capacity, AMD_FMT_MOD |
5142 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
5143 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
5144 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5145 AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
5146 AMD_FMT_MOD_SET(DCC, 1) |
5147 AMD_FMT_MOD_SET(DCC_RETILE, 1) |
5148 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
5149 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
5150 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 0) |
5151 AMD_FMT_MOD_SET(RB, rb) |
5152 AMD_FMT_MOD_SET(PIPE, pipes));
5156 * Only supported for 64bpp on Raven, will be filtered on format in
5157 * dm_plane_format_mod_supported.
5159 add_modifier(mods, size, capacity, AMD_FMT_MOD |
5160 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D_X) |
5161 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
5162 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5163 AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits));
5165 if (adev->family == AMDGPU_FAMILY_RV) {
5166 add_modifier(mods, size, capacity, AMD_FMT_MOD |
5167 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
5168 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
5169 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5170 AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits));
5174 * Only supported for 64bpp on Raven, will be filtered on format in
5175 * dm_plane_format_mod_supported.
5177 add_modifier(mods, size, capacity, AMD_FMT_MOD |
5178 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
5179 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
5181 if (adev->family == AMDGPU_FAMILY_RV) {
5182 add_modifier(mods, size, capacity, AMD_FMT_MOD |
5183 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
5184 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
5189 add_gfx10_1_modifiers(const struct amdgpu_device *adev,
5190 uint64_t **mods, uint64_t *size, uint64_t *capacity)
5192 int pipe_xor_bits = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
5194 add_modifier(mods, size, capacity, AMD_FMT_MOD |
5195 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5196 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
5197 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5198 AMD_FMT_MOD_SET(DCC, 1) |
5199 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
5200 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
5201 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
5203 add_modifier(mods, size, capacity, AMD_FMT_MOD |
5204 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5205 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
5206 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5207 AMD_FMT_MOD_SET(DCC, 1) |
5208 AMD_FMT_MOD_SET(DCC_RETILE, 1) |
5209 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
5210 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
5211 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
5213 add_modifier(mods, size, capacity, AMD_FMT_MOD |
5214 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5215 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
5216 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits));
5218 add_modifier(mods, size, capacity, AMD_FMT_MOD |
5219 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
5220 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
5221 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits));
5224 /* Only supported for 64bpp, will be filtered in dm_plane_format_mod_supported */
5225 add_modifier(mods, size, capacity, AMD_FMT_MOD |
5226 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
5227 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
5229 add_modifier(mods, size, capacity, AMD_FMT_MOD |
5230 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
5231 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
5235 add_gfx10_3_modifiers(const struct amdgpu_device *adev,
5236 uint64_t **mods, uint64_t *size, uint64_t *capacity)
5238 int pipe_xor_bits = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
5239 int pkrs = ilog2(adev->gfx.config.gb_addr_config_fields.num_pkrs);
5241 add_modifier(mods, size, capacity, AMD_FMT_MOD |
5242 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5243 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
5244 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5245 AMD_FMT_MOD_SET(PACKERS, pkrs) |
5246 AMD_FMT_MOD_SET(DCC, 1) |
5247 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
5248 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
5249 AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
5250 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
5252 add_modifier(mods, size, capacity, AMD_FMT_MOD |
5253 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5254 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
5255 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5256 AMD_FMT_MOD_SET(PACKERS, pkrs) |
5257 AMD_FMT_MOD_SET(DCC, 1) |
5258 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
5259 AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
5260 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_128B));
5262 add_modifier(mods, size, capacity, AMD_FMT_MOD |
5263 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5264 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
5265 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5266 AMD_FMT_MOD_SET(PACKERS, pkrs) |
5267 AMD_FMT_MOD_SET(DCC, 1) |
5268 AMD_FMT_MOD_SET(DCC_RETILE, 1) |
5269 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
5270 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
5271 AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
5272 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
5274 add_modifier(mods, size, capacity, AMD_FMT_MOD |
5275 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5276 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
5277 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5278 AMD_FMT_MOD_SET(PACKERS, pkrs) |
5279 AMD_FMT_MOD_SET(DCC, 1) |
5280 AMD_FMT_MOD_SET(DCC_RETILE, 1) |
5281 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
5282 AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
5283 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_128B));
5285 add_modifier(mods, size, capacity, AMD_FMT_MOD |
5286 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5287 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
5288 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5289 AMD_FMT_MOD_SET(PACKERS, pkrs));
5291 add_modifier(mods, size, capacity, AMD_FMT_MOD |
5292 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
5293 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
5294 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5295 AMD_FMT_MOD_SET(PACKERS, pkrs));
5297 /* Only supported for 64bpp, will be filtered in dm_plane_format_mod_supported */
5298 add_modifier(mods, size, capacity, AMD_FMT_MOD |
5299 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
5300 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
5302 add_modifier(mods, size, capacity, AMD_FMT_MOD |
5303 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
5304 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
5308 add_gfx11_modifiers(struct amdgpu_device *adev,
5309 uint64_t **mods, uint64_t *size, uint64_t *capacity)
5312 int pipe_xor_bits = 0;
5317 unsigned swizzle_r_x;
5318 uint64_t modifier_r_x;
5319 uint64_t modifier_dcc_best;
5320 uint64_t modifier_dcc_4k;
5322 /* TODO: GFX11 IP HW init hasn't finished and we get zero if we read from
5323 * adev->gfx.config.gb_addr_config_fields.num_{pkrs,pipes} */
5324 gb_addr_config = RREG32_SOC15(GC, 0, regGB_ADDR_CONFIG);
5325 ASSERT(gb_addr_config != 0);
5327 num_pkrs = 1 << REG_GET_FIELD(gb_addr_config, GB_ADDR_CONFIG, NUM_PKRS);
5328 pkrs = ilog2(num_pkrs);
5329 num_pipes = 1 << REG_GET_FIELD(gb_addr_config, GB_ADDR_CONFIG, NUM_PIPES);
5330 pipe_xor_bits = ilog2(num_pipes);
5332 for (i = 0; i < 2; i++) {
5333 /* Insert the best one first. */
5334 /* R_X swizzle modes are the best for rendering and DCC requires them. */
5336 swizzle_r_x = !i ? AMD_FMT_MOD_TILE_GFX11_256K_R_X : AMD_FMT_MOD_TILE_GFX9_64K_R_X;
5338 swizzle_r_x = !i ? AMD_FMT_MOD_TILE_GFX9_64K_R_X : AMD_FMT_MOD_TILE_GFX11_256K_R_X;
5340 modifier_r_x = AMD_FMT_MOD |
5341 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX11) |
5342 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5343 AMD_FMT_MOD_SET(TILE, swizzle_r_x) |
5344 AMD_FMT_MOD_SET(PACKERS, pkrs);
5346 /* DCC_CONSTANT_ENCODE is not set because it can't vary with gfx11 (it's implied to be 1). */
5347 modifier_dcc_best = modifier_r_x | AMD_FMT_MOD_SET(DCC, 1) |
5348 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 0) |
5349 AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
5350 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_128B);
5352 /* DCC settings for 4K and greater resolutions. (required by display hw) */
5353 modifier_dcc_4k = modifier_r_x | AMD_FMT_MOD_SET(DCC, 1) |
5354 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
5355 AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
5356 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B);
5358 add_modifier(mods, size, capacity, modifier_dcc_best);
5359 add_modifier(mods, size, capacity, modifier_dcc_4k);
5361 add_modifier(mods, size, capacity, modifier_dcc_best | AMD_FMT_MOD_SET(DCC_RETILE, 1));
5362 add_modifier(mods, size, capacity, modifier_dcc_4k | AMD_FMT_MOD_SET(DCC_RETILE, 1));
5364 add_modifier(mods, size, capacity, modifier_r_x);
5367 add_modifier(mods, size, capacity, AMD_FMT_MOD |
5368 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX11) |
5369 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D));
5373 get_plane_modifiers(struct amdgpu_device *adev, unsigned int plane_type, uint64_t **mods)
5375 uint64_t size = 0, capacity = 128;
5378 /* We have not hooked up any pre-GFX9 modifiers. */
5379 if (adev->family < AMDGPU_FAMILY_AI)
5382 *mods = kmalloc(capacity * sizeof(uint64_t), GFP_KERNEL);
5384 if (plane_type == DRM_PLANE_TYPE_CURSOR) {
5385 add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_LINEAR);
5386 add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_INVALID);
5387 return *mods ? 0 : -ENOMEM;
5390 switch (adev->family) {
5391 case AMDGPU_FAMILY_AI:
5392 case AMDGPU_FAMILY_RV:
5393 add_gfx9_modifiers(adev, mods, &size, &capacity);
5395 case AMDGPU_FAMILY_NV:
5396 case AMDGPU_FAMILY_VGH:
5397 case AMDGPU_FAMILY_YC:
5398 case AMDGPU_FAMILY_GC_10_3_6:
5399 case AMDGPU_FAMILY_GC_10_3_7:
5400 if (adev->ip_versions[GC_HWIP][0] >= IP_VERSION(10, 3, 0))
5401 add_gfx10_3_modifiers(adev, mods, &size, &capacity);
5403 add_gfx10_1_modifiers(adev, mods, &size, &capacity);
5405 case AMDGPU_FAMILY_GC_11_0_0:
5406 case AMDGPU_FAMILY_GC_11_0_2:
5407 add_gfx11_modifiers(adev, mods, &size, &capacity);
5411 add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_LINEAR);
5413 /* INVALID marks the end of the list. */
5414 add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_INVALID);
5423 fill_gfx9_plane_attributes_from_modifiers(struct amdgpu_device *adev,
5424 const struct amdgpu_framebuffer *afb,
5425 const enum surface_pixel_format format,
5426 const enum dc_rotation_angle rotation,
5427 const struct plane_size *plane_size,
5428 union dc_tiling_info *tiling_info,
5429 struct dc_plane_dcc_param *dcc,
5430 struct dc_plane_address *address,
5431 const bool force_disable_dcc)
5433 const uint64_t modifier = afb->base.modifier;
5436 fill_gfx9_tiling_info_from_modifier(adev, tiling_info, modifier);
5437 tiling_info->gfx9.swizzle = modifier_gfx9_swizzle_mode(modifier);
5439 if (modifier_has_dcc(modifier) && !force_disable_dcc) {
5440 uint64_t dcc_address = afb->address + afb->base.offsets[1];
5441 bool independent_64b_blks = AMD_FMT_MOD_GET(DCC_INDEPENDENT_64B, modifier);
5442 bool independent_128b_blks = AMD_FMT_MOD_GET(DCC_INDEPENDENT_128B, modifier);
5445 dcc->meta_pitch = afb->base.pitches[1];
5446 dcc->independent_64b_blks = independent_64b_blks;
5447 if (AMD_FMT_MOD_GET(TILE_VERSION, modifier) >= AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) {
5448 if (independent_64b_blks && independent_128b_blks)
5449 dcc->dcc_ind_blk = hubp_ind_block_64b_no_128bcl;
5450 else if (independent_128b_blks)
5451 dcc->dcc_ind_blk = hubp_ind_block_128b;
5452 else if (independent_64b_blks && !independent_128b_blks)
5453 dcc->dcc_ind_blk = hubp_ind_block_64b;
5455 dcc->dcc_ind_blk = hubp_ind_block_unconstrained;
5457 if (independent_64b_blks)
5458 dcc->dcc_ind_blk = hubp_ind_block_64b;
5460 dcc->dcc_ind_blk = hubp_ind_block_unconstrained;
5463 address->grph.meta_addr.low_part = lower_32_bits(dcc_address);
5464 address->grph.meta_addr.high_part = upper_32_bits(dcc_address);
5467 ret = validate_dcc(adev, format, rotation, tiling_info, dcc, address, plane_size);
5469 drm_dbg_kms(adev_to_drm(adev), "validate_dcc: returned error: %d\n", ret);
5475 fill_plane_buffer_attributes(struct amdgpu_device *adev,
5476 const struct amdgpu_framebuffer *afb,
5477 const enum surface_pixel_format format,
5478 const enum dc_rotation_angle rotation,
5479 const uint64_t tiling_flags,
5480 union dc_tiling_info *tiling_info,
5481 struct plane_size *plane_size,
5482 struct dc_plane_dcc_param *dcc,
5483 struct dc_plane_address *address,
5485 bool force_disable_dcc)
5487 const struct drm_framebuffer *fb = &afb->base;
5490 memset(tiling_info, 0, sizeof(*tiling_info));
5491 memset(plane_size, 0, sizeof(*plane_size));
5492 memset(dcc, 0, sizeof(*dcc));
5493 memset(address, 0, sizeof(*address));
5495 address->tmz_surface = tmz_surface;
5497 if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) {
5498 uint64_t addr = afb->address + fb->offsets[0];
5500 plane_size->surface_size.x = 0;
5501 plane_size->surface_size.y = 0;
5502 plane_size->surface_size.width = fb->width;
5503 plane_size->surface_size.height = fb->height;
5504 plane_size->surface_pitch =
5505 fb->pitches[0] / fb->format->cpp[0];
5507 address->type = PLN_ADDR_TYPE_GRAPHICS;
5508 address->grph.addr.low_part = lower_32_bits(addr);
5509 address->grph.addr.high_part = upper_32_bits(addr);
5510 } else if (format < SURFACE_PIXEL_FORMAT_INVALID) {
5511 uint64_t luma_addr = afb->address + fb->offsets[0];
5512 uint64_t chroma_addr = afb->address + fb->offsets[1];
5514 plane_size->surface_size.x = 0;
5515 plane_size->surface_size.y = 0;
5516 plane_size->surface_size.width = fb->width;
5517 plane_size->surface_size.height = fb->height;
5518 plane_size->surface_pitch =
5519 fb->pitches[0] / fb->format->cpp[0];
5521 plane_size->chroma_size.x = 0;
5522 plane_size->chroma_size.y = 0;
5523 /* TODO: set these based on surface format */
5524 plane_size->chroma_size.width = fb->width / 2;
5525 plane_size->chroma_size.height = fb->height / 2;
5527 plane_size->chroma_pitch =
5528 fb->pitches[1] / fb->format->cpp[1];
5530 address->type = PLN_ADDR_TYPE_VIDEO_PROGRESSIVE;
5531 address->video_progressive.luma_addr.low_part =
5532 lower_32_bits(luma_addr);
5533 address->video_progressive.luma_addr.high_part =
5534 upper_32_bits(luma_addr);
5535 address->video_progressive.chroma_addr.low_part =
5536 lower_32_bits(chroma_addr);
5537 address->video_progressive.chroma_addr.high_part =
5538 upper_32_bits(chroma_addr);
5541 if (adev->family >= AMDGPU_FAMILY_AI) {
5542 ret = fill_gfx9_plane_attributes_from_modifiers(adev, afb, format,
5543 rotation, plane_size,
5550 fill_gfx8_tiling_info_from_flags(tiling_info, tiling_flags);
5557 fill_blending_from_plane_state(const struct drm_plane_state *plane_state,
5558 bool *per_pixel_alpha, bool *pre_multiplied_alpha,
5559 bool *global_alpha, int *global_alpha_value)
5561 *per_pixel_alpha = false;
5562 *pre_multiplied_alpha = true;
5563 *global_alpha = false;
5564 *global_alpha_value = 0xff;
5566 if (plane_state->plane->type != DRM_PLANE_TYPE_OVERLAY)
5569 if (plane_state->pixel_blend_mode == DRM_MODE_BLEND_PREMULTI ||
5570 plane_state->pixel_blend_mode == DRM_MODE_BLEND_COVERAGE) {
5571 static const uint32_t alpha_formats[] = {
5572 DRM_FORMAT_ARGB8888,
5573 DRM_FORMAT_RGBA8888,
5574 DRM_FORMAT_ABGR8888,
5576 uint32_t format = plane_state->fb->format->format;
5579 for (i = 0; i < ARRAY_SIZE(alpha_formats); ++i) {
5580 if (format == alpha_formats[i]) {
5581 *per_pixel_alpha = true;
5586 if (*per_pixel_alpha && plane_state->pixel_blend_mode == DRM_MODE_BLEND_COVERAGE)
5587 *pre_multiplied_alpha = false;
5590 if (plane_state->alpha < 0xffff) {
5591 *global_alpha = true;
5592 *global_alpha_value = plane_state->alpha >> 8;
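/*
 * Worked example (illustrative only): the DRM plane "alpha" property is
 * 16-bit (0x0000..0xffff) while DC takes an 8-bit global alpha, so the
 * shift above maps e.g. 0x8080 to 0x80 (~50% opacity) and 0xffff to 0xff.
 */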
5597 fill_plane_color_attributes(const struct drm_plane_state *plane_state,
5598 const enum surface_pixel_format format,
5599 enum dc_color_space *color_space)
5603 *color_space = COLOR_SPACE_SRGB;
5605 /* DRM color properties only affect non-RGB formats. */
5606 if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN)
5609 full_range = (plane_state->color_range == DRM_COLOR_YCBCR_FULL_RANGE);
5611 switch (plane_state->color_encoding) {
5612 case DRM_COLOR_YCBCR_BT601:
5614 *color_space = COLOR_SPACE_YCBCR601;
5616 *color_space = COLOR_SPACE_YCBCR601_LIMITED;
5619 case DRM_COLOR_YCBCR_BT709:
5621 *color_space = COLOR_SPACE_YCBCR709;
5623 *color_space = COLOR_SPACE_YCBCR709_LIMITED;
5626 case DRM_COLOR_YCBCR_BT2020:
5628 *color_space = COLOR_SPACE_2020_YCBCR;
5641 fill_dc_plane_info_and_addr(struct amdgpu_device *adev,
5642 const struct drm_plane_state *plane_state,
5643 const uint64_t tiling_flags,
5644 struct dc_plane_info *plane_info,
5645 struct dc_plane_address *address,
5647 bool force_disable_dcc)
5649 const struct drm_framebuffer *fb = plane_state->fb;
5650 const struct amdgpu_framebuffer *afb =
5651 to_amdgpu_framebuffer(plane_state->fb);
5654 memset(plane_info, 0, sizeof(*plane_info));
5656 switch (fb->format->format) {
5658 plane_info->format =
5659 SURFACE_PIXEL_FORMAT_GRPH_PALETA_256_COLORS;
5661 case DRM_FORMAT_RGB565:
5662 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_RGB565;
5664 case DRM_FORMAT_XRGB8888:
5665 case DRM_FORMAT_ARGB8888:
5666 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB8888;
5668 case DRM_FORMAT_XRGB2101010:
5669 case DRM_FORMAT_ARGB2101010:
5670 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010;
5672 case DRM_FORMAT_XBGR2101010:
5673 case DRM_FORMAT_ABGR2101010:
5674 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010;
5676 case DRM_FORMAT_XBGR8888:
5677 case DRM_FORMAT_ABGR8888:
5678 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR8888;
5680 case DRM_FORMAT_NV21:
5681 plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr;
5683 case DRM_FORMAT_NV12:
5684 plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb;
5686 case DRM_FORMAT_P010:
5687 plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb;
5689 case DRM_FORMAT_XRGB16161616F:
5690 case DRM_FORMAT_ARGB16161616F:
5691 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F;
5693 case DRM_FORMAT_XBGR16161616F:
5694 case DRM_FORMAT_ABGR16161616F:
5695 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F;
5697 case DRM_FORMAT_XRGB16161616:
5698 case DRM_FORMAT_ARGB16161616:
5699 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616;
5701 case DRM_FORMAT_XBGR16161616:
5702 case DRM_FORMAT_ABGR16161616:
5703 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616;
5707 "Unsupported screen format %p4cc\n",
5708 &fb->format->format);
5712 switch (plane_state->rotation & DRM_MODE_ROTATE_MASK) {
5713 case DRM_MODE_ROTATE_0:
5714 plane_info->rotation = ROTATION_ANGLE_0;
5716 case DRM_MODE_ROTATE_90:
5717 plane_info->rotation = ROTATION_ANGLE_90;
5719 case DRM_MODE_ROTATE_180:
5720 plane_info->rotation = ROTATION_ANGLE_180;
5722 case DRM_MODE_ROTATE_270:
5723 plane_info->rotation = ROTATION_ANGLE_270;
5726 plane_info->rotation = ROTATION_ANGLE_0;
5730 plane_info->visible = true;
5731 plane_info->stereo_format = PLANE_STEREO_FORMAT_NONE;
5733 plane_info->layer_index = 0;
5735 ret = fill_plane_color_attributes(plane_state, plane_info->format,
5736 &plane_info->color_space);
5740 ret = fill_plane_buffer_attributes(adev, afb, plane_info->format,
5741 plane_info->rotation, tiling_flags,
5742 &plane_info->tiling_info,
5743 &plane_info->plane_size,
5744 &plane_info->dcc, address, tmz_surface,
5749 fill_blending_from_plane_state(
5750 plane_state, &plane_info->per_pixel_alpha, &plane_info->pre_multiplied_alpha,
5751 &plane_info->global_alpha, &plane_info->global_alpha_value);
5756 static int fill_dc_plane_attributes(struct amdgpu_device *adev,
5757 struct dc_plane_state *dc_plane_state,
5758 struct drm_plane_state *plane_state,
5759 struct drm_crtc_state *crtc_state)
5761 struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
5762 struct amdgpu_framebuffer *afb = (struct amdgpu_framebuffer *)plane_state->fb;
5763 struct dc_scaling_info scaling_info;
5764 struct dc_plane_info plane_info;
5766 bool force_disable_dcc = false;
5768 ret = fill_dc_scaling_info(adev, plane_state, &scaling_info);
5772 dc_plane_state->src_rect = scaling_info.src_rect;
5773 dc_plane_state->dst_rect = scaling_info.dst_rect;
5774 dc_plane_state->clip_rect = scaling_info.clip_rect;
5775 dc_plane_state->scaling_quality = scaling_info.scaling_quality;
5777 force_disable_dcc = adev->asic_type == CHIP_RAVEN && adev->in_suspend;
5778 ret = fill_dc_plane_info_and_addr(adev, plane_state,
5781 &dc_plane_state->address,
5787 dc_plane_state->format = plane_info.format;
5788 dc_plane_state->color_space = plane_info.color_space;
5790 dc_plane_state->plane_size = plane_info.plane_size;
5791 dc_plane_state->rotation = plane_info.rotation;
5792 dc_plane_state->horizontal_mirror = plane_info.horizontal_mirror;
5793 dc_plane_state->stereo_format = plane_info.stereo_format;
5794 dc_plane_state->tiling_info = plane_info.tiling_info;
5795 dc_plane_state->visible = plane_info.visible;
5796 dc_plane_state->per_pixel_alpha = plane_info.per_pixel_alpha;
5797 dc_plane_state->pre_multiplied_alpha = plane_info.pre_multiplied_alpha;
5798 dc_plane_state->global_alpha = plane_info.global_alpha;
5799 dc_plane_state->global_alpha_value = plane_info.global_alpha_value;
5800 dc_plane_state->dcc = plane_info.dcc;
5801 dc_plane_state->layer_index = plane_info.layer_index; // Always returns 0
5802 dc_plane_state->flip_int_enabled = true;
5805 * Always set input transfer function, since plane state is refreshed
5808 ret = amdgpu_dm_update_plane_color_mgmt(dm_crtc_state, dc_plane_state);
5816 * fill_dc_dirty_rects() - Fill DC dirty regions for PSR selective updates
5818 * @plane: DRM plane containing dirty regions that need to be flushed to the eDP
5820 * @old_plane_state: Old state of @plane
5821 * @new_plane_state: New state of @plane
5822 * @crtc_state: New state of CRTC connected to the @plane
5823 * @flip_addrs: DC flip tracking struct, which also tracks dirty rects
5825 * For PSR SU, DC informs the DMUB uController of dirty rectangle regions
5826 * (referred to as "damage clips" in DRM nomenclature) that require updating on
5827 * the eDP remote buffer. The responsibility of specifying the dirty regions is
5830 * A damage-aware DRM client should fill the FB_DAMAGE_CLIPS property on the
5831 * plane with regions that require flushing to the eDP remote buffer. In
5832 * addition, certain use cases - such as cursor and multi-plane overlay (MPO) -
5833 * implicitly provide damage clips without any client support via the plane
5836 * Today, amdgpu_dm only supports the MPO and cursor use cases.
5838 * TODO: Also enable for FB_DAMAGE_CLIPS
5840 static void fill_dc_dirty_rects(struct drm_plane *plane,
5841 struct drm_plane_state *old_plane_state,
5842 struct drm_plane_state *new_plane_state,
5843 struct drm_crtc_state *crtc_state,
5844 struct dc_flip_addrs *flip_addrs)
5846 struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
5847 struct rect *dirty_rects = flip_addrs->dirty_rects;
5853 flip_addrs->dirty_rect_count = 0;
5856 * Cursor plane has its own dirty rect update interface. See
5857 * dcn10_dmub_update_cursor_data and dmub_cmd_update_cursor_info_data
5859 if (plane->type == DRM_PLANE_TYPE_CURSOR)
5863 * Today, we only consider the MPO use case for PSR SU. If MPO is not
5864 * requested and there is a plane update, do FFU.
5866 if (!dm_crtc_state->mpo_requested) {
5867 dirty_rects[0].x = 0;
5868 dirty_rects[0].y = 0;
5869 dirty_rects[0].width = dm_crtc_state->base.mode.crtc_hdisplay;
5870 dirty_rects[0].height = dm_crtc_state->base.mode.crtc_vdisplay;
5871 flip_addrs->dirty_rect_count = 1;
5872 DRM_DEBUG_DRIVER("[PLANE:%d] PSR FFU dirty rect size (%d, %d)\n",
5873 new_plane_state->plane->base.id,
5874 dm_crtc_state->base.mode.crtc_hdisplay,
5875 dm_crtc_state->base.mode.crtc_vdisplay);
5880 * MPO is requested. Add entire plane bounding box to dirty rects if
5881 * flipped to or damaged.
5883 * If plane is moved or resized, also add old bounding box to dirty
5886 num_clips = drm_plane_get_damage_clips_count(new_plane_state);
5887 fb_changed = old_plane_state->fb->base.id !=
5888 new_plane_state->fb->base.id;
5889 bb_changed = (old_plane_state->crtc_x != new_plane_state->crtc_x ||
5890 old_plane_state->crtc_y != new_plane_state->crtc_y ||
5891 old_plane_state->crtc_w != new_plane_state->crtc_w ||
5892 old_plane_state->crtc_h != new_plane_state->crtc_h);
5894 DRM_DEBUG_DRIVER("[PLANE:%d] PSR bb_changed:%d fb_changed:%d num_clips:%d\n",
5895 new_plane_state->plane->base.id,
5896 bb_changed, fb_changed, num_clips);
5898 if (num_clips || fb_changed || bb_changed) {
5899 dirty_rects[i].x = new_plane_state->crtc_x;
5900 dirty_rects[i].y = new_plane_state->crtc_y;
5901 dirty_rects[i].width = new_plane_state->crtc_w;
5902 dirty_rects[i].height = new_plane_state->crtc_h;
5903 DRM_DEBUG_DRIVER("[PLANE:%d] PSR SU dirty rect at (%d, %d) size (%d, %d)\n",
5904 new_plane_state->plane->base.id,
5905 dirty_rects[i].x, dirty_rects[i].y,
5906 dirty_rects[i].width, dirty_rects[i].height);
5910 /* Add old plane bounding-box if plane is moved or resized */
5912 dirty_rects[i].x = old_plane_state->crtc_x;
5913 dirty_rects[i].y = old_plane_state->crtc_y;
5914 dirty_rects[i].width = old_plane_state->crtc_w;
5915 dirty_rects[i].height = old_plane_state->crtc_h;
5916 DRM_DEBUG_DRIVER("[PLANE:%d] PSR SU dirty rect at (%d, %d) size (%d, %d)\n",
5917 old_plane_state->plane->base.id,
5918 dirty_rects[i].x, dirty_rects[i].y,
5919 dirty_rects[i].width, dirty_rects[i].height);
5923 flip_addrs->dirty_rect_count = i;
5926 static void update_stream_scaling_settings(const struct drm_display_mode *mode,
5927 const struct dm_connector_state *dm_state,
5928 struct dc_stream_state *stream)
5930 enum amdgpu_rmx_type rmx_type;
5932 struct rect src = { 0 }; /* viewport in composition space */
5933 struct rect dst = { 0 }; /* stream addressable area */
5935 /* no mode. nothing to be done */
5939 /* Full screen scaling by default */
5940 src.width = mode->hdisplay;
5941 src.height = mode->vdisplay;
5942 dst.width = stream->timing.h_addressable;
5943 dst.height = stream->timing.v_addressable;
5946 rmx_type = dm_state->scaling;
5947 if (rmx_type == RMX_ASPECT || rmx_type == RMX_OFF) {
5948 if (src.width * dst.height <
5949 src.height * dst.width) {
5950 /* height needs less upscaling/more downscaling */
5951 dst.width = src.width *
5952 dst.height / src.height;
5954 /* width needs less upscaling/more downscaling */
5955 dst.height = src.height *
5956 dst.width / src.width;
5958 } else if (rmx_type == RMX_CENTER) {
5962 dst.x = (stream->timing.h_addressable - dst.width) / 2;
5963 dst.y = (stream->timing.v_addressable - dst.height) / 2;
5965 if (dm_state->underscan_enable) {
5966 dst.x += dm_state->underscan_hborder / 2;
5967 dst.y += dm_state->underscan_vborder / 2;
5968 dst.width -= dm_state->underscan_hborder;
5969 dst.height -= dm_state->underscan_vborder;
5976 DRM_DEBUG_KMS("Destination Rectangle x:%d y:%d width:%d height:%d\n",
5977 dst.x, dst.y, dst.width, dst.height);
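/*
 * Worked example (illustrative only): scaling a 1280x1024 (5:4) source
 * onto a 1920x1080 timing with RMX_ASPECT gives
 * 1280 * 1080 < 1024 * 1920, so dst.width = 1280 * 1080 / 1024 = 1350
 * and the image is pillarboxed with dst.x = (1920 - 1350) / 2 = 285.
 */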
5981 static enum dc_color_depth
5982 convert_color_depth_from_display_info(const struct drm_connector *connector,
5983 bool is_y420, int requested_bpc)
5990 /* Cap display bpc based on HDMI 2.0 HF-VSDB */
5991 if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_48)
5993 else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_36)
5995 else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_30)
5998 bpc = (uint8_t)connector->display_info.bpc;
5999 /* Assume 8 bpc by default if no bpc is specified. */
6000 bpc = bpc ? bpc : 8;
6003 if (requested_bpc > 0) {
6005 * Cap display bpc based on the user requested value.
6007 * The value for state->max_bpc may not correctly updated
6008 * depending on when the connector gets added to the state
6009 * or if this was called outside of atomic check, so it
6010 * can't be used directly.
6012 bpc = min_t(u8, bpc, requested_bpc);
6014 /* Round down to the nearest even number. */
6015 bpc = bpc - (bpc & 1);
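/*
 * Worked example (illustrative only): a panel reporting 7 bpc with
 * requested_bpc = 10 yields min(7, 10) = 7, and clearing the low bit
 * rounds this down to 6 bpc, since only even depths map onto the
 * COLOR_DEPTH_* values below.
 */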
6021 * Temporary workaround: DRM doesn't parse color depth for
6022 * EDID revisions before 1.4
6023 * TODO: Fix edid parsing
6025 return COLOR_DEPTH_888;
6027 return COLOR_DEPTH_666;
6029 return COLOR_DEPTH_888;
6031 return COLOR_DEPTH_101010;
6033 return COLOR_DEPTH_121212;
6035 return COLOR_DEPTH_141414;
6037 return COLOR_DEPTH_161616;
6039 return COLOR_DEPTH_UNDEFINED;
6043 static enum dc_aspect_ratio
6044 get_aspect_ratio(const struct drm_display_mode *mode_in)
6046 /* 1-1 mapping, since both enums follow the HDMI spec. */
6047 return (enum dc_aspect_ratio) mode_in->picture_aspect_ratio;
6050 static enum dc_color_space
6051 get_output_color_space(const struct dc_crtc_timing *dc_crtc_timing)
6053 enum dc_color_space color_space = COLOR_SPACE_SRGB;
6055 switch (dc_crtc_timing->pixel_encoding) {
6056 case PIXEL_ENCODING_YCBCR422:
6057 case PIXEL_ENCODING_YCBCR444:
6058 case PIXEL_ENCODING_YCBCR420:
6061 * 27.03 MHz (pix_clk_100hz > 270300) is the separation point between HDTV and SDTV
6062 * according to the HDMI spec; we use YCbCr709 and YCbCr601
6065 if (dc_crtc_timing->pix_clk_100hz > 270300) {
6066 if (dc_crtc_timing->flags.Y_ONLY)
6068 COLOR_SPACE_YCBCR709_LIMITED;
6070 color_space = COLOR_SPACE_YCBCR709;
6072 if (dc_crtc_timing->flags.Y_ONLY)
6074 COLOR_SPACE_YCBCR601_LIMITED;
6076 color_space = COLOR_SPACE_YCBCR601;
6081 case PIXEL_ENCODING_RGB:
6082 color_space = COLOR_SPACE_SRGB;
6093 static bool adjust_colour_depth_from_display_info(
6094 struct dc_crtc_timing *timing_out,
6095 const struct drm_display_info *info)
6097 enum dc_color_depth depth = timing_out->display_color_depth;
6100 normalized_clk = timing_out->pix_clk_100hz / 10;
6101 /* YCbCr 4:2:0 requires additional adjustment of 1/2 */
6102 if (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420)
6103 normalized_clk /= 2;
6104 /* Adjust the pixel clock per the HDMI spec based on colour depth */
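/*
 * For illustration (figures not from the driver): a 297000 kHz RGB stream at
 * 10 bpc normalizes to 297000 * 30 / 24 = 371250 kHz, which must still fit
 * within info->max_tmds_clock (also in kHz) for the depth to be kept.
 */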
6106 case COLOR_DEPTH_888:
6108 case COLOR_DEPTH_101010:
6109 normalized_clk = (normalized_clk * 30) / 24;
6111 case COLOR_DEPTH_121212:
6112 normalized_clk = (normalized_clk * 36) / 24;
6114 case COLOR_DEPTH_161616:
6115 normalized_clk = (normalized_clk * 48) / 24;
6118 /* The above depths are the only ones valid for HDMI. */
6121 if (normalized_clk <= info->max_tmds_clock) {
6122 timing_out->display_color_depth = depth;
6125 } while (--depth > COLOR_DEPTH_666);
6129 static void fill_stream_properties_from_drm_display_mode(
6130 struct dc_stream_state *stream,
6131 const struct drm_display_mode *mode_in,
6132 const struct drm_connector *connector,
6133 const struct drm_connector_state *connector_state,
6134 const struct dc_stream_state *old_stream,
6137 struct dc_crtc_timing *timing_out = &stream->timing;
6138 const struct drm_display_info *info = &connector->display_info;
6139 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
6140 struct hdmi_vendor_infoframe hv_frame;
6141 struct hdmi_avi_infoframe avi_frame;
6143 memset(&hv_frame, 0, sizeof(hv_frame));
6144 memset(&avi_frame, 0, sizeof(avi_frame));
6146 timing_out->h_border_left = 0;
6147 timing_out->h_border_right = 0;
6148 timing_out->v_border_top = 0;
6149 timing_out->v_border_bottom = 0;
6150 /* TODO: un-hardcode */
6151 if (drm_mode_is_420_only(info, mode_in)
6152 && stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
6153 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
6154 else if (drm_mode_is_420_also(info, mode_in)
6155 && aconnector->force_yuv420_output)
6156 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
6157 else if ((connector->display_info.color_formats & DRM_COLOR_FORMAT_YCBCR444)
6158 && stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
6159 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR444;
6161 timing_out->pixel_encoding = PIXEL_ENCODING_RGB;
6163 timing_out->timing_3d_format = TIMING_3D_FORMAT_NONE;
6164 timing_out->display_color_depth = convert_color_depth_from_display_info(
6166 (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420),
6168 timing_out->scan_type = SCANNING_TYPE_NODATA;
6169 timing_out->hdmi_vic = 0;
6172 timing_out->vic = old_stream->timing.vic;
6173 timing_out->flags.HSYNC_POSITIVE_POLARITY = old_stream->timing.flags.HSYNC_POSITIVE_POLARITY;
6174 timing_out->flags.VSYNC_POSITIVE_POLARITY = old_stream->timing.flags.VSYNC_POSITIVE_POLARITY;
6176 timing_out->vic = drm_match_cea_mode(mode_in);
6177 if (mode_in->flags & DRM_MODE_FLAG_PHSYNC)
6178 timing_out->flags.HSYNC_POSITIVE_POLARITY = 1;
6179 if (mode_in->flags & DRM_MODE_FLAG_PVSYNC)
6180 timing_out->flags.VSYNC_POSITIVE_POLARITY = 1;
6183 if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
6184 drm_hdmi_avi_infoframe_from_display_mode(&avi_frame, (struct drm_connector *)connector, mode_in);
6185 timing_out->vic = avi_frame.video_code;
6186 drm_hdmi_vendor_infoframe_from_display_mode(&hv_frame, (struct drm_connector *)connector, mode_in);
6187 timing_out->hdmi_vic = hv_frame.vic;
6190 if (is_freesync_video_mode(mode_in, aconnector)) {
6191 timing_out->h_addressable = mode_in->hdisplay;
6192 timing_out->h_total = mode_in->htotal;
6193 timing_out->h_sync_width = mode_in->hsync_end - mode_in->hsync_start;
6194 timing_out->h_front_porch = mode_in->hsync_start - mode_in->hdisplay;
6195 timing_out->v_total = mode_in->vtotal;
6196 timing_out->v_addressable = mode_in->vdisplay;
6197 timing_out->v_front_porch = mode_in->vsync_start - mode_in->vdisplay;
6198 timing_out->v_sync_width = mode_in->vsync_end - mode_in->vsync_start;
6199 timing_out->pix_clk_100hz = mode_in->clock * 10;
6201 timing_out->h_addressable = mode_in->crtc_hdisplay;
6202 timing_out->h_total = mode_in->crtc_htotal;
6203 timing_out->h_sync_width = mode_in->crtc_hsync_end - mode_in->crtc_hsync_start;
6204 timing_out->h_front_porch = mode_in->crtc_hsync_start - mode_in->crtc_hdisplay;
6205 timing_out->v_total = mode_in->crtc_vtotal;
6206 timing_out->v_addressable = mode_in->crtc_vdisplay;
6207 timing_out->v_front_porch = mode_in->crtc_vsync_start - mode_in->crtc_vdisplay;
6208 timing_out->v_sync_width = mode_in->crtc_vsync_end - mode_in->crtc_vsync_start;
6209 timing_out->pix_clk_100hz = mode_in->crtc_clock * 10;
6212 timing_out->aspect_ratio = get_aspect_ratio(mode_in);
6214 stream->output_color_space = get_output_color_space(timing_out);
6216 stream->out_transfer_func->type = TF_TYPE_PREDEFINED;
6217 stream->out_transfer_func->tf = TRANSFER_FUNCTION_SRGB;
6218 if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
6219 if (!adjust_colour_depth_from_display_info(timing_out, info) &&
6220 drm_mode_is_420_also(info, mode_in) &&
6221 timing_out->pixel_encoding != PIXEL_ENCODING_YCBCR420) {
6222 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
6223 adjust_colour_depth_from_display_info(timing_out, info);
6228 static void fill_audio_info(struct audio_info *audio_info,
6229 const struct drm_connector *drm_connector,
6230 const struct dc_sink *dc_sink)
6233 int cea_revision = 0;
6234 const struct dc_edid_caps *edid_caps = &dc_sink->edid_caps;
6236 audio_info->manufacture_id = edid_caps->manufacturer_id;
6237 audio_info->product_id = edid_caps->product_id;
6239 cea_revision = drm_connector->display_info.cea_rev;
6241 strscpy(audio_info->display_name,
6242 edid_caps->display_name,
6243 AUDIO_INFO_DISPLAY_NAME_SIZE_IN_CHARS);
6245 if (cea_revision >= 3) {
6246 audio_info->mode_count = edid_caps->audio_mode_count;
6248 for (i = 0; i < audio_info->mode_count; ++i) {
6249 audio_info->modes[i].format_code =
6250 (enum audio_format_code)
6251 (edid_caps->audio_modes[i].format_code);
6252 audio_info->modes[i].channel_count =
6253 edid_caps->audio_modes[i].channel_count;
6254 audio_info->modes[i].sample_rates.all =
6255 edid_caps->audio_modes[i].sample_rate;
6256 audio_info->modes[i].sample_size =
6257 edid_caps->audio_modes[i].sample_size;
6261 audio_info->flags.all = edid_caps->speaker_flags;
6263 /* TODO: We only check the progressive mode; check the interlace mode too */
6264 if (drm_connector->latency_present[0]) {
6265 audio_info->video_latency = drm_connector->video_latency[0];
6266 audio_info->audio_latency = drm_connector->audio_latency[0];
6269 /* TODO: For DP, video and audio latency should be calculated from DPCD caps */
6274 copy_crtc_timing_for_drm_display_mode(const struct drm_display_mode *src_mode,
6275 struct drm_display_mode *dst_mode)
6277 dst_mode->crtc_hdisplay = src_mode->crtc_hdisplay;
6278 dst_mode->crtc_vdisplay = src_mode->crtc_vdisplay;
6279 dst_mode->crtc_clock = src_mode->crtc_clock;
6280 dst_mode->crtc_hblank_start = src_mode->crtc_hblank_start;
6281 dst_mode->crtc_hblank_end = src_mode->crtc_hblank_end;
6282 dst_mode->crtc_hsync_start = src_mode->crtc_hsync_start;
6283 dst_mode->crtc_hsync_end = src_mode->crtc_hsync_end;
6284 dst_mode->crtc_htotal = src_mode->crtc_htotal;
6285 dst_mode->crtc_hskew = src_mode->crtc_hskew;
6286 dst_mode->crtc_vblank_start = src_mode->crtc_vblank_start;
6287 dst_mode->crtc_vblank_end = src_mode->crtc_vblank_end;
6288 dst_mode->crtc_vsync_start = src_mode->crtc_vsync_start;
6289 dst_mode->crtc_vsync_end = src_mode->crtc_vsync_end;
6290 dst_mode->crtc_vtotal = src_mode->crtc_vtotal;
6294 decide_crtc_timing_for_drm_display_mode(struct drm_display_mode *drm_mode,
6295 const struct drm_display_mode *native_mode,
6298 if (scale_enabled) {
6299 copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
6300 } else if (native_mode->clock == drm_mode->clock &&
6301 native_mode->htotal == drm_mode->htotal &&
6302 native_mode->vtotal == drm_mode->vtotal) {
6303 copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
6305 /* no scaling and no amdgpu-inserted mode, nothing to patch */
6309 static struct dc_sink *
6310 create_fake_sink(struct amdgpu_dm_connector *aconnector)
6312 struct dc_sink_init_data sink_init_data = { 0 };
6313 struct dc_sink *sink = NULL;
6314 sink_init_data.link = aconnector->dc_link;
6315 sink_init_data.sink_signal = aconnector->dc_link->connector_signal;
6317 sink = dc_sink_create(&sink_init_data);
6319 DRM_ERROR("Failed to create sink!\n");
6322 sink->sink_signal = SIGNAL_TYPE_VIRTUAL;
6327 static void set_multisync_trigger_params(
6328 struct dc_stream_state *stream)
6330 struct dc_stream_state *master = NULL;
6332 if (stream->triggered_crtc_reset.enabled) {
6333 master = stream->triggered_crtc_reset.event_source;
6334 stream->triggered_crtc_reset.event =
6335 master->timing.flags.VSYNC_POSITIVE_POLARITY ?
6336 CRTC_EVENT_VSYNC_RISING : CRTC_EVENT_VSYNC_FALLING;
6337 stream->triggered_crtc_reset.delay = TRIGGER_DELAY_NEXT_PIXEL;
6341 static void set_master_stream(struct dc_stream_state *stream_set[],
6344 int j, highest_rfr = 0, master_stream = 0;
6346 for (j = 0; j < stream_count; j++) {
6347 if (stream_set[j] && stream_set[j]->triggered_crtc_reset.enabled) {
6348 int refresh_rate = 0;
6350 refresh_rate = (stream_set[j]->timing.pix_clk_100hz*100)/
6351 (stream_set[j]->timing.h_total*stream_set[j]->timing.v_total);
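/*
 * For illustration: a CEA 1080p60 timing (pix_clk_100hz = 1485000,
 * h_total = 2200, v_total = 1125) gives 1485000 * 100 / (2200 * 1125) = 60.
 */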
6352 if (refresh_rate > highest_rfr) {
6353 highest_rfr = refresh_rate;
6358 for (j = 0; j < stream_count; j++) {
6360 stream_set[j]->triggered_crtc_reset.event_source = stream_set[master_stream];
6364 static void dm_enable_per_frame_crtc_master_sync(struct dc_state *context)
6367 struct dc_stream_state *stream;
6369 if (context->stream_count < 2)
6371 for (i = 0; i < context->stream_count ; i++) {
6372 if (!context->streams[i])
6375 * TODO: add a function to read AMD VSDB bits and set
6376 * crtc_sync_master.multi_sync_enabled flag
6377 * For now it's set to false
6381 set_master_stream(context->streams, context->stream_count);
6383 for (i = 0; i < context->stream_count ; i++) {
6384 stream = context->streams[i];
6389 set_multisync_trigger_params(stream);
6393 #if defined(CONFIG_DRM_AMD_DC_DCN)
6394 static void update_dsc_caps(struct amdgpu_dm_connector *aconnector,
6395 struct dc_sink *sink, struct dc_stream_state *stream,
6396 struct dsc_dec_dpcd_caps *dsc_caps)
6398 stream->timing.flags.DSC = 0;
6399 dsc_caps->is_dsc_supported = false;
6401 if (aconnector->dc_link && (sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT ||
6402 sink->sink_signal == SIGNAL_TYPE_EDP)) {
6403 if (sink->link->dpcd_caps.dongle_type == DISPLAY_DONGLE_NONE ||
6404 sink->link->dpcd_caps.dongle_type == DISPLAY_DONGLE_DP_HDMI_CONVERTER)
6405 dc_dsc_parse_dsc_dpcd(aconnector->dc_link->ctx->dc,
6406 aconnector->dc_link->dpcd_caps.dsc_caps.dsc_basic_caps.raw,
6407 aconnector->dc_link->dpcd_caps.dsc_caps.dsc_branch_decoder_caps.raw,
6412 static void apply_dsc_policy_for_edp(struct amdgpu_dm_connector *aconnector,
6413 struct dc_sink *sink, struct dc_stream_state *stream,
6414 struct dsc_dec_dpcd_caps *dsc_caps,
6415 uint32_t max_dsc_target_bpp_limit_override)
6417 const struct dc_link_settings *verified_link_cap = NULL;
6418 uint32_t link_bw_in_kbps;
6419 uint32_t edp_min_bpp_x16, edp_max_bpp_x16;
6420 struct dc *dc = sink->ctx->dc;
6421 struct dc_dsc_bw_range bw_range = {0};
6422 struct dc_dsc_config dsc_cfg = {0};
6424 verified_link_cap = dc_link_get_link_cap(stream->link);
6425 link_bw_in_kbps = dc_link_bandwidth_kbps(stream->link, verified_link_cap);
6426 edp_min_bpp_x16 = 8 * 16;
6427 edp_max_bpp_x16 = 8 * 16;
6429 if (edp_max_bpp_x16 > dsc_caps->edp_max_bits_per_pixel)
6430 edp_max_bpp_x16 = dsc_caps->edp_max_bits_per_pixel;
6432 if (edp_max_bpp_x16 < edp_min_bpp_x16)
6433 edp_min_bpp_x16 = edp_max_bpp_x16;
6435 if (dc_dsc_compute_bandwidth_range(dc->res_pool->dscs[0],
6436 dc->debug.dsc_min_slice_height_override,
6437 edp_min_bpp_x16, edp_max_bpp_x16,
6442 if (bw_range.max_kbps < link_bw_in_kbps) {
6443 if (dc_dsc_compute_config(dc->res_pool->dscs[0],
6445 dc->debug.dsc_min_slice_height_override,
6446 max_dsc_target_bpp_limit_override,
6450 stream->timing.dsc_cfg = dsc_cfg;
6451 stream->timing.flags.DSC = 1;
6452 stream->timing.dsc_cfg.bits_per_pixel = edp_max_bpp_x16;
6458 if (dc_dsc_compute_config(dc->res_pool->dscs[0],
6460 dc->debug.dsc_min_slice_height_override,
6461 max_dsc_target_bpp_limit_override,
6465 stream->timing.dsc_cfg = dsc_cfg;
6466 stream->timing.flags.DSC = 1;
6470 static void apply_dsc_policy_for_stream(struct amdgpu_dm_connector *aconnector,
6471 struct dc_sink *sink, struct dc_stream_state *stream,
6472 struct dsc_dec_dpcd_caps *dsc_caps)
6474 struct drm_connector *drm_connector = &aconnector->base;
6475 uint32_t link_bandwidth_kbps;
6476 uint32_t max_dsc_target_bpp_limit_override = 0;
6477 struct dc *dc = sink->ctx->dc;
6478 uint32_t max_supported_bw_in_kbps, timing_bw_in_kbps;
6479 uint32_t dsc_max_supported_bw_in_kbps;
6481 link_bandwidth_kbps = dc_link_bandwidth_kbps(aconnector->dc_link,
6482 dc_link_get_link_cap(aconnector->dc_link));
6484 if (stream->link && stream->link->local_sink)
6485 max_dsc_target_bpp_limit_override =
6486 stream->link->local_sink->edid_caps.panel_patch.max_dsc_target_bpp_limit;
6488 /* Set DSC policy according to dsc_clock_en */
6489 dc_dsc_policy_set_enable_dsc_when_not_needed(
6490 aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE);
6492 if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_EDP && !dc->debug.disable_dsc_edp &&
6493 dc->caps.edp_dsc_support && aconnector->dsc_settings.dsc_force_enable != DSC_CLK_FORCE_DISABLE) {
6495 apply_dsc_policy_for_edp(aconnector, sink, stream, dsc_caps, max_dsc_target_bpp_limit_override);
6497 } else if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT) {
6498 if (sink->link->dpcd_caps.dongle_type == DISPLAY_DONGLE_NONE) {
6499 if (dc_dsc_compute_config(aconnector->dc_link->ctx->dc->res_pool->dscs[0],
6501 aconnector->dc_link->ctx->dc->debug.dsc_min_slice_height_override,
6502 max_dsc_target_bpp_limit_override,
6503 link_bandwidth_kbps,
6505 &stream->timing.dsc_cfg)) {
6506 stream->timing.flags.DSC = 1;
6507 DRM_DEBUG_DRIVER("%s: [%s] DSC is selected from SST RX\n",
6508 __func__, drm_connector->name);
6510 } else if (sink->link->dpcd_caps.dongle_type == DISPLAY_DONGLE_DP_HDMI_CONVERTER) {
6511 timing_bw_in_kbps = dc_bandwidth_in_kbps_from_timing(&stream->timing);
6512 max_supported_bw_in_kbps = link_bandwidth_kbps;
6513 dsc_max_supported_bw_in_kbps = link_bandwidth_kbps;
6515 if (timing_bw_in_kbps > max_supported_bw_in_kbps &&
6516 max_supported_bw_in_kbps > 0 &&
6517 dsc_max_supported_bw_in_kbps > 0)
6518 if (dc_dsc_compute_config(aconnector->dc_link->ctx->dc->res_pool->dscs[0],
6520 aconnector->dc_link->ctx->dc->debug.dsc_min_slice_height_override,
6521 max_dsc_target_bpp_limit_override,
6522 dsc_max_supported_bw_in_kbps,
6524 &stream->timing.dsc_cfg)) {
6525 stream->timing.flags.DSC = 1;
6526 DRM_DEBUG_DRIVER("%s: [%s] DSC is selected from DP-HDMI PCON\n",
6527 __func__, drm_connector->name);
6532 /* Overwrite the stream flag if DSC is enabled through debugfs */
6533 if (aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE)
6534 stream->timing.flags.DSC = 1;
6536 if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_h)
6537 stream->timing.dsc_cfg.num_slices_h = aconnector->dsc_settings.dsc_num_slices_h;
6539 if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_v)
6540 stream->timing.dsc_cfg.num_slices_v = aconnector->dsc_settings.dsc_num_slices_v;
6542 if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_bits_per_pixel)
6543 stream->timing.dsc_cfg.bits_per_pixel = aconnector->dsc_settings.dsc_bits_per_pixel;
6545 #endif /* CONFIG_DRM_AMD_DC_DCN */
6548 * DOC: FreeSync Video
6550 * When a userspace application wants to play a video, the content follows a
6551 * standard format definition that usually specifies the FPS for that format.
6552 * The list below illustrates some video formats and their expected FPS,
6555 * - TV/NTSC (23.976 FPS)
6558 * - TV/NTSC (29.97 FPS)
6559 * - TV/NTSC (30 FPS)
6560 * - Cinema HFR (48 FPS)
6562 * - Commonly used (60 FPS)
6563 * - Multiples of 24 (48, 72, 96, 120 FPS)
6565 * The list of standard video formats is not huge and can be added to the
6566 * connector's modeset list beforehand. With that, userspace can leverage
6567 * FreeSync to extend the front porch in order to attain the target refresh
6568 * rate. Such a switch will happen seamlessly, without screen blanking or
6569 * reprogramming of the output in any other way. If the userspace requests a
6570 * modesetting change compatible with FreeSync modes that only differ in the
6571 * refresh rate, DC will skip the full update and avoid a blink during the
6572 * transition. For example, the video player can change the modesetting from
6573 * 60Hz to 30Hz for playing TV/NTSC content when it goes full screen without
6574 * causing any display blink. This same concept can be applied to a mode
6577 static struct drm_display_mode *
6578 get_highest_refresh_rate_mode(struct amdgpu_dm_connector *aconnector,
6579 bool use_probed_modes)
6581 struct drm_display_mode *m, *m_pref = NULL;
6582 u16 current_refresh, highest_refresh;
6583 struct list_head *list_head = use_probed_modes ?
6584 &aconnector->base.probed_modes :
6585 &aconnector->base.modes;
6587 if (aconnector->freesync_vid_base.clock != 0)
6588 return &aconnector->freesync_vid_base;
6590 /* Find the preferred mode */
6591 list_for_each_entry (m, list_head, head) {
6592 if (m->type & DRM_MODE_TYPE_PREFERRED) {
6599 /* Probably an EDID with no preferred mode. Fall back to the first entry */
6600 m_pref = list_first_entry_or_null(
6601 &aconnector->base.modes, struct drm_display_mode, head);
6603 DRM_DEBUG_DRIVER("No preferred mode found in EDID\n");
6608 highest_refresh = drm_mode_vrefresh(m_pref);
6611 * Find the mode with the highest refresh rate at the same resolution.
6612 * For some monitors, the preferred mode is not the one with the highest
6613 * supported refresh rate.
6615 list_for_each_entry (m, list_head, head) {
6616 current_refresh = drm_mode_vrefresh(m);
6618 if (m->hdisplay == m_pref->hdisplay &&
6619 m->vdisplay == m_pref->vdisplay &&
6620 highest_refresh < current_refresh) {
6621 highest_refresh = current_refresh;
6626 drm_mode_copy(&aconnector->freesync_vid_base, m_pref);
6630 static bool is_freesync_video_mode(const struct drm_display_mode *mode,
6631 struct amdgpu_dm_connector *aconnector)
6633 struct drm_display_mode *high_mode;
6636 high_mode = get_highest_refresh_rate_mode(aconnector, false);
6637 if (!high_mode || !mode)
6640 timing_diff = high_mode->vtotal - mode->vtotal;
6642 if (high_mode->clock == 0 || high_mode->clock != mode->clock ||
6643 high_mode->hdisplay != mode->hdisplay ||
6644 high_mode->vdisplay != mode->vdisplay ||
6645 high_mode->hsync_start != mode->hsync_start ||
6646 high_mode->hsync_end != mode->hsync_end ||
6647 high_mode->htotal != mode->htotal ||
6648 high_mode->hskew != mode->hskew ||
6649 high_mode->vscan != mode->vscan ||
6650 high_mode->vsync_start - mode->vsync_start != timing_diff ||
6651 high_mode->vsync_end - mode->vsync_end != timing_diff)
6657 static struct dc_stream_state *
6658 create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
6659 const struct drm_display_mode *drm_mode,
6660 const struct dm_connector_state *dm_state,
6661 const struct dc_stream_state *old_stream,
6664 struct drm_display_mode *preferred_mode = NULL;
6665 struct drm_connector *drm_connector;
6666 const struct drm_connector_state *con_state =
6667 dm_state ? &dm_state->base : NULL;
6668 struct dc_stream_state *stream = NULL;
6669 struct drm_display_mode mode = *drm_mode;
6670 struct drm_display_mode saved_mode;
6671 struct drm_display_mode *freesync_mode = NULL;
6672 bool native_mode_found = false;
6673 bool recalculate_timing = false;
6674 bool scale = dm_state ? (dm_state->scaling != RMX_OFF) : false;
6676 int preferred_refresh = 0;
6677 #if defined(CONFIG_DRM_AMD_DC_DCN)
6678 struct dsc_dec_dpcd_caps dsc_caps;
6680 struct dc_sink *sink = NULL;
6682 memset(&saved_mode, 0, sizeof(saved_mode));
6684 if (aconnector == NULL) {
6685 DRM_ERROR("aconnector is NULL!\n");
6689 drm_connector = &aconnector->base;
6691 if (!aconnector->dc_sink) {
6692 sink = create_fake_sink(aconnector);
6696 sink = aconnector->dc_sink;
6697 dc_sink_retain(sink);
6700 stream = dc_create_stream_for_sink(sink);
6702 if (stream == NULL) {
6703 DRM_ERROR("Failed to create stream for sink!\n");
6707 stream->dm_stream_context = aconnector;
6709 stream->timing.flags.LTE_340MCSC_SCRAMBLE =
6710 drm_connector->display_info.hdmi.scdc.scrambling.low_rates;
6712 list_for_each_entry(preferred_mode, &aconnector->base.modes, head) {
6713 /* Search for preferred mode */
6714 if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED) {
6715 native_mode_found = true;
6719 if (!native_mode_found)
6720 preferred_mode = list_first_entry_or_null(
6721 &aconnector->base.modes,
6722 struct drm_display_mode,
6725 mode_refresh = drm_mode_vrefresh(&mode);
6727 if (preferred_mode == NULL) {
6729 * This may not be an error: the use case is when we have no
6730 * usermode calls to reset and set the mode upon hotplug. In this
6731 * case, we call set mode ourselves to restore the previous mode,
6732 * and the mode list may not be filled in yet.
6734 DRM_DEBUG_DRIVER("No preferred mode found\n");
6736 recalculate_timing = is_freesync_video_mode(&mode, aconnector);
6737 if (recalculate_timing) {
6738 freesync_mode = get_highest_refresh_rate_mode(aconnector, false);
6739 drm_mode_copy(&saved_mode, &mode);
6740 drm_mode_copy(&mode, freesync_mode);
6742 decide_crtc_timing_for_drm_display_mode(
6743 &mode, preferred_mode, scale);
6745 preferred_refresh = drm_mode_vrefresh(preferred_mode);
6749 if (recalculate_timing)
6750 drm_mode_set_crtcinfo(&saved_mode, 0);
6752 drm_mode_set_crtcinfo(&mode, 0);
6755 * If scaling is enabled and the refresh rate didn't change,
6756 * we copy the vic and polarities of the old timings
6758 if (!scale || mode_refresh != preferred_refresh)
6759 fill_stream_properties_from_drm_display_mode(
6760 stream, &mode, &aconnector->base, con_state, NULL,
6763 fill_stream_properties_from_drm_display_mode(
6764 stream, &mode, &aconnector->base, con_state, old_stream,
6767 #if defined(CONFIG_DRM_AMD_DC_DCN)
6768 /* SST DSC determination policy */
6769 update_dsc_caps(aconnector, sink, stream, &dsc_caps);
6770 if (aconnector->dsc_settings.dsc_force_enable != DSC_CLK_FORCE_DISABLE && dsc_caps.is_dsc_supported)
6771 apply_dsc_policy_for_stream(aconnector, sink, stream, &dsc_caps);
6774 update_stream_scaling_settings(&mode, dm_state, stream);
6777 &stream->audio_info,
6781 update_stream_signal(stream, sink);
6783 if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
6784 mod_build_hf_vsif_infopacket(stream, &stream->vsp_infopacket);
6786 if (stream->link->psr_settings.psr_feature_enabled) {
6788 // Decide whether the stream supports VSC SDP colorimetry
6789 // before building the VSC info packet
6791 stream->use_vsc_sdp_for_colorimetry = false;
6792 if (aconnector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
6793 stream->use_vsc_sdp_for_colorimetry =
6794 aconnector->dc_sink->is_vsc_sdp_colorimetry_supported;
6796 if (stream->link->dpcd_caps.dprx_feature.bits.VSC_SDP_COLORIMETRY_SUPPORTED)
6797 stream->use_vsc_sdp_for_colorimetry = true;
6799 mod_build_vsc_infopacket(stream, &stream->vsc_infopacket, stream->output_color_space);
6800 aconnector->psr_skip_count = AMDGPU_DM_PSR_ENTRY_DELAY;
6804 dc_sink_release(sink);
6809 static void amdgpu_dm_crtc_destroy(struct drm_crtc *crtc)
6811 drm_crtc_cleanup(crtc);
6815 static void dm_crtc_destroy_state(struct drm_crtc *crtc,
6816 struct drm_crtc_state *state)
6818 struct dm_crtc_state *cur = to_dm_crtc_state(state);
6820 /* TODO: Destroy dc_stream objects once the stream object is flattened */
6822 dc_stream_release(cur->stream);
6825 __drm_atomic_helper_crtc_destroy_state(state);
6831 static void dm_crtc_reset_state(struct drm_crtc *crtc)
6833 struct dm_crtc_state *state;
6836 dm_crtc_destroy_state(crtc, crtc->state);
6838 state = kzalloc(sizeof(*state), GFP_KERNEL);
6839 if (WARN_ON(!state))
6842 __drm_atomic_helper_crtc_reset(crtc, &state->base);
6845 static struct drm_crtc_state *
6846 dm_crtc_duplicate_state(struct drm_crtc *crtc)
6848 struct dm_crtc_state *state, *cur;
6850 cur = to_dm_crtc_state(crtc->state);
6852 if (WARN_ON(!crtc->state))
6855 state = kzalloc(sizeof(*state), GFP_KERNEL);
6859 __drm_atomic_helper_crtc_duplicate_state(crtc, &state->base);
6862 state->stream = cur->stream;
6863 dc_stream_retain(state->stream);
6866 state->active_planes = cur->active_planes;
6867 state->vrr_infopacket = cur->vrr_infopacket;
6868 state->abm_level = cur->abm_level;
6869 state->vrr_supported = cur->vrr_supported;
6870 state->freesync_config = cur->freesync_config;
6871 state->cm_has_degamma = cur->cm_has_degamma;
6872 state->cm_is_degamma_srgb = cur->cm_is_degamma_srgb;
6873 state->mpo_requested = cur->mpo_requested;
6874 /* TODO: Duplicate dc_stream once the stream object is flattened */
6876 return &state->base;
6879 #ifdef CONFIG_DEBUG_FS
6880 static int amdgpu_dm_crtc_late_register(struct drm_crtc *crtc)
6882 crtc_debugfs_init(crtc);
6888 static inline int dm_set_vupdate_irq(struct drm_crtc *crtc, bool enable)
6890 enum dc_irq_source irq_source;
6891 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
6892 struct amdgpu_device *adev = drm_to_adev(crtc->dev);
6895 irq_source = IRQ_TYPE_VUPDATE + acrtc->otg_inst;
6897 rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
6899 DRM_DEBUG_VBL("crtc %d - vupdate irq %sabling: r=%d\n",
6900 acrtc->crtc_id, enable ? "en" : "dis", rc);
6904 static inline int dm_set_vblank(struct drm_crtc *crtc, bool enable)
6906 enum dc_irq_source irq_source;
6907 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
6908 struct amdgpu_device *adev = drm_to_adev(crtc->dev);
6909 struct dm_crtc_state *acrtc_state = to_dm_crtc_state(crtc->state);
6910 struct amdgpu_display_manager *dm = &adev->dm;
6911 struct vblank_control_work *work;
6915 /* vblank irq on -> Only need vupdate irq in vrr mode */
6916 if (amdgpu_dm_vrr_active(acrtc_state))
6917 rc = dm_set_vupdate_irq(crtc, true);
6919 /* vblank irq off -> vupdate irq off */
6920 rc = dm_set_vupdate_irq(crtc, false);
6926 irq_source = IRQ_TYPE_VBLANK + acrtc->otg_inst;
6928 if (!dc_interrupt_set(adev->dm.dc, irq_source, enable))
6931 if (amdgpu_in_reset(adev))
6934 if (dm->vblank_control_workqueue) {
6935 work = kzalloc(sizeof(*work), GFP_ATOMIC);
6939 INIT_WORK(&work->work, vblank_control_worker);
6941 work->acrtc = acrtc;
6942 work->enable = enable;
6944 if (acrtc_state->stream) {
6945 dc_stream_retain(acrtc_state->stream);
6946 work->stream = acrtc_state->stream;
6949 queue_work(dm->vblank_control_workqueue, &work->work);
6955 static int dm_enable_vblank(struct drm_crtc *crtc)
6957 return dm_set_vblank(crtc, true);
6960 static void dm_disable_vblank(struct drm_crtc *crtc)
6962 dm_set_vblank(crtc, false);
6965 /* Only the options currently available to the driver are implemented */
6966 static const struct drm_crtc_funcs amdgpu_dm_crtc_funcs = {
6967 .reset = dm_crtc_reset_state,
6968 .destroy = amdgpu_dm_crtc_destroy,
6969 .set_config = drm_atomic_helper_set_config,
6970 .page_flip = drm_atomic_helper_page_flip,
6971 .atomic_duplicate_state = dm_crtc_duplicate_state,
6972 .atomic_destroy_state = dm_crtc_destroy_state,
6973 .set_crc_source = amdgpu_dm_crtc_set_crc_source,
6974 .verify_crc_source = amdgpu_dm_crtc_verify_crc_source,
6975 .get_crc_sources = amdgpu_dm_crtc_get_crc_sources,
6976 .get_vblank_counter = amdgpu_get_vblank_counter_kms,
6977 .enable_vblank = dm_enable_vblank,
6978 .disable_vblank = dm_disable_vblank,
6979 .get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
6980 #if defined(CONFIG_DEBUG_FS)
6981 .late_register = amdgpu_dm_crtc_late_register,
6985 static enum drm_connector_status
6986 amdgpu_dm_connector_detect(struct drm_connector *connector, bool force)
6989 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
6993 * 1. This interface is NOT called in the context of an HPD irq.
6994 * 2. This interface *is called* in the context of a user-mode ioctl, which
6995 * makes it a bad place for *any* MST-related activity.
6998 if (aconnector->base.force == DRM_FORCE_UNSPECIFIED &&
6999 !aconnector->fake_enable)
7000 connected = (aconnector->dc_sink != NULL);
7002 connected = (aconnector->base.force == DRM_FORCE_ON);
7004 update_subconnector_property(aconnector);
7006 return (connected ? connector_status_connected :
7007 connector_status_disconnected);
7010 int amdgpu_dm_connector_atomic_set_property(struct drm_connector *connector,
7011 struct drm_connector_state *connector_state,
7012 struct drm_property *property,
7015 struct drm_device *dev = connector->dev;
7016 struct amdgpu_device *adev = drm_to_adev(dev);
7017 struct dm_connector_state *dm_old_state =
7018 to_dm_connector_state(connector->state);
7019 struct dm_connector_state *dm_new_state =
7020 to_dm_connector_state(connector_state);
7024 if (property == dev->mode_config.scaling_mode_property) {
7025 enum amdgpu_rmx_type rmx_type;
7028 case DRM_MODE_SCALE_CENTER:
7029 rmx_type = RMX_CENTER;
7031 case DRM_MODE_SCALE_ASPECT:
7032 rmx_type = RMX_ASPECT;
7034 case DRM_MODE_SCALE_FULLSCREEN:
7035 rmx_type = RMX_FULL;
7037 case DRM_MODE_SCALE_NONE:
7043 if (dm_old_state->scaling == rmx_type)
7046 dm_new_state->scaling = rmx_type;
7048 } else if (property == adev->mode_info.underscan_hborder_property) {
7049 dm_new_state->underscan_hborder = val;
7051 } else if (property == adev->mode_info.underscan_vborder_property) {
7052 dm_new_state->underscan_vborder = val;
7054 } else if (property == adev->mode_info.underscan_property) {
7055 dm_new_state->underscan_enable = val;
7057 } else if (property == adev->mode_info.abm_level_property) {
7058 dm_new_state->abm_level = val;
7065 int amdgpu_dm_connector_atomic_get_property(struct drm_connector *connector,
7066 const struct drm_connector_state *state,
7067 struct drm_property *property,
7070 struct drm_device *dev = connector->dev;
7071 struct amdgpu_device *adev = drm_to_adev(dev);
7072 struct dm_connector_state *dm_state =
7073 to_dm_connector_state(state);
7076 if (property == dev->mode_config.scaling_mode_property) {
7077 switch (dm_state->scaling) {
7079 *val = DRM_MODE_SCALE_CENTER;
7082 *val = DRM_MODE_SCALE_ASPECT;
7085 *val = DRM_MODE_SCALE_FULLSCREEN;
7089 *val = DRM_MODE_SCALE_NONE;
7093 } else if (property == adev->mode_info.underscan_hborder_property) {
7094 *val = dm_state->underscan_hborder;
7096 } else if (property == adev->mode_info.underscan_vborder_property) {
7097 *val = dm_state->underscan_vborder;
7099 } else if (property == adev->mode_info.underscan_property) {
7100 *val = dm_state->underscan_enable;
7102 } else if (property == adev->mode_info.abm_level_property) {
7103 *val = dm_state->abm_level;
7110 static void amdgpu_dm_connector_unregister(struct drm_connector *connector)
7112 struct amdgpu_dm_connector *amdgpu_dm_connector = to_amdgpu_dm_connector(connector);
7114 drm_dp_aux_unregister(&amdgpu_dm_connector->dm_dp_aux.aux);
7117 static void amdgpu_dm_connector_destroy(struct drm_connector *connector)
7119 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
7120 const struct dc_link *link = aconnector->dc_link;
7121 struct amdgpu_device *adev = drm_to_adev(connector->dev);
7122 struct amdgpu_display_manager *dm = &adev->dm;
7126 * Call only if mst_mgr was initialized before, since it's not done
7127 * for all connector types.
7129 if (aconnector->mst_mgr.dev)
7130 drm_dp_mst_topology_mgr_destroy(&aconnector->mst_mgr);
7132 for (i = 0; i < dm->num_of_edps; i++) {
7133 if ((link == dm->backlight_link[i]) && dm->backlight_dev[i]) {
7134 backlight_device_unregister(dm->backlight_dev[i]);
7135 dm->backlight_dev[i] = NULL;
7139 if (aconnector->dc_em_sink)
7140 dc_sink_release(aconnector->dc_em_sink);
7141 aconnector->dc_em_sink = NULL;
7142 if (aconnector->dc_sink)
7143 dc_sink_release(aconnector->dc_sink);
7144 aconnector->dc_sink = NULL;
7146 drm_dp_cec_unregister_connector(&aconnector->dm_dp_aux.aux);
7147 drm_connector_unregister(connector);
7148 drm_connector_cleanup(connector);
7149 if (aconnector->i2c) {
7150 i2c_del_adapter(&aconnector->i2c->base);
7151 kfree(aconnector->i2c);
7153 kfree(aconnector->dm_dp_aux.aux.name);
7158 void amdgpu_dm_connector_funcs_reset(struct drm_connector *connector)
7160 struct dm_connector_state *state =
7161 to_dm_connector_state(connector->state);
7163 if (connector->state)
7164 __drm_atomic_helper_connector_destroy_state(connector->state);
7168 state = kzalloc(sizeof(*state), GFP_KERNEL);
7171 state->scaling = RMX_OFF;
7172 state->underscan_enable = false;
7173 state->underscan_hborder = 0;
7174 state->underscan_vborder = 0;
7175 state->base.max_requested_bpc = 8;
7176 state->vcpi_slots = 0;
7178 if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
7179 state->abm_level = amdgpu_dm_abm_level;
7181 __drm_atomic_helper_connector_reset(connector, &state->base);
7185 struct drm_connector_state *
7186 amdgpu_dm_connector_atomic_duplicate_state(struct drm_connector *connector)
7188 struct dm_connector_state *state =
7189 to_dm_connector_state(connector->state);
7191 struct dm_connector_state *new_state =
7192 kmemdup(state, sizeof(*state), GFP_KERNEL);
7197 __drm_atomic_helper_connector_duplicate_state(connector, &new_state->base);
7199 new_state->freesync_capable = state->freesync_capable;
7200 new_state->abm_level = state->abm_level;
7201 new_state->scaling = state->scaling;
7202 new_state->underscan_enable = state->underscan_enable;
7203 new_state->underscan_hborder = state->underscan_hborder;
7204 new_state->underscan_vborder = state->underscan_vborder;
7205 new_state->vcpi_slots = state->vcpi_slots;
7206 new_state->pbn = state->pbn;
7207 return &new_state->base;
7211 amdgpu_dm_connector_late_register(struct drm_connector *connector)
7213 struct amdgpu_dm_connector *amdgpu_dm_connector =
7214 to_amdgpu_dm_connector(connector);
7217 if ((connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort) ||
7218 (connector->connector_type == DRM_MODE_CONNECTOR_eDP)) {
7219 amdgpu_dm_connector->dm_dp_aux.aux.dev = connector->kdev;
7220 r = drm_dp_aux_register(&amdgpu_dm_connector->dm_dp_aux.aux);
7225 #if defined(CONFIG_DEBUG_FS)
7226 connector_debugfs_init(amdgpu_dm_connector);
7232 static const struct drm_connector_funcs amdgpu_dm_connector_funcs = {
7233 .reset = amdgpu_dm_connector_funcs_reset,
7234 .detect = amdgpu_dm_connector_detect,
7235 .fill_modes = drm_helper_probe_single_connector_modes,
7236 .destroy = amdgpu_dm_connector_destroy,
7237 .atomic_duplicate_state = amdgpu_dm_connector_atomic_duplicate_state,
7238 .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
7239 .atomic_set_property = amdgpu_dm_connector_atomic_set_property,
7240 .atomic_get_property = amdgpu_dm_connector_atomic_get_property,
7241 .late_register = amdgpu_dm_connector_late_register,
7242 .early_unregister = amdgpu_dm_connector_unregister
7245 static int get_modes(struct drm_connector *connector)
7247 return amdgpu_dm_connector_get_modes(connector);
7250 static void create_eml_sink(struct amdgpu_dm_connector *aconnector)
7252 struct dc_sink_init_data init_params = {
7253 .link = aconnector->dc_link,
7254 .sink_signal = SIGNAL_TYPE_VIRTUAL
7258 if (!aconnector->base.edid_blob_ptr) {
7259 DRM_ERROR("No EDID firmware found on connector: %s ,forcing to OFF!\n",
7260 aconnector->base.name);
7262 aconnector->base.force = DRM_FORCE_OFF;
7263 aconnector->base.override_edid = false;
7267 edid = (struct edid *) aconnector->base.edid_blob_ptr->data;
7269 aconnector->edid = edid;
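/*
 * For example, a base EDID block plus one extension gives
 * (1 + 1) * EDID_LENGTH = 256 bytes (EDID_LENGTH is 128).
 */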
7271 aconnector->dc_em_sink = dc_link_add_remote_sink(
7272 aconnector->dc_link,
7274 (edid->extensions + 1) * EDID_LENGTH,
7277 if (aconnector->base.force == DRM_FORCE_ON) {
7278 aconnector->dc_sink = aconnector->dc_link->local_sink ?
7279 aconnector->dc_link->local_sink :
7280 aconnector->dc_em_sink;
7281 dc_sink_retain(aconnector->dc_sink);
7285 static void handle_edid_mgmt(struct amdgpu_dm_connector *aconnector)
7287 struct dc_link *link = (struct dc_link *)aconnector->dc_link;
7290 * In case of a headless boot with force on for a DP-managed connector,
7291 * those settings have to be != 0 to get an initial modeset
7293 if (link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT) {
7294 link->verified_link_cap.lane_count = LANE_COUNT_FOUR;
7295 link->verified_link_cap.link_rate = LINK_RATE_HIGH2;
7299 aconnector->base.override_edid = true;
7300 create_eml_sink(aconnector);
7303 struct dc_stream_state *
7304 create_validate_stream_for_sink(struct amdgpu_dm_connector *aconnector,
7305 const struct drm_display_mode *drm_mode,
7306 const struct dm_connector_state *dm_state,
7307 const struct dc_stream_state *old_stream)
7309 struct drm_connector *connector = &aconnector->base;
7310 struct amdgpu_device *adev = drm_to_adev(connector->dev);
7311 struct dc_stream_state *stream;
7312 const struct drm_connector_state *drm_state = dm_state ? &dm_state->base : NULL;
7313 int requested_bpc = drm_state ? drm_state->max_requested_bpc : 8;
7314 enum dc_status dc_result = DC_OK;
7317 stream = create_stream_for_sink(aconnector, drm_mode,
7318 dm_state, old_stream,
7320 if (stream == NULL) {
7321 DRM_ERROR("Failed to create stream for sink!\n");
7325 dc_result = dc_validate_stream(adev->dm.dc, stream);
7326 if (dc_result == DC_OK && stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST)
7327 dc_result = dm_dp_mst_is_port_support_mode(aconnector, stream);
7329 if (dc_result != DC_OK) {
7330 DRM_DEBUG_KMS("Mode %dx%d (clk %d) failed DC validation with error %d (%s)\n",
7335 dc_status_to_str(dc_result));
7337 dc_stream_release(stream);
7339 requested_bpc -= 2; /* lower bpc to retry validation */
7342 } while (stream == NULL && requested_bpc >= 6);
7344 if (dc_result == DC_FAIL_ENC_VALIDATE && !aconnector->force_yuv420_output) {
7345 DRM_DEBUG_KMS("Retry forcing YCbCr420 encoding\n");
7347 aconnector->force_yuv420_output = true;
7348 stream = create_validate_stream_for_sink(aconnector, drm_mode,
7349 dm_state, old_stream);
7350 aconnector->force_yuv420_output = false;
7356 enum drm_mode_status amdgpu_dm_connector_mode_valid(struct drm_connector *connector,
7357 struct drm_display_mode *mode)
7359 int result = MODE_ERROR;
7360 struct dc_sink *dc_sink;
7361 /* TODO: Unhardcode stream count */
7362 struct dc_stream_state *stream;
7363 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
7365 if ((mode->flags & DRM_MODE_FLAG_INTERLACE) ||
7366 (mode->flags & DRM_MODE_FLAG_DBLSCAN))
7370 * Only run this the first time mode_valid is called to initialize
7373 if (aconnector->base.force != DRM_FORCE_UNSPECIFIED &&
7374 !aconnector->dc_em_sink)
7375 handle_edid_mgmt(aconnector);
7377 dc_sink = to_amdgpu_dm_connector(connector)->dc_sink;
7379 if (dc_sink == NULL && aconnector->base.force != DRM_FORCE_ON_DIGITAL &&
7380 aconnector->base.force != DRM_FORCE_ON) {
7381 DRM_ERROR("dc_sink is NULL!\n");
7385 stream = create_validate_stream_for_sink(aconnector, mode, NULL, NULL);
7387 dc_stream_release(stream);
7392 /* TODO: error handling */
7396 static int fill_hdr_info_packet(const struct drm_connector_state *state,
7397 struct dc_info_packet *out)
7399 struct hdmi_drm_infoframe frame;
7400 unsigned char buf[30]; /* 26 + 4 */
7404 memset(out, 0, sizeof(*out));
7406 if (!state->hdr_output_metadata)
7409 ret = drm_hdmi_infoframe_set_hdr_metadata(&frame, state);
7413 len = hdmi_drm_infoframe_pack_only(&frame, buf, sizeof(buf));
7417 /* Static metadata is a fixed 26 bytes + 4 byte header. */
7421 /* Prepare the infopacket for DC. */
7422 switch (state->connector->connector_type) {
7423 case DRM_MODE_CONNECTOR_HDMIA:
7424 out->hb0 = 0x87; /* type */
7425 out->hb1 = 0x01; /* version */
7426 out->hb2 = 0x1A; /* length */
7427 out->sb[0] = buf[3]; /* checksum */
7431 case DRM_MODE_CONNECTOR_DisplayPort:
7432 case DRM_MODE_CONNECTOR_eDP:
7433 out->hb0 = 0x00; /* sdp id, zero */
7434 out->hb1 = 0x87; /* type */
7435 out->hb2 = 0x1D; /* payload len - 1 */
7436 out->hb3 = (0x13 << 2); /* sdp version */
7437 out->sb[0] = 0x01; /* version */
7438 out->sb[1] = 0x1A; /* length */
7446 memcpy(&out->sb[i], &buf[4], 26);
7449 print_hex_dump(KERN_DEBUG, "HDR SB:", DUMP_PREFIX_NONE, 16, 1, out->sb,
7450 sizeof(out->sb), false);
7456 amdgpu_dm_connector_atomic_check(struct drm_connector *conn,
7457 struct drm_atomic_state *state)
7459 struct drm_connector_state *new_con_state =
7460 drm_atomic_get_new_connector_state(state, conn);
7461 struct drm_connector_state *old_con_state =
7462 drm_atomic_get_old_connector_state(state, conn);
7463 struct drm_crtc *crtc = new_con_state->crtc;
7464 struct drm_crtc_state *new_crtc_state;
7467 trace_amdgpu_dm_connector_atomic_check(new_con_state);
7472 if (!drm_connector_atomic_hdr_metadata_equal(old_con_state, new_con_state)) {
7473 struct dc_info_packet hdr_infopacket;
7475 ret = fill_hdr_info_packet(new_con_state, &hdr_infopacket);
7479 new_crtc_state = drm_atomic_get_crtc_state(state, crtc);
7480 if (IS_ERR(new_crtc_state))
7481 return PTR_ERR(new_crtc_state);
7484 * DC considers the stream backends changed if the
7485 * static metadata changes. Forcing the modeset also
7486 * gives a simple way for userspace to switch from
7487 * 8bpc to 10bpc when setting the metadata to enter
7490 * Changing the static metadata after it's been
7491 * set is permissible, however. So only force a
7492 * modeset if we're entering or exiting HDR.
7494 new_crtc_state->mode_changed =
7495 !old_con_state->hdr_output_metadata ||
7496 !new_con_state->hdr_output_metadata;
7502 static const struct drm_connector_helper_funcs
7503 amdgpu_dm_connector_helper_funcs = {
7505 * If hotplugging a second, bigger display in FB console mode, the bigger-resolution
7506 * modes will be filtered out by drm_mode_validate_size(), and those modes
7507 * are missing after the user starts lightdm. So we need to renew the mode list
7508 * in the get_modes callback, not just return the mode count.
7510 .get_modes = get_modes,
7511 .mode_valid = amdgpu_dm_connector_mode_valid,
7512 .atomic_check = amdgpu_dm_connector_atomic_check,
7515 static void dm_crtc_helper_disable(struct drm_crtc *crtc)
7519 static int count_crtc_active_planes(struct drm_crtc_state *new_crtc_state)
7521 struct drm_atomic_state *state = new_crtc_state->state;
7522 struct drm_plane *plane;
7525 drm_for_each_plane_mask(plane, state->dev, new_crtc_state->plane_mask) {
7526 struct drm_plane_state *new_plane_state;
7528 /* Cursor planes are "fake". */
7529 if (plane->type == DRM_PLANE_TYPE_CURSOR)
7532 new_plane_state = drm_atomic_get_new_plane_state(state, plane);
7534 if (!new_plane_state) {
7536 * The plane is enabled on the CRTC and hasn't changed
7537 * state. This means that it previously passed
7538 * validation and is therefore enabled.
7544 /* We need a framebuffer to be considered enabled. */
7545 num_active += (new_plane_state->fb != NULL);
7551 static void dm_update_crtc_active_planes(struct drm_crtc *crtc,
7552 struct drm_crtc_state *new_crtc_state)
7554 struct dm_crtc_state *dm_new_crtc_state =
7555 to_dm_crtc_state(new_crtc_state);
7557 dm_new_crtc_state->active_planes = 0;
7559 if (!dm_new_crtc_state->stream)
7562 dm_new_crtc_state->active_planes =
7563 count_crtc_active_planes(new_crtc_state);
7566 static int dm_crtc_helper_atomic_check(struct drm_crtc *crtc,
7567 struct drm_atomic_state *state)
7569 struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state,
7571 struct amdgpu_device *adev = drm_to_adev(crtc->dev);
7572 struct dc *dc = adev->dm.dc;
7573 struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
7576 trace_amdgpu_dm_crtc_atomic_check(crtc_state);
7578 dm_update_crtc_active_planes(crtc, crtc_state);
7580 if (WARN_ON(unlikely(!dm_crtc_state->stream &&
7581 modeset_required(crtc_state, NULL, dm_crtc_state->stream)))) {
7586 * We require the primary plane to be enabled whenever the CRTC is, otherwise
7587 * drm_mode_cursor_universal may end up trying to enable the cursor plane while all other
7588 * planes are disabled, which is not supported by the hardware. And there is legacy
7589 * userspace which stops using the HW cursor altogether in response to the resulting EINVAL.
7591 if (crtc_state->enable &&
7592 !(crtc_state->plane_mask & drm_plane_mask(crtc->primary))) {
7593 DRM_DEBUG_ATOMIC("Can't enable a CRTC without enabling the primary plane\n");
7597 /* In some use cases, like reset, no stream is attached */
7598 if (!dm_crtc_state->stream)
7601 if (dc_validate_stream(dc, dm_crtc_state->stream) == DC_OK)
7604 DRM_DEBUG_ATOMIC("Failed DC stream validation\n");
7608 static bool dm_crtc_helper_mode_fixup(struct drm_crtc *crtc,
7609 const struct drm_display_mode *mode,
7610 struct drm_display_mode *adjusted_mode)
7615 static const struct drm_crtc_helper_funcs amdgpu_dm_crtc_helper_funcs = {
7616 .disable = dm_crtc_helper_disable,
7617 .atomic_check = dm_crtc_helper_atomic_check,
7618 .mode_fixup = dm_crtc_helper_mode_fixup,
7619 .get_scanout_position = amdgpu_crtc_get_scanout_position,
7622 static void dm_encoder_helper_disable(struct drm_encoder *encoder)
7627 int convert_dc_color_depth_into_bpc(enum dc_color_depth display_color_depth)
7629 switch (display_color_depth) {
7630 case COLOR_DEPTH_666:
7632 case COLOR_DEPTH_888:
7634 case COLOR_DEPTH_101010:
7636 case COLOR_DEPTH_121212:
7638 case COLOR_DEPTH_141414:
7640 case COLOR_DEPTH_161616:
7648 static int dm_encoder_helper_atomic_check(struct drm_encoder *encoder,
7649 struct drm_crtc_state *crtc_state,
7650 struct drm_connector_state *conn_state)
7652 struct drm_atomic_state *state = crtc_state->state;
7653 struct drm_connector *connector = conn_state->connector;
7654 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
7655 struct dm_connector_state *dm_new_connector_state = to_dm_connector_state(conn_state);
7656 const struct drm_display_mode *adjusted_mode = &crtc_state->adjusted_mode;
7657 struct drm_dp_mst_topology_mgr *mst_mgr;
7658 struct drm_dp_mst_port *mst_port;
7659 enum dc_color_depth color_depth;
7661 bool is_y420 = false;
7663 if (!aconnector->port || !aconnector->dc_sink)
7666 mst_port = aconnector->port;
7667 mst_mgr = &aconnector->mst_port->mst_mgr;
7669 if (!crtc_state->connectors_changed && !crtc_state->mode_changed)
7672 if (!state->duplicated) {
7673 int max_bpc = conn_state->max_requested_bpc;
7674 is_y420 = drm_mode_is_420_also(&connector->display_info, adjusted_mode) &&
7675 aconnector->force_yuv420_output;
7676 color_depth = convert_color_depth_from_display_info(connector,
7679 bpp = convert_dc_color_depth_into_bpc(color_depth) * 3;
7680 clock = adjusted_mode->clock;
7681 dm_new_connector_state->pbn = drm_dp_calc_pbn_mode(clock, bpp, false);
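/*
 * Note: bpp above is bits per pixel across all three components (e.g. 8 bpc
 * maps to 24 bpp) and clock is the mode clock in kHz; drm_dp_calc_pbn_mode()
 * turns these into MST PBN units used for the VCPI slot allocation below.
 */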
7683 dm_new_connector_state->vcpi_slots = drm_dp_atomic_find_vcpi_slots(state,
7686 dm_new_connector_state->pbn,
7687 dm_mst_get_pbn_divider(aconnector->dc_link));
7688 if (dm_new_connector_state->vcpi_slots < 0) {
7689 DRM_DEBUG_ATOMIC("failed finding vcpi slots: %d\n", (int)dm_new_connector_state->vcpi_slots);
7690 return dm_new_connector_state->vcpi_slots;
7695 const struct drm_encoder_helper_funcs amdgpu_dm_encoder_helper_funcs = {
7696 .disable = dm_encoder_helper_disable,
7697 .atomic_check = dm_encoder_helper_atomic_check
7700 #if defined(CONFIG_DRM_AMD_DC_DCN)
7701 static int dm_update_mst_vcpi_slots_for_dsc(struct drm_atomic_state *state,
7702 struct dc_state *dc_state,
7703 struct dsc_mst_fairness_vars *vars)
7705 struct dc_stream_state *stream = NULL;
7706 struct drm_connector *connector;
7707 struct drm_connector_state *new_con_state;
7708 struct amdgpu_dm_connector *aconnector;
7709 struct dm_connector_state *dm_conn_state;
7711 int vcpi, pbn_div, pbn, slot_num = 0;
7713 for_each_new_connector_in_state(state, connector, new_con_state, i) {
7715 aconnector = to_amdgpu_dm_connector(connector);
7717 if (!aconnector->port)
7720 if (!new_con_state || !new_con_state->crtc)
7723 dm_conn_state = to_dm_connector_state(new_con_state);
7725 for (j = 0; j < dc_state->stream_count; j++) {
7726 stream = dc_state->streams[j];
7730 if ((struct amdgpu_dm_connector*)stream->dm_stream_context == aconnector)
7739 pbn_div = dm_mst_get_pbn_divider(stream->link);
7740 /* pbn is calculated by compute_mst_dsc_configs_for_state */
7741 for (j = 0; j < dc_state->stream_count; j++) {
7742 if (vars[j].aconnector == aconnector) {
7748 if (j == dc_state->stream_count)
7751 slot_num = DIV_ROUND_UP(pbn, pbn_div);
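/*
 * For illustration (hypothetical numbers): a stream needing pbn = 2520 on a
 * link providing pbn_div = 60 PBN per time slot occupies
 * DIV_ROUND_UP(2520, 60) = 42 slots.
 */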
7753 if (stream->timing.flags.DSC != 1) {
7754 dm_conn_state->pbn = pbn;
7755 dm_conn_state->vcpi_slots = slot_num;
7757 drm_dp_mst_atomic_enable_dsc(state,
7765 vcpi = drm_dp_mst_atomic_enable_dsc(state,
7772 dm_conn_state->pbn = pbn;
7773 dm_conn_state->vcpi_slots = vcpi;
7779 static void dm_drm_plane_reset(struct drm_plane *plane)
7781 struct dm_plane_state *amdgpu_state = NULL;
7784 plane->funcs->atomic_destroy_state(plane, plane->state);
7786 amdgpu_state = kzalloc(sizeof(*amdgpu_state), GFP_KERNEL);
7787 WARN_ON(amdgpu_state == NULL);
7790 __drm_atomic_helper_plane_reset(plane, &amdgpu_state->base);
7793 static struct drm_plane_state *
7794 dm_drm_plane_duplicate_state(struct drm_plane *plane)
7796 struct dm_plane_state *dm_plane_state, *old_dm_plane_state;
7798 old_dm_plane_state = to_dm_plane_state(plane->state);
7799 dm_plane_state = kzalloc(sizeof(*dm_plane_state), GFP_KERNEL);
7800 if (!dm_plane_state)
7803 __drm_atomic_helper_plane_duplicate_state(plane, &dm_plane_state->base);
7805 if (old_dm_plane_state->dc_state) {
7806 dm_plane_state->dc_state = old_dm_plane_state->dc_state;
7807 dc_plane_state_retain(dm_plane_state->dc_state);
7810 return &dm_plane_state->base;
7813 static void dm_drm_plane_destroy_state(struct drm_plane *plane,
7814 struct drm_plane_state *state)
7816 struct dm_plane_state *dm_plane_state = to_dm_plane_state(state);
7818 if (dm_plane_state->dc_state)
7819 dc_plane_state_release(dm_plane_state->dc_state);
7821 drm_atomic_helper_plane_destroy_state(plane, state);
7824 static const struct drm_plane_funcs dm_plane_funcs = {
7825 .update_plane = drm_atomic_helper_update_plane,
7826 .disable_plane = drm_atomic_helper_disable_plane,
7827 .destroy = drm_primary_helper_destroy,
7828 .reset = dm_drm_plane_reset,
7829 .atomic_duplicate_state = dm_drm_plane_duplicate_state,
7830 .atomic_destroy_state = dm_drm_plane_destroy_state,
7831 .format_mod_supported = dm_plane_format_mod_supported,
7834 static int dm_plane_helper_prepare_fb(struct drm_plane *plane,
7835 struct drm_plane_state *new_state)
7837 struct amdgpu_framebuffer *afb;
7838 struct drm_gem_object *obj;
7839 struct amdgpu_device *adev;
7840 struct amdgpu_bo *rbo;
7841 struct dm_plane_state *dm_plane_state_new, *dm_plane_state_old;
7845 if (!new_state->fb) {
7846 DRM_DEBUG_KMS("No FB bound\n");
7850 afb = to_amdgpu_framebuffer(new_state->fb);
7851 obj = new_state->fb->obj[0];
7852 rbo = gem_to_amdgpu_bo(obj);
7853 adev = amdgpu_ttm_adev(rbo->tbo.bdev);
7855 r = amdgpu_bo_reserve(rbo, true);
7857 dev_err(adev->dev, "fail to reserve bo (%d)\n", r);
7861 r = dma_resv_reserve_fences(rbo->tbo.base.resv, 1);
7863 dev_err(adev->dev, "reserving fence slot failed (%d)\n", r);
7867 if (plane->type != DRM_PLANE_TYPE_CURSOR)
7868 domain = amdgpu_display_supported_domains(adev, rbo->flags);
7870 domain = AMDGPU_GEM_DOMAIN_VRAM;
7872 r = amdgpu_bo_pin(rbo, domain);
7873 if (unlikely(r != 0)) {
7874 if (r != -ERESTARTSYS)
7875 DRM_ERROR("Failed to pin framebuffer with error %d\n", r);
7879 r = amdgpu_ttm_alloc_gart(&rbo->tbo);
7880 if (unlikely(r != 0)) {
7881 DRM_ERROR("%p bind failed\n", rbo);
7885 r = drm_gem_plane_helper_prepare_fb(plane, new_state);
7886 if (unlikely(r != 0))
7889 amdgpu_bo_unreserve(rbo);
7891 afb->address = amdgpu_bo_gpu_offset(rbo);
7896 * We don't do surface updates on planes that have been newly created,
7897 * but we also don't have the afb->address during atomic check.
7899 * Fill in buffer attributes depending on the address here, but only on
7900 * newly created planes since they're not being used by DC yet and this
7901 * won't modify global state.
7903 dm_plane_state_old = to_dm_plane_state(plane->state);
7904 dm_plane_state_new = to_dm_plane_state(new_state);
7906 if (dm_plane_state_new->dc_state &&
7907 dm_plane_state_old->dc_state != dm_plane_state_new->dc_state) {
7908 struct dc_plane_state *plane_state =
7909 dm_plane_state_new->dc_state;
7910 bool force_disable_dcc = !plane_state->dcc.enable;
7912 fill_plane_buffer_attributes(
7913 adev, afb, plane_state->format, plane_state->rotation,
7915 &plane_state->tiling_info, &plane_state->plane_size,
7916 &plane_state->dcc, &plane_state->address,
7917 afb->tmz_surface, force_disable_dcc);
7923 amdgpu_bo_unpin(rbo);
7926 amdgpu_bo_unreserve(rbo);
7930 static void dm_plane_helper_cleanup_fb(struct drm_plane *plane,
7931 struct drm_plane_state *old_state)
7933 struct amdgpu_bo *rbo;
7939 rbo = gem_to_amdgpu_bo(old_state->fb->obj[0]);
7940 r = amdgpu_bo_reserve(rbo, false);
7942 DRM_ERROR("failed to reserve rbo before unpin\n");
7946 amdgpu_bo_unpin(rbo);
7947 amdgpu_bo_unreserve(rbo);
7948 amdgpu_bo_unref(&rbo);
7951 static int dm_plane_helper_check_state(struct drm_plane_state *state,
7952 struct drm_crtc_state *new_crtc_state)
7954 struct drm_framebuffer *fb = state->fb;
7955 int min_downscale, max_upscale;
7957 int max_scale = INT_MAX;
7959 /* Plane enabled? Validate viewport and get scaling factors from plane caps. */
7960 if (fb && state->crtc) {
7961 /* Validate viewport to cover the case when only the position changes */
7962 if (state->plane->type != DRM_PLANE_TYPE_CURSOR) {
7963 int viewport_width = state->crtc_w;
7964 int viewport_height = state->crtc_h;
7966 if (state->crtc_x < 0)
7967 viewport_width += state->crtc_x;
7968 else if (state->crtc_x + state->crtc_w > new_crtc_state->mode.crtc_hdisplay)
7969 viewport_width = new_crtc_state->mode.crtc_hdisplay - state->crtc_x;
7971 if (state->crtc_y < 0)
7972 viewport_height += state->crtc_y;
7973 else if (state->crtc_y + state->crtc_h > new_crtc_state->mode.crtc_vdisplay)
7974 viewport_height = new_crtc_state->mode.crtc_vdisplay - state->crtc_y;
7976 if (viewport_width < 0 || viewport_height < 0) {
7977 DRM_DEBUG_ATOMIC("Plane completely outside of screen\n");
7979 } else if (viewport_width < MIN_VIEWPORT_SIZE*2) { /* x2 for width is because of pipe-split. */
7980 DRM_DEBUG_ATOMIC("Viewport width %d smaller than %d\n", viewport_width, MIN_VIEWPORT_SIZE*2);
7982 } else if (viewport_height < MIN_VIEWPORT_SIZE) {
7983 DRM_DEBUG_ATOMIC("Viewport height %d smaller than %d\n", viewport_height, MIN_VIEWPORT_SIZE);
7989 /* Get min/max allowed scaling factors from plane caps. */
7990 get_min_max_dc_plane_scaling(state->crtc->dev, fb,
7991 &min_downscale, &max_upscale);
7993 * Convert to drm convention: 16.16 fixed point, instead of dc's
7994 * 1.0 == 1000. Also drm scaling is src/dst instead of dc's
7995 * dst/src, so min_scale = 1.0 / max_upscale, etc.
7997 min_scale = (1000 << 16) / max_upscale;
7998 max_scale = (1000 << 16) / min_downscale;
8001 return drm_atomic_helper_check_plane_state(
8002 state, new_crtc_state, min_scale, max_scale, true, true);
8005 static int dm_plane_atomic_check(struct drm_plane *plane,
8006 struct drm_atomic_state *state)
8008 struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state,
8010 struct amdgpu_device *adev = drm_to_adev(plane->dev);
8011 struct dc *dc = adev->dm.dc;
8012 struct dm_plane_state *dm_plane_state;
8013 struct dc_scaling_info scaling_info;
8014 struct drm_crtc_state *new_crtc_state;
8017 trace_amdgpu_dm_plane_atomic_check(new_plane_state);
8019 dm_plane_state = to_dm_plane_state(new_plane_state);
8021 if (!dm_plane_state->dc_state)
8025 drm_atomic_get_new_crtc_state(state,
8026 new_plane_state->crtc);
8027 if (!new_crtc_state)
8030 ret = dm_plane_helper_check_state(new_plane_state, new_crtc_state);
8034 ret = fill_dc_scaling_info(adev, new_plane_state, &scaling_info);
8038 if (dc_validate_plane(dc, dm_plane_state->dc_state) == DC_OK)
8044 static int dm_plane_atomic_async_check(struct drm_plane *plane,
8045 struct drm_atomic_state *state)
8047 /* Only support async updates on cursor planes. */
8048 if (plane->type != DRM_PLANE_TYPE_CURSOR)
8054 static void dm_plane_atomic_async_update(struct drm_plane *plane,
8055 struct drm_atomic_state *state)
8057 struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state,
8059 struct drm_plane_state *old_state =
8060 drm_atomic_get_old_plane_state(state, plane);
8062 trace_amdgpu_dm_atomic_update_cursor(new_state);
8064 swap(plane->state->fb, new_state->fb);
8066 plane->state->src_x = new_state->src_x;
8067 plane->state->src_y = new_state->src_y;
8068 plane->state->src_w = new_state->src_w;
8069 plane->state->src_h = new_state->src_h;
8070 plane->state->crtc_x = new_state->crtc_x;
8071 plane->state->crtc_y = new_state->crtc_y;
8072 plane->state->crtc_w = new_state->crtc_w;
8073 plane->state->crtc_h = new_state->crtc_h;
8075 handle_cursor_update(plane, old_state);
8078 static const struct drm_plane_helper_funcs dm_plane_helper_funcs = {
8079 .prepare_fb = dm_plane_helper_prepare_fb,
8080 .cleanup_fb = dm_plane_helper_cleanup_fb,
8081 .atomic_check = dm_plane_atomic_check,
8082 .atomic_async_check = dm_plane_atomic_async_check,
8083 .atomic_async_update = dm_plane_atomic_async_update
8087 * TODO: these are currently initialized to RGB formats only.
8088 * For future use cases we should either initialize them dynamically based on
8089 * plane capabilities, or initialize this array to all formats, so the internal drm
8090 * check will succeed, and let DC implement the proper checks.
8092 static const uint32_t rgb_formats[] = {
8093 DRM_FORMAT_XRGB8888,
8094 DRM_FORMAT_ARGB8888,
8095 DRM_FORMAT_RGBA8888,
8096 DRM_FORMAT_XRGB2101010,
8097 DRM_FORMAT_XBGR2101010,
8098 DRM_FORMAT_ARGB2101010,
8099 DRM_FORMAT_ABGR2101010,
8100 DRM_FORMAT_XRGB16161616,
8101 DRM_FORMAT_XBGR16161616,
8102 DRM_FORMAT_ARGB16161616,
8103 DRM_FORMAT_ABGR16161616,
8104 DRM_FORMAT_XBGR8888,
8105 DRM_FORMAT_ABGR8888,
8109 static const uint32_t overlay_formats[] = {
8110 DRM_FORMAT_XRGB8888,
8111 DRM_FORMAT_ARGB8888,
8112 DRM_FORMAT_RGBA8888,
8113 DRM_FORMAT_XBGR8888,
8114 DRM_FORMAT_ABGR8888,
8118 static const u32 cursor_formats[] = {
8122 static int get_plane_formats(const struct drm_plane *plane,
8123 const struct dc_plane_cap *plane_cap,
8124 uint32_t *formats, int max_formats)
8126 int i, num_formats = 0;
8129 * TODO: Query support for each group of formats directly from
8130 * DC plane caps. This will require adding more formats to the caps list.
8134 switch (plane->type) {
8135 case DRM_PLANE_TYPE_PRIMARY:
8136 for (i = 0; i < ARRAY_SIZE(rgb_formats); ++i) {
8137 if (num_formats >= max_formats)
8140 formats[num_formats++] = rgb_formats[i];
8143 if (plane_cap && plane_cap->pixel_format_support.nv12)
8144 formats[num_formats++] = DRM_FORMAT_NV12;
8145 if (plane_cap && plane_cap->pixel_format_support.p010)
8146 formats[num_formats++] = DRM_FORMAT_P010;
8147 if (plane_cap && plane_cap->pixel_format_support.fp16) {
8148 formats[num_formats++] = DRM_FORMAT_XRGB16161616F;
8149 formats[num_formats++] = DRM_FORMAT_ARGB16161616F;
8150 formats[num_formats++] = DRM_FORMAT_XBGR16161616F;
8151 formats[num_formats++] = DRM_FORMAT_ABGR16161616F;
8155 case DRM_PLANE_TYPE_OVERLAY:
8156 for (i = 0; i < ARRAY_SIZE(overlay_formats); ++i) {
8157 if (num_formats >= max_formats)
8160 formats[num_formats++] = overlay_formats[i];
8164 case DRM_PLANE_TYPE_CURSOR:
8165 for (i = 0; i < ARRAY_SIZE(cursor_formats); ++i) {
8166 if (num_formats >= max_formats)
8169 formats[num_formats++] = cursor_formats[i];
8177 static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
8178 struct drm_plane *plane,
8179 unsigned long possible_crtcs,
8180 const struct dc_plane_cap *plane_cap)
8182 uint32_t formats[32];
8185 unsigned int supported_rotations;
8186 uint64_t *modifiers = NULL;
8188 num_formats = get_plane_formats(plane, plane_cap, formats,
8189 ARRAY_SIZE(formats));
8191 res = get_plane_modifiers(dm->adev, plane->type, &modifiers);
8195 if (modifiers == NULL)
8196 adev_to_drm(dm->adev)->mode_config.fb_modifiers_not_supported = true;
8198 res = drm_universal_plane_init(adev_to_drm(dm->adev), plane, possible_crtcs,
8199 &dm_plane_funcs, formats, num_formats,
8200 modifiers, plane->type, NULL);
8205 if (plane->type == DRM_PLANE_TYPE_OVERLAY &&
8206 plane_cap && plane_cap->per_pixel_alpha) {
8207 unsigned int blend_caps = BIT(DRM_MODE_BLEND_PIXEL_NONE) |
8208 BIT(DRM_MODE_BLEND_PREMULTI) |
8209 BIT(DRM_MODE_BLEND_COVERAGE);
8211 drm_plane_create_alpha_property(plane);
8212 drm_plane_create_blend_mode_property(plane, blend_caps);
8215 if (plane->type == DRM_PLANE_TYPE_PRIMARY &&
8217 (plane_cap->pixel_format_support.nv12 ||
8218 plane_cap->pixel_format_support.p010)) {
8219 /* This only affects YUV formats. */
8220 drm_plane_create_color_properties(
8222 BIT(DRM_COLOR_YCBCR_BT601) |
8223 BIT(DRM_COLOR_YCBCR_BT709) |
8224 BIT(DRM_COLOR_YCBCR_BT2020),
8225 BIT(DRM_COLOR_YCBCR_LIMITED_RANGE) |
8226 BIT(DRM_COLOR_YCBCR_FULL_RANGE),
8227 DRM_COLOR_YCBCR_BT709, DRM_COLOR_YCBCR_LIMITED_RANGE);
8230 supported_rotations =
8231 DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_90 |
8232 DRM_MODE_ROTATE_180 | DRM_MODE_ROTATE_270;
8234 if (dm->adev->asic_type >= CHIP_BONAIRE &&
8235 plane->type != DRM_PLANE_TYPE_CURSOR)
8236 drm_plane_create_rotation_property(plane, DRM_MODE_ROTATE_0,
8237 supported_rotations);
8239 drm_plane_helper_add(plane, &dm_plane_helper_funcs);
8241 /* Create (reset) the plane state */
8242 if (plane->funcs->reset)
8243 plane->funcs->reset(plane);
8248 static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
8249 struct drm_plane *plane,
8250 uint32_t crtc_index)
8252 struct amdgpu_crtc *acrtc = NULL;
8253 struct drm_plane *cursor_plane;
8257 cursor_plane = kzalloc(sizeof(*cursor_plane), GFP_KERNEL);
8261 cursor_plane->type = DRM_PLANE_TYPE_CURSOR;
8262 res = amdgpu_dm_plane_init(dm, cursor_plane, 0, NULL);
8264 acrtc = kzalloc(sizeof(struct amdgpu_crtc), GFP_KERNEL);
8268 res = drm_crtc_init_with_planes(
8273 &amdgpu_dm_crtc_funcs, NULL);
8278 drm_crtc_helper_add(&acrtc->base, &amdgpu_dm_crtc_helper_funcs);
8280 /* Create (reset) the crtc state */
8281 if (acrtc->base.funcs->reset)
8282 acrtc->base.funcs->reset(&acrtc->base);
8284 acrtc->max_cursor_width = dm->adev->dm.dc->caps.max_cursor_size;
8285 acrtc->max_cursor_height = dm->adev->dm.dc->caps.max_cursor_size;
8287 acrtc->crtc_id = crtc_index;
8288 acrtc->base.enabled = false;
8289 acrtc->otg_inst = -1;
8291 dm->adev->mode_info.crtcs[crtc_index] = acrtc;
8292 drm_crtc_enable_color_mgmt(&acrtc->base, MAX_COLOR_LUT_ENTRIES,
8293 true, MAX_COLOR_LUT_ENTRIES);
8294 drm_mode_crtc_set_gamma_size(&acrtc->base, MAX_COLOR_LEGACY_LUT_ENTRIES);
8300 kfree(cursor_plane);
8305 static int to_drm_connector_type(enum signal_type st)
8308 case SIGNAL_TYPE_HDMI_TYPE_A:
8309 return DRM_MODE_CONNECTOR_HDMIA;
8310 case SIGNAL_TYPE_EDP:
8311 return DRM_MODE_CONNECTOR_eDP;
8312 case SIGNAL_TYPE_LVDS:
8313 return DRM_MODE_CONNECTOR_LVDS;
8314 case SIGNAL_TYPE_RGB:
8315 return DRM_MODE_CONNECTOR_VGA;
8316 case SIGNAL_TYPE_DISPLAY_PORT:
8317 case SIGNAL_TYPE_DISPLAY_PORT_MST:
8318 return DRM_MODE_CONNECTOR_DisplayPort;
8319 case SIGNAL_TYPE_DVI_DUAL_LINK:
8320 case SIGNAL_TYPE_DVI_SINGLE_LINK:
8321 return DRM_MODE_CONNECTOR_DVID;
8322 case SIGNAL_TYPE_VIRTUAL:
8323 return DRM_MODE_CONNECTOR_VIRTUAL;
8326 return DRM_MODE_CONNECTOR_Unknown;
8330 static struct drm_encoder *amdgpu_dm_connector_to_encoder(struct drm_connector *connector)
8332 struct drm_encoder *encoder;
8334 /* There is only one encoder per connector */
8335 drm_connector_for_each_possible_encoder(connector, encoder)
8341 static void amdgpu_dm_get_native_mode(struct drm_connector *connector)
8343 struct drm_encoder *encoder;
8344 struct amdgpu_encoder *amdgpu_encoder;
8346 encoder = amdgpu_dm_connector_to_encoder(connector);
8348 if (encoder == NULL)
8351 amdgpu_encoder = to_amdgpu_encoder(encoder);
8353 amdgpu_encoder->native_mode.clock = 0;
8355 if (!list_empty(&connector->probed_modes)) {
8356 struct drm_display_mode *preferred_mode = NULL;
8358 list_for_each_entry(preferred_mode,
8359 &connector->probed_modes,
8361 if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED)
8362 amdgpu_encoder->native_mode = *preferred_mode;
8370 static struct drm_display_mode *
8371 amdgpu_dm_create_common_mode(struct drm_encoder *encoder,
8373 int hdisplay, int vdisplay)
8375 struct drm_device *dev = encoder->dev;
8376 struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
8377 struct drm_display_mode *mode = NULL;
8378 struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
8380 mode = drm_mode_duplicate(dev, native_mode);
8385 mode->hdisplay = hdisplay;
8386 mode->vdisplay = vdisplay;
8387 mode->type &= ~DRM_MODE_TYPE_PREFERRED;
8388 strscpy(mode->name, name, DRM_DISPLAY_MODE_LEN);
8394 static void amdgpu_dm_connector_add_common_modes(struct drm_encoder *encoder,
8395 struct drm_connector *connector)
8397 struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
8398 struct drm_display_mode *mode = NULL;
8399 struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
8400 struct amdgpu_dm_connector *amdgpu_dm_connector =
8401 to_amdgpu_dm_connector(connector);
8405 char name[DRM_DISPLAY_MODE_LEN];
8408 } common_modes[] = {
8409 { "640x480", 640, 480},
8410 { "800x600", 800, 600},
8411 { "1024x768", 1024, 768},
8412 { "1280x720", 1280, 720},
8413 { "1280x800", 1280, 800},
8414 {"1280x1024", 1280, 1024},
8415 { "1440x900", 1440, 900},
8416 {"1680x1050", 1680, 1050},
8417 {"1600x1200", 1600, 1200},
8418 {"1920x1080", 1920, 1080},
8419 {"1920x1200", 1920, 1200}
8422 n = ARRAY_SIZE(common_modes);
8424 for (i = 0; i < n; i++) {
8425 struct drm_display_mode *curmode = NULL;
8426 bool mode_existed = false;
8428 if (common_modes[i].w > native_mode->hdisplay ||
8429 common_modes[i].h > native_mode->vdisplay ||
8430 (common_modes[i].w == native_mode->hdisplay &&
8431 common_modes[i].h == native_mode->vdisplay))
8434 list_for_each_entry(curmode, &connector->probed_modes, head) {
8435 if (common_modes[i].w == curmode->hdisplay &&
8436 common_modes[i].h == curmode->vdisplay) {
8437 mode_existed = true;
8445 mode = amdgpu_dm_create_common_mode(encoder,
8446 common_modes[i].name, common_modes[i].w,
8451 drm_mode_probed_add(connector, mode);
8452 amdgpu_dm_connector->num_modes++;
8456 static void amdgpu_set_panel_orientation(struct drm_connector *connector)
8458 struct drm_encoder *encoder;
8459 struct amdgpu_encoder *amdgpu_encoder;
8460 const struct drm_display_mode *native_mode;
8462 if (connector->connector_type != DRM_MODE_CONNECTOR_eDP &&
8463 connector->connector_type != DRM_MODE_CONNECTOR_LVDS)
8466 encoder = amdgpu_dm_connector_to_encoder(connector);
8470 amdgpu_encoder = to_amdgpu_encoder(encoder);
8472 native_mode = &amdgpu_encoder->native_mode;
8473 if (native_mode->hdisplay == 0 || native_mode->vdisplay == 0)
8476 drm_connector_set_panel_orientation_with_quirk(connector,
8477 DRM_MODE_PANEL_ORIENTATION_UNKNOWN,
8478 native_mode->hdisplay,
8479 native_mode->vdisplay);
8482 static void amdgpu_dm_connector_ddc_get_modes(struct drm_connector *connector,
8485 struct amdgpu_dm_connector *amdgpu_dm_connector =
8486 to_amdgpu_dm_connector(connector);
8489 /* empty probed_modes */
8490 INIT_LIST_HEAD(&connector->probed_modes);
8491 amdgpu_dm_connector->num_modes =
8492 drm_add_edid_modes(connector, edid);
8494 /* Sort the probed modes before calling
8495 * amdgpu_dm_get_native_mode(), since an EDID can have
8496 * more than one preferred mode. Modes that appear
8497 * later in the probed mode list could be of a higher,
8498 * preferred resolution: for example, 3840x2160 as the
8499 * base EDID preferred timing and 4096x2160 as the
8500 * preferred resolution in a DID extension block later on.
8502 drm_mode_sort(&connector->probed_modes);
8503 amdgpu_dm_get_native_mode(connector);
8505 /* Freesync capabilities are reset by calling
8506 * drm_add_edid_modes() and need to be restored here.
8509 amdgpu_dm_update_freesync_caps(connector, edid);
8511 amdgpu_set_panel_orientation(connector);
8513 amdgpu_dm_connector->num_modes = 0;
8517 static bool is_duplicate_mode(struct amdgpu_dm_connector *aconnector,
8518 struct drm_display_mode *mode)
8520 struct drm_display_mode *m;
8522 list_for_each_entry (m, &aconnector->base.probed_modes, head) {
8523 if (drm_mode_equal(m, mode))
8530 static uint add_fs_modes(struct amdgpu_dm_connector *aconnector)
8532 const struct drm_display_mode *m;
8533 struct drm_display_mode *new_mode;
8535 uint32_t new_modes_count = 0;
8537 /* Standard FPS values
8546 * 60 - Commonly used
8547 * 48,72,96,120 - Multiples of 24
8549 static const uint32_t common_rates[] = {
8550 23976, 24000, 25000, 29970, 30000,
8551 48000, 50000, 60000, 72000, 96000, 120000
8555 * Find the mode with the highest refresh rate at the same resolution
8556 * as the preferred mode. Some monitors report a preferred mode
8557 * whose refresh rate is lower than the highest one they support.
8560 m = get_highest_refresh_rate_mode(aconnector, true);
8564 for (i = 0; i < ARRAY_SIZE(common_rates); i++) {
8565 uint64_t target_vtotal, target_vtotal_diff;
8568 if (drm_mode_vrefresh(m) * 1000 < common_rates[i])
8571 if (common_rates[i] < aconnector->min_vfreq * 1000 ||
8572 common_rates[i] > aconnector->max_vfreq * 1000)
8575 num = (unsigned long long)m->clock * 1000 * 1000;
8576 den = common_rates[i] * (unsigned long long)m->htotal;
8577 target_vtotal = div_u64(num, den);
8578 target_vtotal_diff = target_vtotal - m->vtotal;
8580 /* Check for illegal modes */
8581 if (m->vsync_start + target_vtotal_diff < m->vdisplay ||
8582 m->vsync_end + target_vtotal_diff < m->vsync_start ||
8583 m->vtotal + target_vtotal_diff < m->vsync_end)
8586 new_mode = drm_mode_duplicate(aconnector->base.dev, m);
8590 new_mode->vtotal += (u16)target_vtotal_diff;
8591 new_mode->vsync_start += (u16)target_vtotal_diff;
8592 new_mode->vsync_end += (u16)target_vtotal_diff;
8593 new_mode->type &= ~DRM_MODE_TYPE_PREFERRED;
8594 new_mode->type |= DRM_MODE_TYPE_DRIVER;
8596 if (!is_duplicate_mode(aconnector, new_mode)) {
8597 drm_mode_probed_add(&aconnector->base, new_mode);
8598 new_modes_count += 1;
8600 drm_mode_destroy(aconnector->base.dev, new_mode);
8603 return new_modes_count;
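/*
 * Worked example for the vtotal stretch above (illustrative numbers only,
 * not taken from a specific panel): assume the highest-refresh mode m is a
 * CEA 1080p60 timing with m->clock = 148500 kHz, m->htotal = 2200 and
 * m->vtotal = 1125. For the 48 Hz entry in common_rates (48000 mHz):
 *
 *   target_vtotal      = 148500 * 1000 * 1000 / (48000 * 2200) = 1406 (rounded down)
 *   target_vtotal_diff = 1406 - 1125 = 281
 *
 * so the duplicated mode keeps the same pixel clock and horizontal timing
 * but extends the vertical blank by 281 lines, yielding a fixed ~48 Hz mode
 * inside the panel's FreeSync range.
 */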
8606 static void amdgpu_dm_connector_add_freesync_modes(struct drm_connector *connector,
8609 struct amdgpu_dm_connector *amdgpu_dm_connector =
8610 to_amdgpu_dm_connector(connector);
8615 if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10)
8616 amdgpu_dm_connector->num_modes +=
8617 add_fs_modes(amdgpu_dm_connector);
8620 static int amdgpu_dm_connector_get_modes(struct drm_connector *connector)
8622 struct amdgpu_dm_connector *amdgpu_dm_connector =
8623 to_amdgpu_dm_connector(connector);
8624 struct drm_encoder *encoder;
8625 struct edid *edid = amdgpu_dm_connector->edid;
8627 encoder = amdgpu_dm_connector_to_encoder(connector);
8629 if (!drm_edid_is_valid(edid)) {
8630 amdgpu_dm_connector->num_modes =
8631 drm_add_modes_noedid(connector, 640, 480);
8633 amdgpu_dm_connector_ddc_get_modes(connector, edid);
8634 amdgpu_dm_connector_add_common_modes(encoder, connector);
8635 amdgpu_dm_connector_add_freesync_modes(connector, edid);
8637 amdgpu_dm_fbc_init(connector);
8639 return amdgpu_dm_connector->num_modes;
8642 void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
8643 struct amdgpu_dm_connector *aconnector,
8645 struct dc_link *link,
8648 struct amdgpu_device *adev = drm_to_adev(dm->ddev);
8651 * Some of the properties below require access to state, like bpc.
8652 * Allocate some default initial connector state with our reset helper.
8654 if (aconnector->base.funcs->reset)
8655 aconnector->base.funcs->reset(&aconnector->base);
8657 aconnector->connector_id = link_index;
8658 aconnector->dc_link = link;
8659 aconnector->base.interlace_allowed = false;
8660 aconnector->base.doublescan_allowed = false;
8661 aconnector->base.stereo_allowed = false;
8662 aconnector->base.dpms = DRM_MODE_DPMS_OFF;
8663 aconnector->hpd.hpd = AMDGPU_HPD_NONE; /* not used */
8664 aconnector->audio_inst = -1;
8665 mutex_init(&aconnector->hpd_lock);
8668 * Configure HPD hot plug support. connector->polled defaults to 0,
8669 * which means HPD hot plug is not supported.
8671 switch (connector_type) {
8672 case DRM_MODE_CONNECTOR_HDMIA:
8673 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
8674 aconnector->base.ycbcr_420_allowed =
8675 link->link_enc->features.hdmi_ycbcr420_supported ? true : false;
8677 case DRM_MODE_CONNECTOR_DisplayPort:
8678 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
8679 link->link_enc = link_enc_cfg_get_link_enc(link);
8680 ASSERT(link->link_enc);
8682 aconnector->base.ycbcr_420_allowed =
8683 link->link_enc->features.dp_ycbcr420_supported ? true : false;
8685 case DRM_MODE_CONNECTOR_DVID:
8686 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
8692 drm_object_attach_property(&aconnector->base.base,
8693 dm->ddev->mode_config.scaling_mode_property,
8694 DRM_MODE_SCALE_NONE);
8696 drm_object_attach_property(&aconnector->base.base,
8697 adev->mode_info.underscan_property,
8699 drm_object_attach_property(&aconnector->base.base,
8700 adev->mode_info.underscan_hborder_property,
8702 drm_object_attach_property(&aconnector->base.base,
8703 adev->mode_info.underscan_vborder_property,
8706 if (!aconnector->mst_port)
8707 drm_connector_attach_max_bpc_property(&aconnector->base, 8, 16);
8709 /* This defaults to the max in the range, but we want 8bpc for non-edp. */
8710 aconnector->base.state->max_bpc = (connector_type == DRM_MODE_CONNECTOR_eDP) ? 16 : 8;
8711 aconnector->base.state->max_requested_bpc = aconnector->base.state->max_bpc;
8713 if (connector_type == DRM_MODE_CONNECTOR_eDP &&
8714 (dc_is_dmcu_initialized(adev->dm.dc) || adev->dm.dc->ctx->dmub_srv)) {
8715 drm_object_attach_property(&aconnector->base.base,
8716 adev->mode_info.abm_level_property, 0);
8719 if (connector_type == DRM_MODE_CONNECTOR_HDMIA ||
8720 connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
8721 connector_type == DRM_MODE_CONNECTOR_eDP) {
8722 drm_connector_attach_hdr_output_metadata_property(&aconnector->base);
8724 if (!aconnector->mst_port)
8725 drm_connector_attach_vrr_capable_property(&aconnector->base);
8727 #ifdef CONFIG_DRM_AMD_DC_HDCP
8728 if (adev->dm.hdcp_workqueue)
8729 drm_connector_attach_content_protection_property(&aconnector->base, true);
8734 static int amdgpu_dm_i2c_xfer(struct i2c_adapter *i2c_adap,
8735 struct i2c_msg *msgs, int num)
8737 struct amdgpu_i2c_adapter *i2c = i2c_get_adapdata(i2c_adap);
8738 struct ddc_service *ddc_service = i2c->ddc_service;
8739 struct i2c_command cmd;
8743 cmd.payloads = kcalloc(num, sizeof(struct i2c_payload), GFP_KERNEL);
8748 cmd.number_of_payloads = num;
8749 cmd.engine = I2C_COMMAND_ENGINE_DEFAULT;
8752 for (i = 0; i < num; i++) {
8753 cmd.payloads[i].write = !(msgs[i].flags & I2C_M_RD);
8754 cmd.payloads[i].address = msgs[i].addr;
8755 cmd.payloads[i].length = msgs[i].len;
8756 cmd.payloads[i].data = msgs[i].buf;
8760 ddc_service->ctx->dc,
8761 ddc_service->link->link_index,
8765 kfree(cmd.payloads);
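/*
 * Example of the msg -> payload translation above (a sketch with assumed
 * addresses and lengths, not a fixed protocol requirement): a typical EDID
 * read arrives as two i2c_msg entries for slave address 0x50 - a 1-byte
 * write of the block offset followed by a 128-byte read. The loop maps
 * these to:
 *   cmd.payloads[0] = { .write = true,  .address = 0x50, .length = 1,   .data = &offset }
 *   cmd.payloads[1] = { .write = false, .address = 0x50, .length = 128, .data = buf }
 * before the whole command is handed to DC's DDC engine for submission.
 */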
8769 static u32 amdgpu_dm_i2c_func(struct i2c_adapter *adap)
8771 return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
8774 static const struct i2c_algorithm amdgpu_dm_i2c_algo = {
8775 .master_xfer = amdgpu_dm_i2c_xfer,
8776 .functionality = amdgpu_dm_i2c_func,
8779 static struct amdgpu_i2c_adapter *
8780 create_i2c(struct ddc_service *ddc_service,
8784 struct amdgpu_device *adev = ddc_service->ctx->driver_context;
8785 struct amdgpu_i2c_adapter *i2c;
8787 i2c = kzalloc(sizeof(struct amdgpu_i2c_adapter), GFP_KERNEL);
8790 i2c->base.owner = THIS_MODULE;
8791 i2c->base.class = I2C_CLASS_DDC;
8792 i2c->base.dev.parent = &adev->pdev->dev;
8793 i2c->base.algo = &amdgpu_dm_i2c_algo;
8794 snprintf(i2c->base.name, sizeof(i2c->base.name), "AMDGPU DM i2c hw bus %d", link_index);
8795 i2c_set_adapdata(&i2c->base, i2c);
8796 i2c->ddc_service = ddc_service;
8803 * Note: this function assumes that dc_link_detect() was called for the
8804 * dc_link which will be represented by this aconnector.
8806 static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
8807 struct amdgpu_dm_connector *aconnector,
8808 uint32_t link_index,
8809 struct amdgpu_encoder *aencoder)
8813 struct dc *dc = dm->dc;
8814 struct dc_link *link = dc_get_link_at_index(dc, link_index);
8815 struct amdgpu_i2c_adapter *i2c;
8817 link->priv = aconnector;
8819 DRM_DEBUG_DRIVER("%s()\n", __func__);
8821 i2c = create_i2c(link->ddc, link->link_index, &res);
8823 DRM_ERROR("Failed to create i2c adapter data\n");
8827 aconnector->i2c = i2c;
8828 res = i2c_add_adapter(&i2c->base);
8831 DRM_ERROR("Failed to register hw i2c %d\n", link->link_index);
8835 connector_type = to_drm_connector_type(link->connector_signal);
8837 res = drm_connector_init_with_ddc(
8840 &amdgpu_dm_connector_funcs,
8845 DRM_ERROR("connector_init failed\n");
8846 aconnector->connector_id = -1;
8850 drm_connector_helper_add(
8852 &amdgpu_dm_connector_helper_funcs);
8854 amdgpu_dm_connector_init_helper(
8861 drm_connector_attach_encoder(
8862 &aconnector->base, &aencoder->base);
8864 if (connector_type == DRM_MODE_CONNECTOR_DisplayPort
8865 || connector_type == DRM_MODE_CONNECTOR_eDP)
8866 amdgpu_dm_initialize_dp_connector(dm, aconnector, link->link_index);
8871 aconnector->i2c = NULL;
8876 int amdgpu_dm_get_encoder_crtc_mask(struct amdgpu_device *adev)
8878 switch (adev->mode_info.num_crtc) {
8895 static int amdgpu_dm_encoder_init(struct drm_device *dev,
8896 struct amdgpu_encoder *aencoder,
8897 uint32_t link_index)
8899 struct amdgpu_device *adev = drm_to_adev(dev);
8901 int res = drm_encoder_init(dev,
8903 &amdgpu_dm_encoder_funcs,
8904 DRM_MODE_ENCODER_TMDS,
8907 aencoder->base.possible_crtcs = amdgpu_dm_get_encoder_crtc_mask(adev);
8910 aencoder->encoder_id = link_index;
8912 aencoder->encoder_id = -1;
8914 drm_encoder_helper_add(&aencoder->base, &amdgpu_dm_encoder_helper_funcs);
8919 static void manage_dm_interrupts(struct amdgpu_device *adev,
8920 struct amdgpu_crtc *acrtc,
8924 * We have no guarantee that the frontend index maps to the same
8925 * backend index - some even map to more than one.
8927 * TODO: Use a different interrupt or check DC itself for the mapping.
8930 amdgpu_display_crtc_idx_to_irq_type(
8935 drm_crtc_vblank_on(&acrtc->base);
8938 &adev->pageflip_irq,
8940 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
8947 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
8955 &adev->pageflip_irq,
8957 drm_crtc_vblank_off(&acrtc->base);
8961 static void dm_update_pflip_irq_state(struct amdgpu_device *adev,
8962 struct amdgpu_crtc *acrtc)
8965 amdgpu_display_crtc_idx_to_irq_type(adev, acrtc->crtc_id);
8968 * This reads the current state for the IRQ and forcibly reapplies
8969 * the setting to hardware.
8971 amdgpu_irq_update(adev, &adev->pageflip_irq, irq_type);
8975 is_scaling_state_different(const struct dm_connector_state *dm_state,
8976 const struct dm_connector_state *old_dm_state)
8978 if (dm_state->scaling != old_dm_state->scaling)
8980 if (!dm_state->underscan_enable && old_dm_state->underscan_enable) {
8981 if (old_dm_state->underscan_hborder != 0 && old_dm_state->underscan_vborder != 0)
8983 } else if (dm_state->underscan_enable && !old_dm_state->underscan_enable) {
8984 if (dm_state->underscan_hborder != 0 && dm_state->underscan_vborder != 0)
8986 } else if (dm_state->underscan_hborder != old_dm_state->underscan_hborder ||
8987 dm_state->underscan_vborder != old_dm_state->underscan_vborder)
8992 #ifdef CONFIG_DRM_AMD_DC_HDCP
8993 static bool is_content_protection_different(struct drm_connector_state *state,
8994 const struct drm_connector_state *old_state,
8995 const struct drm_connector *connector, struct hdcp_workqueue *hdcp_w)
8997 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
8998 struct dm_connector_state *dm_con_state = to_dm_connector_state(connector->state);
9000 /* Handle: Type0/1 change */
9001 if (old_state->hdcp_content_type != state->hdcp_content_type &&
9002 state->content_protection != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
9003 state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
9007 /* CP is being re-enabled, ignore this
9009 * Handles: ENABLED -> DESIRED
9011 if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED &&
9012 state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) {
9013 state->content_protection = DRM_MODE_CONTENT_PROTECTION_ENABLED;
9017 /* S3 resume case, since old state will always be 0 (UNDESIRED) and the restored state will be ENABLED
9019 * Handles: UNDESIRED -> ENABLED
9021 if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_UNDESIRED &&
9022 state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
9023 state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
9025 /* Stream removed and re-enabled
9027 * Can sometimes overlap with the HPD case,
9028 * thus set update_hdcp to false to avoid
9029 * setting HDCP multiple times.
9031 * Handles: DESIRED -> DESIRED (Special case)
9033 if (!(old_state->crtc && old_state->crtc->enabled) &&
9034 state->crtc && state->crtc->enabled &&
9035 connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) {
9036 dm_con_state->update_hdcp = false;
9040 /* Hot-plug, headless s3, dpms
9042 * Only start HDCP if the display is connected/enabled.
9043 * update_hdcp flag will be set to false until the next time the connection is detected.
9046 * Handles: DESIRED -> DESIRED (Special case)
9048 if (dm_con_state->update_hdcp && state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED &&
9049 connector->dpms == DRM_MODE_DPMS_ON && aconnector->dc_sink != NULL) {
9050 dm_con_state->update_hdcp = false;
9055 * Handles: UNDESIRED -> UNDESIRED
9056 * DESIRED -> DESIRED
9057 * ENABLED -> ENABLED
9059 if (old_state->content_protection == state->content_protection)
9063 * Handles: UNDESIRED -> DESIRED
9064 * DESIRED -> UNDESIRED
9065 * ENABLED -> UNDESIRED
9067 if (state->content_protection != DRM_MODE_CONTENT_PROTECTION_ENABLED)
9071 * Handles: DESIRED -> ENABLED
9077 static void remove_stream(struct amdgpu_device *adev,
9078 struct amdgpu_crtc *acrtc,
9079 struct dc_stream_state *stream)
9081 /* this is the update mode case */
9083 acrtc->otg_inst = -1;
9084 acrtc->enabled = false;
9087 static int get_cursor_position(struct drm_plane *plane, struct drm_crtc *crtc,
9088 struct dc_cursor_position *position)
9090 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
9092 int xorigin = 0, yorigin = 0;
9094 if (!crtc || !plane->state->fb)
9097 if ((plane->state->crtc_w > amdgpu_crtc->max_cursor_width) ||
9098 (plane->state->crtc_h > amdgpu_crtc->max_cursor_height)) {
9099 DRM_ERROR("%s: bad cursor width or height %d x %d\n",
9101 plane->state->crtc_w,
9102 plane->state->crtc_h);
9106 x = plane->state->crtc_x;
9107 y = plane->state->crtc_y;
9109 if (x <= -amdgpu_crtc->max_cursor_width ||
9110 y <= -amdgpu_crtc->max_cursor_height)
9114 xorigin = min(-x, amdgpu_crtc->max_cursor_width - 1);
9118 yorigin = min(-y, amdgpu_crtc->max_cursor_height - 1);
9121 position->enable = true;
9122 position->translate_by_source = true;
9125 position->x_hotspot = xorigin;
9126 position->y_hotspot = yorigin;
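/*
 * Example of the hotspot math above (assumed values, purely illustrative):
 * with a 64x64 cursor whose top-left corner is dragged to crtc_x = -10,
 * crtc_y = 5, the cursor is still partially visible, so the hardware
 * position is programmed at x = 0, y = 5 with x_hotspot = 10, y_hotspot = 0.
 * Together with translate_by_source this shifts the sampled cursor surface
 * left by 10 pixels, so the visible portion matches what userspace requested.
 */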
9131 static void handle_cursor_update(struct drm_plane *plane,
9132 struct drm_plane_state *old_plane_state)
9134 struct amdgpu_device *adev = drm_to_adev(plane->dev);
9135 struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(plane->state->fb);
9136 struct drm_crtc *crtc = afb ? plane->state->crtc : old_plane_state->crtc;
9137 struct dm_crtc_state *crtc_state = crtc ? to_dm_crtc_state(crtc->state) : NULL;
9138 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
9139 uint64_t address = afb ? afb->address : 0;
9140 struct dc_cursor_position position = {0};
9141 struct dc_cursor_attributes attributes;
9144 if (!plane->state->fb && !old_plane_state->fb)
9147 DC_LOG_CURSOR("%s: crtc_id=%d with size %d to %d\n",
9149 amdgpu_crtc->crtc_id,
9150 plane->state->crtc_w,
9151 plane->state->crtc_h);
9153 ret = get_cursor_position(plane, crtc, &position);
9157 if (!position.enable) {
9158 /* turn off cursor */
9159 if (crtc_state && crtc_state->stream) {
9160 mutex_lock(&adev->dm.dc_lock);
9161 dc_stream_set_cursor_position(crtc_state->stream,
9163 mutex_unlock(&adev->dm.dc_lock);
9168 amdgpu_crtc->cursor_width = plane->state->crtc_w;
9169 amdgpu_crtc->cursor_height = plane->state->crtc_h;
9171 memset(&attributes, 0, sizeof(attributes));
9172 attributes.address.high_part = upper_32_bits(address);
9173 attributes.address.low_part = lower_32_bits(address);
9174 attributes.width = plane->state->crtc_w;
9175 attributes.height = plane->state->crtc_h;
9176 attributes.color_format = CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA;
9177 attributes.rotation_angle = 0;
9178 attributes.attribute_flags.value = 0;
9180 attributes.pitch = afb->base.pitches[0] / afb->base.format->cpp[0];
9182 if (crtc_state->stream) {
9183 mutex_lock(&adev->dm.dc_lock);
9184 if (!dc_stream_set_cursor_attributes(crtc_state->stream,
9186 DRM_ERROR("DC failed to set cursor attributes\n");
9188 if (!dc_stream_set_cursor_position(crtc_state->stream,
9190 DRM_ERROR("DC failed to set cursor position\n");
9191 mutex_unlock(&adev->dm.dc_lock);
9195 static void prepare_flip_isr(struct amdgpu_crtc *acrtc)
9198 assert_spin_locked(&acrtc->base.dev->event_lock);
9199 WARN_ON(acrtc->event);
9201 acrtc->event = acrtc->base.state->event;
9203 /* Set the flip status */
9204 acrtc->pflip_status = AMDGPU_FLIP_SUBMITTED;
9206 /* Mark this event as consumed */
9207 acrtc->base.state->event = NULL;
9209 DC_LOG_PFLIP("crtc:%d, pflip_stat:AMDGPU_FLIP_SUBMITTED\n",
9213 static void update_freesync_state_on_stream(
9214 struct amdgpu_display_manager *dm,
9215 struct dm_crtc_state *new_crtc_state,
9216 struct dc_stream_state *new_stream,
9217 struct dc_plane_state *surface,
9218 u32 flip_timestamp_in_us)
9220 struct mod_vrr_params vrr_params;
9221 struct dc_info_packet vrr_infopacket = {0};
9222 struct amdgpu_device *adev = dm->adev;
9223 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_crtc_state->base.crtc);
9224 unsigned long flags;
9225 bool pack_sdp_v1_3 = false;
9231 * TODO: Determine why min/max totals and vrefresh can be 0 here.
9232 * For now it's sufficient to just guard against these conditions.
9235 if (!new_stream->timing.h_total || !new_stream->timing.v_total)
9238 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
9239 vrr_params = acrtc->dm_irq_params.vrr_params;
9242 mod_freesync_handle_preflip(
9243 dm->freesync_module,
9246 flip_timestamp_in_us,
9249 if (adev->family < AMDGPU_FAMILY_AI &&
9250 amdgpu_dm_vrr_active(new_crtc_state)) {
9251 mod_freesync_handle_v_update(dm->freesync_module,
9252 new_stream, &vrr_params);
9254 /* Need to call this before the frame ends. */
9255 dc_stream_adjust_vmin_vmax(dm->dc,
9256 new_crtc_state->stream,
9257 &vrr_params.adjust);
9261 mod_freesync_build_vrr_infopacket(
9262 dm->freesync_module,
9266 TRANSFER_FUNC_UNKNOWN,
9270 new_crtc_state->freesync_timing_changed |=
9271 (memcmp(&acrtc->dm_irq_params.vrr_params.adjust,
9273 sizeof(vrr_params.adjust)) != 0);
9275 new_crtc_state->freesync_vrr_info_changed |=
9276 (memcmp(&new_crtc_state->vrr_infopacket,
9278 sizeof(vrr_infopacket)) != 0);
9280 acrtc->dm_irq_params.vrr_params = vrr_params;
9281 new_crtc_state->vrr_infopacket = vrr_infopacket;
9283 new_stream->adjust = acrtc->dm_irq_params.vrr_params.adjust;
9284 new_stream->vrr_infopacket = vrr_infopacket;
9286 if (new_crtc_state->freesync_vrr_info_changed)
9287 DRM_DEBUG_KMS("VRR packet update: crtc=%u enabled=%d state=%d",
9288 new_crtc_state->base.crtc->base.id,
9289 (int)new_crtc_state->base.vrr_enabled,
9290 (int)vrr_params.state);
9292 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
9295 static void update_stream_irq_parameters(
9296 struct amdgpu_display_manager *dm,
9297 struct dm_crtc_state *new_crtc_state)
9299 struct dc_stream_state *new_stream = new_crtc_state->stream;
9300 struct mod_vrr_params vrr_params;
9301 struct mod_freesync_config config = new_crtc_state->freesync_config;
9302 struct amdgpu_device *adev = dm->adev;
9303 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_crtc_state->base.crtc);
9304 unsigned long flags;
9310 * TODO: Determine why min/max totals and vrefresh can be 0 here.
9311 * For now it's sufficient to just guard against these conditions.
9313 if (!new_stream->timing.h_total || !new_stream->timing.v_total)
9316 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
9317 vrr_params = acrtc->dm_irq_params.vrr_params;
9319 if (new_crtc_state->vrr_supported &&
9320 config.min_refresh_in_uhz &&
9321 config.max_refresh_in_uhz) {
9323 * if freesync compatible mode was set, config.state will be set in atomic check
9326 if (config.state == VRR_STATE_ACTIVE_FIXED && config.fixed_refresh_in_uhz &&
9327 (!drm_atomic_crtc_needs_modeset(&new_crtc_state->base) ||
9328 new_crtc_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED)) {
9329 vrr_params.max_refresh_in_uhz = config.max_refresh_in_uhz;
9330 vrr_params.min_refresh_in_uhz = config.min_refresh_in_uhz;
9331 vrr_params.fixed_refresh_in_uhz = config.fixed_refresh_in_uhz;
9332 vrr_params.state = VRR_STATE_ACTIVE_FIXED;
9334 config.state = new_crtc_state->base.vrr_enabled ?
9335 VRR_STATE_ACTIVE_VARIABLE :
9339 config.state = VRR_STATE_UNSUPPORTED;
9342 mod_freesync_build_vrr_params(dm->freesync_module,
9344 &config, &vrr_params);
9346 new_crtc_state->freesync_timing_changed |=
9347 (memcmp(&acrtc->dm_irq_params.vrr_params.adjust,
9348 &vrr_params.adjust, sizeof(vrr_params.adjust)) != 0);
9350 new_crtc_state->freesync_config = config;
9351 /* Copy state for access from DM IRQ handler */
9352 acrtc->dm_irq_params.freesync_config = config;
9353 acrtc->dm_irq_params.active_planes = new_crtc_state->active_planes;
9354 acrtc->dm_irq_params.vrr_params = vrr_params;
9355 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
9358 static void amdgpu_dm_handle_vrr_transition(struct dm_crtc_state *old_state,
9359 struct dm_crtc_state *new_state)
9361 bool old_vrr_active = amdgpu_dm_vrr_active(old_state);
9362 bool new_vrr_active = amdgpu_dm_vrr_active(new_state);
9364 if (!old_vrr_active && new_vrr_active) {
9365 /* Transition VRR inactive -> active:
9366 * While VRR is active, we must not disable vblank irq, as a
9367 * re-enable after disable would compute bogus vblank/pflip
9368 * timestamps if it happened inside the display front porch.
9370 * We also need the vupdate irq for the actual core vblank handling at the end of vblank.
9373 dm_set_vupdate_irq(new_state->base.crtc, true);
9374 drm_crtc_vblank_get(new_state->base.crtc);
9375 DRM_DEBUG_DRIVER("%s: crtc=%u VRR off->on: Get vblank ref\n",
9376 __func__, new_state->base.crtc->base.id);
9377 } else if (old_vrr_active && !new_vrr_active) {
9378 /* Transition VRR active -> inactive:
9379 * Allow vblank irq disable again for fixed refresh rate.
9381 dm_set_vupdate_irq(new_state->base.crtc, false);
9382 drm_crtc_vblank_put(new_state->base.crtc);
9383 DRM_DEBUG_DRIVER("%s: crtc=%u VRR on->off: Drop vblank ref\n",
9384 __func__, new_state->base.crtc->base.id);
9388 static void amdgpu_dm_commit_cursors(struct drm_atomic_state *state)
9390 struct drm_plane *plane;
9391 struct drm_plane_state *old_plane_state;
9395 * TODO: Make this per-stream so we don't issue redundant updates for
9396 * commits with multiple streams.
9398 for_each_old_plane_in_state(state, plane, old_plane_state, i)
9399 if (plane->type == DRM_PLANE_TYPE_CURSOR)
9400 handle_cursor_update(plane, old_plane_state);
9403 static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
9404 struct dc_state *dc_state,
9405 struct drm_device *dev,
9406 struct amdgpu_display_manager *dm,
9407 struct drm_crtc *pcrtc,
9408 bool wait_for_vblank)
9411 uint64_t timestamp_ns;
9412 struct drm_plane *plane;
9413 struct drm_plane_state *old_plane_state, *new_plane_state;
9414 struct amdgpu_crtc *acrtc_attach = to_amdgpu_crtc(pcrtc);
9415 struct drm_crtc_state *new_pcrtc_state =
9416 drm_atomic_get_new_crtc_state(state, pcrtc);
9417 struct dm_crtc_state *acrtc_state = to_dm_crtc_state(new_pcrtc_state);
9418 struct dm_crtc_state *dm_old_crtc_state =
9419 to_dm_crtc_state(drm_atomic_get_old_crtc_state(state, pcrtc));
9420 int planes_count = 0, vpos, hpos;
9421 unsigned long flags;
9422 uint32_t target_vblank, last_flip_vblank;
9423 bool vrr_active = amdgpu_dm_vrr_active(acrtc_state);
9424 bool cursor_update = false;
9425 bool pflip_present = false;
9427 struct dc_surface_update surface_updates[MAX_SURFACES];
9428 struct dc_plane_info plane_infos[MAX_SURFACES];
9429 struct dc_scaling_info scaling_infos[MAX_SURFACES];
9430 struct dc_flip_addrs flip_addrs[MAX_SURFACES];
9431 struct dc_stream_update stream_update;
9434 bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
9437 dm_error("Failed to allocate update bundle\n");
9442 * Disable the cursor first if we're disabling all the planes.
9443 * It'll remain on the screen after the planes are re-enabled if we don't.
9446 if (acrtc_state->active_planes == 0)
9447 amdgpu_dm_commit_cursors(state);
9449 /* update planes when needed */
9450 for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
9451 struct drm_crtc *crtc = new_plane_state->crtc;
9452 struct drm_crtc_state *new_crtc_state;
9453 struct drm_framebuffer *fb = new_plane_state->fb;
9454 struct amdgpu_framebuffer *afb = (struct amdgpu_framebuffer *)fb;
9455 bool plane_needs_flip;
9456 struct dc_plane_state *dc_plane;
9457 struct dm_plane_state *dm_new_plane_state = to_dm_plane_state(new_plane_state);
9459 /* Cursor plane is handled after stream updates */
9460 if (plane->type == DRM_PLANE_TYPE_CURSOR) {
9461 if ((fb && crtc == pcrtc) ||
9462 (old_plane_state->fb && old_plane_state->crtc == pcrtc))
9463 cursor_update = true;
9468 if (!fb || !crtc || pcrtc != crtc)
9471 new_crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
9472 if (!new_crtc_state->active)
9475 dc_plane = dm_new_plane_state->dc_state;
9477 bundle->surface_updates[planes_count].surface = dc_plane;
9478 if (new_pcrtc_state->color_mgmt_changed) {
9479 bundle->surface_updates[planes_count].gamma = dc_plane->gamma_correction;
9480 bundle->surface_updates[planes_count].in_transfer_func = dc_plane->in_transfer_func;
9481 bundle->surface_updates[planes_count].gamut_remap_matrix = &dc_plane->gamut_remap_matrix;
9484 fill_dc_scaling_info(dm->adev, new_plane_state,
9485 &bundle->scaling_infos[planes_count]);
9487 bundle->surface_updates[planes_count].scaling_info =
9488 &bundle->scaling_infos[planes_count];
9490 plane_needs_flip = old_plane_state->fb && new_plane_state->fb;
9492 pflip_present = pflip_present || plane_needs_flip;
9494 if (!plane_needs_flip) {
9499 fill_dc_plane_info_and_addr(
9500 dm->adev, new_plane_state,
9502 &bundle->plane_infos[planes_count],
9503 &bundle->flip_addrs[planes_count].address,
9504 afb->tmz_surface, false);
9506 drm_dbg_state(state->dev, "plane: id=%d dcc_en=%d\n",
9507 new_plane_state->plane->index,
9508 bundle->plane_infos[planes_count].dcc.enable);
9510 bundle->surface_updates[planes_count].plane_info =
9511 &bundle->plane_infos[planes_count];
9513 fill_dc_dirty_rects(plane, old_plane_state, new_plane_state,
9515 &bundle->flip_addrs[planes_count]);
9518 * Only allow immediate flips for fast updates that don't
9519 * change the FB pitch, DCC state, rotation or mirroring.
9521 bundle->flip_addrs[planes_count].flip_immediate =
9522 crtc->state->async_flip &&
9523 acrtc_state->update_type == UPDATE_TYPE_FAST;
9525 timestamp_ns = ktime_get_ns();
9526 bundle->flip_addrs[planes_count].flip_timestamp_in_us = div_u64(timestamp_ns, 1000);
9527 bundle->surface_updates[planes_count].flip_addr = &bundle->flip_addrs[planes_count];
9528 bundle->surface_updates[planes_count].surface = dc_plane;
9530 if (!bundle->surface_updates[planes_count].surface) {
9531 DRM_ERROR("No surface for CRTC: id=%d\n",
9532 acrtc_attach->crtc_id);
9536 if (plane == pcrtc->primary)
9537 update_freesync_state_on_stream(
9540 acrtc_state->stream,
9542 bundle->flip_addrs[planes_count].flip_timestamp_in_us);
9544 drm_dbg_state(state->dev, "%s Flipping to hi: 0x%x, low: 0x%x\n",
9546 bundle->flip_addrs[planes_count].address.grph.addr.high_part,
9547 bundle->flip_addrs[planes_count].address.grph.addr.low_part);
9553 if (pflip_present) {
9555 /* Use old throttling in non-vrr fixed refresh rate mode
9556 * to keep flip scheduling based on target vblank counts
9557 * working in a backwards compatible way, e.g., for
9558 * clients using the GLX_OML_sync_control extension or
9559 * DRI3/Present extension with defined target_msc.
9561 last_flip_vblank = amdgpu_get_vblank_counter_kms(pcrtc);
9564 /* For variable refresh rate mode only:
9565 * Get vblank of last completed flip to avoid > 1 vrr
9566 * flips per video frame by use of throttling, but allow
9567 * flip programming anywhere in the possibly large
9568 * variable vrr vblank interval for fine-grained flip
9569 * timing control and more opportunity to avoid stutter
9570 * on late submission of flips.
9572 spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
9573 last_flip_vblank = acrtc_attach->dm_irq_params.last_flip_vblank;
9574 spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
9577 target_vblank = last_flip_vblank + wait_for_vblank;
9580 * Wait until we're out of the vertical blank period before the one
9581 * targeted by the flip
9583 while ((acrtc_attach->enabled &&
9584 (amdgpu_display_get_crtc_scanoutpos(dm->ddev, acrtc_attach->crtc_id,
9585 0, &vpos, &hpos, NULL,
9586 NULL, &pcrtc->hwmode)
9587 & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK)) ==
9588 (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK) &&
9589 (int)(target_vblank -
9590 amdgpu_get_vblank_counter_kms(pcrtc)) > 0)) {
9591 usleep_range(1000, 1100);
9595 * Prepare the flip event for the pageflip interrupt to handle.
9597 * This only works in the case where we've already turned on the
9598 * appropriate hardware blocks (e.g. HUBP), so in the transition case
9599 * from 0 -> n planes we have to skip a hardware-generated event
9600 * and rely on sending it from software.
9602 if (acrtc_attach->base.state->event &&
9603 acrtc_state->active_planes > 0) {
9604 drm_crtc_vblank_get(pcrtc);
9606 spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
9608 WARN_ON(acrtc_attach->pflip_status != AMDGPU_FLIP_NONE);
9609 prepare_flip_isr(acrtc_attach);
9611 spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
9614 if (acrtc_state->stream) {
9615 if (acrtc_state->freesync_vrr_info_changed)
9616 bundle->stream_update.vrr_infopacket =
9617 &acrtc_state->stream->vrr_infopacket;
9619 } else if (cursor_update && acrtc_state->active_planes > 0 &&
9620 acrtc_attach->base.state->event) {
9621 drm_crtc_vblank_get(pcrtc);
9623 spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
9625 acrtc_attach->event = acrtc_attach->base.state->event;
9626 acrtc_attach->base.state->event = NULL;
9628 spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
9631 /* Update the planes if changed or disable if we don't have any. */
9632 if ((planes_count || acrtc_state->active_planes == 0) &&
9633 acrtc_state->stream) {
9635 * If PSR or idle optimizations are enabled then flush out
9636 * any pending work before hardware programming.
9638 if (dm->vblank_control_workqueue)
9639 flush_workqueue(dm->vblank_control_workqueue);
9641 bundle->stream_update.stream = acrtc_state->stream;
9642 if (new_pcrtc_state->mode_changed) {
9643 bundle->stream_update.src = acrtc_state->stream->src;
9644 bundle->stream_update.dst = acrtc_state->stream->dst;
9647 if (new_pcrtc_state->color_mgmt_changed) {
9649 * TODO: This isn't fully correct since we've actually
9650 * already modified the stream in place.
9652 bundle->stream_update.gamut_remap =
9653 &acrtc_state->stream->gamut_remap_matrix;
9654 bundle->stream_update.output_csc_transform =
9655 &acrtc_state->stream->csc_color_matrix;
9656 bundle->stream_update.out_transfer_func =
9657 acrtc_state->stream->out_transfer_func;
9660 acrtc_state->stream->abm_level = acrtc_state->abm_level;
9661 if (acrtc_state->abm_level != dm_old_crtc_state->abm_level)
9662 bundle->stream_update.abm_level = &acrtc_state->abm_level;
9665 * If FreeSync state on the stream has changed then we need to
9666 * re-adjust the min/max bounds now that DC doesn't handle this
9667 * as part of commit.
9669 if (is_dc_timing_adjust_needed(dm_old_crtc_state, acrtc_state)) {
9670 spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
9671 dc_stream_adjust_vmin_vmax(
9672 dm->dc, acrtc_state->stream,
9673 &acrtc_attach->dm_irq_params.vrr_params.adjust);
9674 spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
9676 mutex_lock(&dm->dc_lock);
9677 if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
9678 acrtc_state->stream->link->psr_settings.psr_allow_active)
9679 amdgpu_dm_psr_disable(acrtc_state->stream);
9681 dc_commit_updates_for_stream(dm->dc,
9682 bundle->surface_updates,
9684 acrtc_state->stream,
9685 &bundle->stream_update,
9689 * Enable or disable the interrupts on the backend.
9691 * Most pipes are put into power gating when unused.
9693 * When power gating is enabled on a pipe we lose the
9694 * interrupt enablement state when power gating is disabled.
9696 * So we need to update the IRQ control state in hardware
9697 * whenever the pipe turns on (since it could be previously
9698 * power gated) or off (since some pipes can't be power gated on some ASICs).
9701 if (dm_old_crtc_state->active_planes != acrtc_state->active_planes)
9702 dm_update_pflip_irq_state(drm_to_adev(dev),
9705 if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
9706 acrtc_state->stream->link->psr_settings.psr_version != DC_PSR_VERSION_UNSUPPORTED &&
9707 !acrtc_state->stream->link->psr_settings.psr_feature_enabled)
9708 amdgpu_dm_link_setup_psr(acrtc_state->stream);
9710 /* Decrement skip count when PSR is enabled and we're doing fast updates. */
9711 if (acrtc_state->update_type == UPDATE_TYPE_FAST &&
9712 acrtc_state->stream->link->psr_settings.psr_feature_enabled) {
9713 struct amdgpu_dm_connector *aconn =
9714 (struct amdgpu_dm_connector *)acrtc_state->stream->dm_stream_context;
9716 if (aconn->psr_skip_count > 0)
9717 aconn->psr_skip_count--;
9719 /* Allow PSR when skip count is 0. */
9720 acrtc_attach->dm_irq_params.allow_psr_entry = !aconn->psr_skip_count;
9723 * If sink supports PSR SU, there is no need to rely on
9724 * a vblank event disable request to enable PSR. PSR SU
9725 * can be enabled immediately once OS demonstrates an
9726 * adequate number of fast atomic commits to notify KMD
9727 * of update events. See `vblank_control_worker()`.
9729 if (acrtc_state->stream->link->psr_settings.psr_version >= DC_PSR_VERSION_SU_1 &&
9730 acrtc_attach->dm_irq_params.allow_psr_entry &&
9731 !acrtc_state->stream->link->psr_settings.psr_allow_active)
9732 amdgpu_dm_psr_enable(acrtc_state->stream);
9734 acrtc_attach->dm_irq_params.allow_psr_entry = false;
9737 mutex_unlock(&dm->dc_lock);
9741 * Update cursor state *after* programming all the planes.
9742 * This avoids redundant programming in the case where we're going
9743 * to be disabling a single plane - those pipes are being disabled.
9745 if (acrtc_state->active_planes)
9746 amdgpu_dm_commit_cursors(state);
9752 static void amdgpu_dm_commit_audio(struct drm_device *dev,
9753 struct drm_atomic_state *state)
9755 struct amdgpu_device *adev = drm_to_adev(dev);
9756 struct amdgpu_dm_connector *aconnector;
9757 struct drm_connector *connector;
9758 struct drm_connector_state *old_con_state, *new_con_state;
9759 struct drm_crtc_state *new_crtc_state;
9760 struct dm_crtc_state *new_dm_crtc_state;
9761 const struct dc_stream_status *status;
9764 /* Notify device removals. */
9765 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
9766 if (old_con_state->crtc != new_con_state->crtc) {
9767 /* CRTC changes require notification. */
9771 if (!new_con_state->crtc)
9774 new_crtc_state = drm_atomic_get_new_crtc_state(
9775 state, new_con_state->crtc);
9777 if (!new_crtc_state)
9780 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
9784 aconnector = to_amdgpu_dm_connector(connector);
9786 mutex_lock(&adev->dm.audio_lock);
9787 inst = aconnector->audio_inst;
9788 aconnector->audio_inst = -1;
9789 mutex_unlock(&adev->dm.audio_lock);
9791 amdgpu_dm_audio_eld_notify(adev, inst);
9794 /* Notify audio device additions. */
9795 for_each_new_connector_in_state(state, connector, new_con_state, i) {
9796 if (!new_con_state->crtc)
9799 new_crtc_state = drm_atomic_get_new_crtc_state(
9800 state, new_con_state->crtc);
9802 if (!new_crtc_state)
9805 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
9808 new_dm_crtc_state = to_dm_crtc_state(new_crtc_state);
9809 if (!new_dm_crtc_state->stream)
9812 status = dc_stream_get_status(new_dm_crtc_state->stream);
9816 aconnector = to_amdgpu_dm_connector(connector);
9818 mutex_lock(&adev->dm.audio_lock);
9819 inst = status->audio_inst;
9820 aconnector->audio_inst = inst;
9821 mutex_unlock(&adev->dm.audio_lock);
9823 amdgpu_dm_audio_eld_notify(adev, inst);
9828 * amdgpu_dm_crtc_copy_transient_flags - copy mirrored flags from DRM to DC
9829 * @crtc_state: the DRM CRTC state
9830 * @stream_state: the DC stream state.
9832 * Copy the mirrored transient state flags from DRM to DC. It is used to bring
9833 * a dc_stream_state's flags in sync with a drm_crtc_state's flags.
9835 static void amdgpu_dm_crtc_copy_transient_flags(struct drm_crtc_state *crtc_state,
9836 struct dc_stream_state *stream_state)
9838 stream_state->mode_changed = drm_atomic_crtc_needs_modeset(crtc_state);
9842 * amdgpu_dm_atomic_commit_tail() - AMDgpu DM's commit tail implementation.
9843 * @state: The atomic state to commit
9845 * This will tell DC to commit the constructed DC state from atomic_check,
9846 * programming the hardware. Any failure here implies a hardware failure, since
9847 * atomic check should have filtered out anything non-kosher.
9849 static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
9851 struct drm_device *dev = state->dev;
9852 struct amdgpu_device *adev = drm_to_adev(dev);
9853 struct amdgpu_display_manager *dm = &adev->dm;
9854 struct dm_atomic_state *dm_state;
9855 struct dc_state *dc_state = NULL, *dc_state_temp = NULL;
9857 struct drm_crtc *crtc;
9858 struct drm_crtc_state *old_crtc_state, *new_crtc_state;
9859 unsigned long flags;
9860 bool wait_for_vblank = true;
9861 struct drm_connector *connector;
9862 struct drm_connector_state *old_con_state, *new_con_state;
9863 struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
9864 int crtc_disable_count = 0;
9865 bool mode_set_reset_required = false;
9868 trace_amdgpu_dm_atomic_commit_tail_begin(state);
9870 r = drm_atomic_helper_wait_for_fences(dev, state, false);
9872 DRM_ERROR("Waiting for fences timed out!");
9874 drm_atomic_helper_update_legacy_modeset_state(dev, state);
9876 dm_state = dm_atomic_get_new_state(state);
9877 if (dm_state && dm_state->context) {
9878 dc_state = dm_state->context;
9880 /* No state changes, retain current state. */
9881 dc_state_temp = dc_create_state(dm->dc);
9882 ASSERT(dc_state_temp);
9883 dc_state = dc_state_temp;
9884 dc_resource_state_copy_construct_current(dm->dc, dc_state);
9887 for_each_oldnew_crtc_in_state (state, crtc, old_crtc_state,
9888 new_crtc_state, i) {
9889 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
9891 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9893 if (old_crtc_state->active &&
9894 (!new_crtc_state->active ||
9895 drm_atomic_crtc_needs_modeset(new_crtc_state))) {
9896 manage_dm_interrupts(adev, acrtc, false);
9897 dc_stream_release(dm_old_crtc_state->stream);
9901 drm_atomic_helper_calc_timestamping_constants(state);
9903 /* update changed items */
9904 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
9905 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
9907 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9908 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9910 drm_dbg_state(state->dev,
9911 "amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
9912 "planes_changed:%d, mode_changed:%d,active_changed:%d,"
9913 "connectors_changed:%d\n",
9915 new_crtc_state->enable,
9916 new_crtc_state->active,
9917 new_crtc_state->planes_changed,
9918 new_crtc_state->mode_changed,
9919 new_crtc_state->active_changed,
9920 new_crtc_state->connectors_changed);
9922 /* Disable cursor if disabling crtc */
9923 if (old_crtc_state->active && !new_crtc_state->active) {
9924 struct dc_cursor_position position;
9926 memset(&position, 0, sizeof(position));
9927 mutex_lock(&dm->dc_lock);
9928 dc_stream_set_cursor_position(dm_old_crtc_state->stream, &position);
9929 mutex_unlock(&dm->dc_lock);
9932 /* Copy all transient state flags into dc state */
9933 if (dm_new_crtc_state->stream) {
9934 amdgpu_dm_crtc_copy_transient_flags(&dm_new_crtc_state->base,
9935 dm_new_crtc_state->stream);
9938 /* handles headless hotplug case, updating new_state and
9939 * aconnector as needed
9942 if (modeset_required(new_crtc_state, dm_new_crtc_state->stream, dm_old_crtc_state->stream)) {
9944 DRM_DEBUG_ATOMIC("Atomic commit: SET crtc id %d: [%p]\n", acrtc->crtc_id, acrtc);
9946 if (!dm_new_crtc_state->stream) {
9948 * This could happen because of issues with
9949 * userspace notification delivery.
9950 * In this case userspace tries to set a mode on
9951 * a display which is in fact disconnected.
9952 * dc_sink is NULL on the aconnector in this case.
9953 * We expect a mode reset to come soon.
9955 * This can also happen when an unplug is done
9956 * during the resume sequence.
9958 * In this case, we want to pretend we still
9959 * have a sink to keep the pipe running so that
9960 * hw state is consistent with the sw state.
9962 DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
9963 __func__, acrtc->base.base.id);
9967 if (dm_old_crtc_state->stream)
9968 remove_stream(adev, acrtc, dm_old_crtc_state->stream);
9970 pm_runtime_get_noresume(dev->dev);
9972 acrtc->enabled = true;
9973 acrtc->hw_mode = new_crtc_state->mode;
9974 crtc->hwmode = new_crtc_state->mode;
9975 mode_set_reset_required = true;
9976 } else if (modereset_required(new_crtc_state)) {
9977 DRM_DEBUG_ATOMIC("Atomic commit: RESET. crtc id %d:[%p]\n", acrtc->crtc_id, acrtc);
9978 /* i.e. reset mode */
9979 if (dm_old_crtc_state->stream)
9980 remove_stream(adev, acrtc, dm_old_crtc_state->stream);
9982 mode_set_reset_required = true;
9984 } /* for_each_crtc_in_state() */
9987 /* If there was a mode set or reset, disable eDP PSR. */
9988 if (mode_set_reset_required) {
9989 if (dm->vblank_control_workqueue)
9990 flush_workqueue(dm->vblank_control_workqueue);
9992 amdgpu_dm_psr_disable_all(dm);
9995 dm_enable_per_frame_crtc_master_sync(dc_state);
9996 mutex_lock(&dm->dc_lock);
9997 WARN_ON(!dc_commit_state(dm->dc, dc_state));
9999 /* Allow idle optimization when vblank count is 0 for display off */
10000 if (dm->active_vblank_irq_count == 0)
10001 dc_allow_idle_optimizations(dm->dc, true);
10002 mutex_unlock(&dm->dc_lock);
10005 for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
10006 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
10008 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
10010 if (dm_new_crtc_state->stream != NULL) {
10011 const struct dc_stream_status *status =
10012 dc_stream_get_status(dm_new_crtc_state->stream);
10015 status = dc_stream_get_status_from_state(dc_state,
10016 dm_new_crtc_state->stream);
10018 DC_ERR("got no status for stream %p on acrtc%p\n", dm_new_crtc_state->stream, acrtc);
10020 acrtc->otg_inst = status->primary_otg_inst;
10023 #ifdef CONFIG_DRM_AMD_DC_HDCP
10024 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
10025 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
10026 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
10027 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
10029 new_crtc_state = NULL;
10032 new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
10034 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
10036 if (dm_new_crtc_state && dm_new_crtc_state->stream == NULL &&
10037 connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED) {
10038 hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
10039 new_con_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
10040 dm_new_con_state->update_hdcp = true;
10044 if (is_content_protection_different(new_con_state, old_con_state, connector, adev->dm.hdcp_workqueue))
10045 hdcp_update_display(
10046 adev->dm.hdcp_workqueue, aconnector->dc_link->link_index, aconnector,
10047 new_con_state->hdcp_content_type,
10048 new_con_state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED);
10052 /* Handle connector state changes */
10053 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
10054 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
10055 struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
10056 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
10057 struct dc_surface_update dummy_updates[MAX_SURFACES];
10058 struct dc_stream_update stream_update;
10059 struct dc_info_packet hdr_packet;
10060 struct dc_stream_status *status = NULL;
10061 bool abm_changed, hdr_changed, scaling_changed;
10063 memset(&dummy_updates, 0, sizeof(dummy_updates));
10064 memset(&stream_update, 0, sizeof(stream_update));
10067 new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
10068 old_crtc_state = drm_atomic_get_old_crtc_state(state, &acrtc->base);
10071 /* Skip any modesets/resets */
10072 if (!acrtc || drm_atomic_crtc_needs_modeset(new_crtc_state))
10075 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
10076 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
10078 scaling_changed = is_scaling_state_different(dm_new_con_state,
10081 abm_changed = dm_new_crtc_state->abm_level !=
10082 dm_old_crtc_state->abm_level;
10085 !drm_connector_atomic_hdr_metadata_equal(old_con_state, new_con_state);
10087 if (!scaling_changed && !abm_changed && !hdr_changed)
10090 stream_update.stream = dm_new_crtc_state->stream;
10091 if (scaling_changed) {
10092 update_stream_scaling_settings(&dm_new_con_state->base.crtc->mode,
10093 dm_new_con_state, dm_new_crtc_state->stream);
10095 stream_update.src = dm_new_crtc_state->stream->src;
10096 stream_update.dst = dm_new_crtc_state->stream->dst;
10100 dm_new_crtc_state->stream->abm_level = dm_new_crtc_state->abm_level;
10102 stream_update.abm_level = &dm_new_crtc_state->abm_level;
10106 fill_hdr_info_packet(new_con_state, &hdr_packet);
10107 stream_update.hdr_static_metadata = &hdr_packet;
10110 status = dc_stream_get_status(dm_new_crtc_state->stream);
10112 if (WARN_ON(!status))
10115 WARN_ON(!status->plane_count);
10118 * TODO: DC refuses to perform stream updates without a dc_surface_update.
10119 * Here we create an empty update on each plane.
10120 * To fix this, DC should permit updating only stream properties.
10122 for (j = 0; j < status->plane_count; j++)
10123 dummy_updates[j].surface = status->plane_states[0];
10126 mutex_lock(&dm->dc_lock);
10127 dc_commit_updates_for_stream(dm->dc,
10129 status->plane_count,
10130 dm_new_crtc_state->stream,
10133 mutex_unlock(&dm->dc_lock);
10136 /* Count number of newly disabled CRTCs for dropping PM refs later. */
10137 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
10138 new_crtc_state, i) {
10139 if (old_crtc_state->active && !new_crtc_state->active)
10140 crtc_disable_count++;
10142 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
10143 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
10145 /* Update the freesync config on the crtc state and the parameters used by the irq handler */
10146 update_stream_irq_parameters(dm, dm_new_crtc_state);
10148 /* Handle vrr on->off / off->on transitions */
10149 amdgpu_dm_handle_vrr_transition(dm_old_crtc_state,
10150 dm_new_crtc_state);
10154 * Enable interrupts for CRTCs that are newly enabled or went through
10155 * a modeset. This is deliberately deferred until after the front end
10156 * state has been modified, so that the OTG is already on and the IRQ
10157 * handlers don't access stale or invalid state.
10159 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
10160 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
10161 #ifdef CONFIG_DEBUG_FS
10162 bool configure_crc = false;
10163 enum amdgpu_dm_pipe_crc_source cur_crc_src;
10164 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
10165 struct crc_rd_work *crc_rd_wrk = dm->crc_rd_wrk;
10167 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
10168 cur_crc_src = acrtc->dm_irq_params.crc_src;
10169 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
10171 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
10173 if (new_crtc_state->active &&
10174 (!old_crtc_state->active ||
10175 drm_atomic_crtc_needs_modeset(new_crtc_state))) {
10176 dc_stream_retain(dm_new_crtc_state->stream);
10177 acrtc->dm_irq_params.stream = dm_new_crtc_state->stream;
10178 manage_dm_interrupts(adev, acrtc, true);
10180 #ifdef CONFIG_DEBUG_FS
10182 * Frontend may have changed so reapply the CRC capture
10183 * settings for the stream.
10185 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
10187 if (amdgpu_dm_is_valid_crc_source(cur_crc_src)) {
10188 configure_crc = true;
10189 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
10190 if (amdgpu_dm_crc_window_is_activated(crtc)) {
10191 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
10192 acrtc->dm_irq_params.crc_window.update_win = true;
10193 acrtc->dm_irq_params.crc_window.skip_frame_cnt = 2;
10194 spin_lock_irq(&crc_rd_wrk->crc_rd_work_lock);
10195 crc_rd_wrk->crtc = crtc;
10196 spin_unlock_irq(&crc_rd_wrk->crc_rd_work_lock);
10197 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
10203 if (amdgpu_dm_crtc_configure_crc_source(
10204 crtc, dm_new_crtc_state, cur_crc_src))
10205 DRM_DEBUG_DRIVER("Failed to configure crc source");
10210 for_each_new_crtc_in_state(state, crtc, new_crtc_state, j)
10211 if (new_crtc_state->async_flip)
10212 wait_for_vblank = false;
10214 /* Update planes when needed, per CRTC */
10215 for_each_new_crtc_in_state(state, crtc, new_crtc_state, j) {
10216 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
10218 if (dm_new_crtc_state->stream)
10219 amdgpu_dm_commit_planes(state, dc_state, dev,
10220 dm, crtc, wait_for_vblank);
10223 /* Update audio instances for each connector. */
10224 amdgpu_dm_commit_audio(dev, state);
10226 /* restore the backlight level */
10227 for (i = 0; i < dm->num_of_edps; i++) {
10228 if (dm->backlight_dev[i] &&
10229 (dm->actual_brightness[i] != dm->brightness[i]))
10230 amdgpu_dm_backlight_set_level(dm, i, dm->brightness[i]);
10234 * Send a vblank event for every event not handled in the flip path, and
10235 * mark the event consumed for drm_atomic_helper_commit_hw_done()
10237 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
10238 for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
10240 if (new_crtc_state->event)
10241 drm_send_event_locked(dev, &new_crtc_state->event->base);
10243 new_crtc_state->event = NULL;
10245 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
10247 /* Signal HW programming completion */
10248 drm_atomic_helper_commit_hw_done(state);
10250 if (wait_for_vblank)
10251 drm_atomic_helper_wait_for_flip_done(dev, state);
10253 drm_atomic_helper_cleanup_planes(dev, state);
10255 /* return the stolen vga memory back to VRAM */
10256 if (!adev->mman.keep_stolen_vga_memory)
10257 amdgpu_bo_free_kernel(&adev->mman.stolen_vga_memory, NULL, NULL);
10258 amdgpu_bo_free_kernel(&adev->mman.stolen_extended_memory, NULL, NULL);
10261 * Finally, drop a runtime PM reference for each newly disabled CRTC,
10262 * so we can put the GPU into runtime suspend if we're not driving any displays anymore.
10265 for (i = 0; i < crtc_disable_count; i++)
10266 pm_runtime_put_autosuspend(dev->dev);
10267 pm_runtime_mark_last_busy(dev->dev);
10270 dc_release_state(dc_state_temp);
10274 static int dm_force_atomic_commit(struct drm_connector *connector)
10277 struct drm_device *ddev = connector->dev;
10278 struct drm_atomic_state *state = drm_atomic_state_alloc(ddev);
10279 struct amdgpu_crtc *disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
10280 struct drm_plane *plane = disconnected_acrtc->base.primary;
10281 struct drm_connector_state *conn_state;
10282 struct drm_crtc_state *crtc_state;
10283 struct drm_plane_state *plane_state;
10288 state->acquire_ctx = ddev->mode_config.acquire_ctx;
10290 /* Construct an atomic state to restore previous display setting */
10293 * Attach connectors to drm_atomic_state
10295 conn_state = drm_atomic_get_connector_state(state, connector);
10297 ret = PTR_ERR_OR_ZERO(conn_state);
10301 /* Attach CRTC to drm_atomic_state */
10302 crtc_state = drm_atomic_get_crtc_state(state, &disconnected_acrtc->base);
10304 ret = PTR_ERR_OR_ZERO(crtc_state);
10308 /* force a restore */
10309 crtc_state->mode_changed = true;
10311 /* Attach plane to drm_atomic_state */
10312 plane_state = drm_atomic_get_plane_state(state, plane);
10314 ret = PTR_ERR_OR_ZERO(plane_state);
10318 /* Call commit internally with the state we just constructed */
10319 ret = drm_atomic_commit(state);
10322 drm_atomic_state_put(state);
10324 DRM_ERROR("Restoring old state failed with %i\n", ret);
10330 * This function handles all cases when a set-mode call does not come upon hotplug.
10331 * This includes when a display is unplugged and then plugged back into the
10332 * same port, and when running without usermode desktop manager support.
10334 void dm_restore_drm_connector_state(struct drm_device *dev,
10335 struct drm_connector *connector)
10337 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
10338 struct amdgpu_crtc *disconnected_acrtc;
10339 struct dm_crtc_state *acrtc_state;
10341 if (!aconnector->dc_sink || !connector->state || !connector->encoder)
10344 disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
10345 if (!disconnected_acrtc)
10348 acrtc_state = to_dm_crtc_state(disconnected_acrtc->base.state);
10349 if (!acrtc_state->stream)
10353 * If the previous sink is not released and differs from the current one,
10354 * we deduce that we are in a state where we cannot rely on a usermode
10355 * call to turn on the display, so we do it here.
10357 if (acrtc_state->stream->sink != aconnector->dc_sink)
10358 dm_force_atomic_commit(&aconnector->base);
10362 * Grabs all modesetting locks to serialize against any blocking commits,
10363 * and waits for completion of all non-blocking commits.
10365 static int do_aquire_global_lock(struct drm_device *dev,
10366 struct drm_atomic_state *state)
10368 struct drm_crtc *crtc;
10369 struct drm_crtc_commit *commit;
10373 * Adding all modeset locks to acquire_ctx will
10374 * ensure that when the framework releases it, the
10375 * extra locks we are taking here will get released too.
10377 ret = drm_modeset_lock_all_ctx(dev, state->acquire_ctx);
10381 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
10382 spin_lock(&crtc->commit_lock);
10383 commit = list_first_entry_or_null(&crtc->commit_list,
10384 struct drm_crtc_commit, commit_entry);
10386 drm_crtc_commit_get(commit);
10387 spin_unlock(&crtc->commit_lock);
10393 * Make sure all pending HW programming has completed and all page flips are done.
10396 ret = wait_for_completion_interruptible_timeout(&commit->hw_done, 10*HZ);
10399 ret = wait_for_completion_interruptible_timeout(
10400 &commit->flip_done, 10*HZ);
10403 DRM_ERROR("[CRTC:%d:%s] hw_done or flip_done "
10404 "timed out\n", crtc->base.id, crtc->name);
10406 drm_crtc_commit_put(commit);
10409 return ret < 0 ? ret : 0;
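/*
 * Note on the return handling above: wait_for_completion_interruptible_timeout()
 * returns -ERESTARTSYS if interrupted by a signal, 0 on timeout, and the number
 * of remaining jiffies (>= 1) on completion.  Mapping that through
 * "ret < 0 ? ret : 0" therefore propagates signals to the caller while treating
 * both completion and timeout as success (the timeout case is only logged).
 * A minimal sketch of the same pattern, assuming a hypothetical completion
 * "done" and a 10 second budget:
 *
 *	long t = wait_for_completion_interruptible_timeout(&done, 10 * HZ);
 *
 *	if (t < 0)
 *		return t;		// interrupted, e.g. -ERESTARTSYS
 *	if (t == 0)
 *		pr_warn("timed out\n");	// log it, but keep going
 *	return 0;
 */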
10412 static void get_freesync_config_for_crtc(
10413 struct dm_crtc_state *new_crtc_state,
10414 struct dm_connector_state *new_con_state)
10416 struct mod_freesync_config config = {0};
10417 struct amdgpu_dm_connector *aconnector =
10418 to_amdgpu_dm_connector(new_con_state->base.connector);
10419 struct drm_display_mode *mode = &new_crtc_state->base.mode;
10420 int vrefresh = drm_mode_vrefresh(mode);
10421 bool fs_vid_mode = false;
10423 new_crtc_state->vrr_supported = new_con_state->freesync_capable &&
10424 vrefresh >= aconnector->min_vfreq &&
10425 vrefresh <= aconnector->max_vfreq;
10427 if (new_crtc_state->vrr_supported) {
10428 new_crtc_state->stream->ignore_msa_timing_param = true;
10429 fs_vid_mode = new_crtc_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED;
10431 config.min_refresh_in_uhz = aconnector->min_vfreq * 1000000;
10432 config.max_refresh_in_uhz = aconnector->max_vfreq * 1000000;
10433 config.vsif_supported = true;
10437 config.state = VRR_STATE_ACTIVE_FIXED;
10438 config.fixed_refresh_in_uhz = new_crtc_state->freesync_config.fixed_refresh_in_uhz;
10440 } else if (new_crtc_state->base.vrr_enabled) {
10441 config.state = VRR_STATE_ACTIVE_VARIABLE;
10443 config.state = VRR_STATE_INACTIVE;
10447 new_crtc_state->freesync_config = config;
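/*
 * Worked example (illustrative numbers assumed): for a FreeSync panel that
 * reports a 48-144 Hz range, the helper above fills
 *
 *	config.min_refresh_in_uhz =  48 * 1000000;	//  48,000,000 uHz
 *	config.max_refresh_in_uhz = 144 * 1000000;	// 144,000,000 uHz
 *
 * and VRR is only reported as supported when the mode's nominal vrefresh
 * (drm_mode_vrefresh()) falls inside that range, e.g. a 60 Hz mode qualifies
 * while a 30 Hz mode does not.
 */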
10450 static void reset_freesync_config_for_crtc(
10451 struct dm_crtc_state *new_crtc_state)
10453 new_crtc_state->vrr_supported = false;
10455 memset(&new_crtc_state->vrr_infopacket, 0,
10456 sizeof(new_crtc_state->vrr_infopacket));
10460 is_timing_unchanged_for_freesync(struct drm_crtc_state *old_crtc_state,
10461 struct drm_crtc_state *new_crtc_state)
10463 const struct drm_display_mode *old_mode, *new_mode;
10465 if (!old_crtc_state || !new_crtc_state)
10468 old_mode = &old_crtc_state->mode;
10469 new_mode = &new_crtc_state->mode;
10471 if (old_mode->clock == new_mode->clock &&
10472 old_mode->hdisplay == new_mode->hdisplay &&
10473 old_mode->vdisplay == new_mode->vdisplay &&
10474 old_mode->htotal == new_mode->htotal &&
10475 old_mode->vtotal != new_mode->vtotal &&
10476 old_mode->hsync_start == new_mode->hsync_start &&
10477 old_mode->vsync_start != new_mode->vsync_start &&
10478 old_mode->hsync_end == new_mode->hsync_end &&
10479 old_mode->vsync_end != new_mode->vsync_end &&
10480 old_mode->hskew == new_mode->hskew &&
10481 old_mode->vscan == new_mode->vscan &&
10482 (old_mode->vsync_end - old_mode->vsync_start) ==
10483 (new_mode->vsync_end - new_mode->vsync_start))
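/*
 * In other words (illustrative numbers assumed): the check above matches the
 * freesync video modes that differ only in the vertical front porch.  For
 * example, two 1920x1080 modes with identical clock and horizontal timing
 * where
 *
 *	old: vdisplay 1080, vsync_start 1084, vsync_end 1089, vtotal 1125
 *	new: vdisplay 1080, vsync_start 1534, vsync_end 1539, vtotal 1575
 *
 * keep the same sync pulse width (5 lines) while only the front porch grows,
 * so the timing is treated as unchanged for freesync purposes.
 */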
10489 static void set_freesync_fixed_config(struct dm_crtc_state *dm_new_crtc_state) {
10490 uint64_t num, den, res;
10491 struct drm_crtc_state *new_crtc_state = &dm_new_crtc_state->base;
10493 dm_new_crtc_state->freesync_config.state = VRR_STATE_ACTIVE_FIXED;
10495 num = (unsigned long long)new_crtc_state->mode.clock * 1000 * 1000000;
10496 den = (unsigned long long)new_crtc_state->mode.htotal *
10497 (unsigned long long)new_crtc_state->mode.vtotal;
10499 res = div_u64(num, den);
10500 dm_new_crtc_state->freesync_config.fixed_refresh_in_uhz = res;
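/*
 * Worked example for the fixed-refresh computation above (illustrative): for
 * a standard 1920x1080@60 CEA mode, mode.clock = 148500 (kHz), htotal = 2200
 * and vtotal = 1125, so
 *
 *	num = 148500 * 1000 * 1000000 = 148,500,000,000,000
 *	den = 2200 * 1125             = 2,475,000
 *	res = num / den               = 60,000,000 uHz  (i.e. 60 Hz)
 *
 * which is the value stored in freesync_config.fixed_refresh_in_uhz.
 */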
10503 static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
10504 struct drm_atomic_state *state,
10505 struct drm_crtc *crtc,
10506 struct drm_crtc_state *old_crtc_state,
10507 struct drm_crtc_state *new_crtc_state,
10509 bool *lock_and_validation_needed)
10511 struct dm_atomic_state *dm_state = NULL;
10512 struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
10513 struct dc_stream_state *new_stream;
10517 * TODO Move this code into dm_crtc_atomic_check once we get rid of dc_validation_set
10518 * update changed items
10520 struct amdgpu_crtc *acrtc = NULL;
10521 struct amdgpu_dm_connector *aconnector = NULL;
10522 struct drm_connector_state *drm_new_conn_state = NULL, *drm_old_conn_state = NULL;
10523 struct dm_connector_state *dm_new_conn_state = NULL, *dm_old_conn_state = NULL;
10527 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
10528 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
10529 acrtc = to_amdgpu_crtc(crtc);
10530 aconnector = amdgpu_dm_find_first_crtc_matching_connector(state, crtc);
10532 /* TODO This hack should go away */
10533 if (aconnector && enable) {
10534 /* Make sure fake sink is created in plug-in scenario */
10535 drm_new_conn_state = drm_atomic_get_new_connector_state(state,
10536 &aconnector->base);
10537 drm_old_conn_state = drm_atomic_get_old_connector_state(state,
10538 &aconnector->base);
10540 if (IS_ERR(drm_new_conn_state)) {
10541 ret = PTR_ERR_OR_ZERO(drm_new_conn_state);
10545 dm_new_conn_state = to_dm_connector_state(drm_new_conn_state);
10546 dm_old_conn_state = to_dm_connector_state(drm_old_conn_state);
10548 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
10551 new_stream = create_validate_stream_for_sink(aconnector,
10552 &new_crtc_state->mode,
10554 dm_old_crtc_state->stream);
10557 * We can have no stream on ACTION_SET if a display
10558 * was disconnected during S3; in this case it is not an
10559 * error, the OS will be updated after detection and
10560 * will do the right thing on the next atomic commit.
10564 DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
10565 __func__, acrtc->base.base.id);
10571 * TODO: Check VSDB bits to decide whether this should
10572 * be enabled or not.
10574 new_stream->triggered_crtc_reset.enabled =
10575 dm->force_timing_sync;
10577 dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
10579 ret = fill_hdr_info_packet(drm_new_conn_state,
10580 &new_stream->hdr_static_metadata);
10585 * If we already removed the old stream from the context
10586 * (and set the new stream to NULL) then we can't reuse
10587 * the old stream even if the stream and scaling are unchanged.
10588 * We would hit the BUG_ON and end up with a black screen.
10590 * TODO: Refactor this function to allow this check to work
10591 * in all conditions.
10593 if (dm_new_crtc_state->stream &&
10594 is_timing_unchanged_for_freesync(new_crtc_state, old_crtc_state))
10597 if (dm_new_crtc_state->stream &&
10598 dc_is_stream_unchanged(new_stream, dm_old_crtc_state->stream) &&
10599 dc_is_stream_scaling_unchanged(new_stream, dm_old_crtc_state->stream)) {
10600 new_crtc_state->mode_changed = false;
10601 DRM_DEBUG_DRIVER("Mode change not required, setting mode_changed to %d",
10602 new_crtc_state->mode_changed);
10606 /* mode_changed flag may get updated above, need to check again */
10607 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
10610 drm_dbg_state(state->dev,
10611 "amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
10612 "planes_changed:%d, mode_changed:%d,active_changed:%d,"
10613 "connectors_changed:%d\n",
10615 new_crtc_state->enable,
10616 new_crtc_state->active,
10617 new_crtc_state->planes_changed,
10618 new_crtc_state->mode_changed,
10619 new_crtc_state->active_changed,
10620 new_crtc_state->connectors_changed);
10622 /* Remove stream for any changed/disabled CRTC */
10625 if (!dm_old_crtc_state->stream)
10628 if (dm_new_crtc_state->stream &&
10629 is_timing_unchanged_for_freesync(new_crtc_state,
10631 new_crtc_state->mode_changed = false;
10633 "Mode change not required for front porch change, "
10634 "setting mode_changed to %d",
10635 new_crtc_state->mode_changed);
10637 set_freesync_fixed_config(dm_new_crtc_state);
10640 } else if (aconnector &&
10641 is_freesync_video_mode(&new_crtc_state->mode,
10643 struct drm_display_mode *high_mode;
10645 high_mode = get_highest_refresh_rate_mode(aconnector, false);
10646 if (!drm_mode_equal(&new_crtc_state->mode, high_mode)) {
10647 set_freesync_fixed_config(dm_new_crtc_state);
10651 ret = dm_atomic_get_state(state, &dm_state);
10655 DRM_DEBUG_DRIVER("Disabling DRM crtc: %d\n",
10658 /* i.e. reset mode */
10659 if (dc_remove_stream_from_ctx(
10662 dm_old_crtc_state->stream) != DC_OK) {
10667 dc_stream_release(dm_old_crtc_state->stream);
10668 dm_new_crtc_state->stream = NULL;
10670 reset_freesync_config_for_crtc(dm_new_crtc_state);
10672 *lock_and_validation_needed = true;
10674 } else { /* Add stream for any updated/enabled CRTC */
10676 * Quick fix to prevent a NULL pointer dereference on new_stream when
10677 * MST connectors added in chained mode are not found in the existing crtc_state.
10678 * TODO: dig out the root cause of this.
10683 if (modereset_required(new_crtc_state))
10686 if (modeset_required(new_crtc_state, new_stream,
10687 dm_old_crtc_state->stream)) {
10689 WARN_ON(dm_new_crtc_state->stream);
10691 ret = dm_atomic_get_state(state, &dm_state);
10695 dm_new_crtc_state->stream = new_stream;
10697 dc_stream_retain(new_stream);
10699 DRM_DEBUG_ATOMIC("Enabling DRM crtc: %d\n",
10702 if (dc_add_stream_to_ctx(
10705 dm_new_crtc_state->stream) != DC_OK) {
10710 *lock_and_validation_needed = true;
10715 /* Release extra reference */
10717 dc_stream_release(new_stream);
10720 * We want to do dc stream updates that do not require a
10721 * full modeset below.
10723 if (!(enable && aconnector && new_crtc_state->active))
10726 * Given the above conditions, the dc state cannot be NULL because:
10727 * 1. We're in the process of enabling CRTCs (the stream has just been
10728 * added to the dc context, or is already in it),
10729 * 2. the CRTC has a valid connector attached, and
10730 * 3. the CRTC is currently active and enabled.
10731 * => The dc stream state currently exists.
10733 BUG_ON(dm_new_crtc_state->stream == NULL);
10735 /* Scaling or underscan settings */
10736 if (is_scaling_state_different(dm_old_conn_state, dm_new_conn_state) ||
10737 drm_atomic_crtc_needs_modeset(new_crtc_state))
10738 update_stream_scaling_settings(
10739 &new_crtc_state->mode, dm_new_conn_state, dm_new_crtc_state->stream);
10742 dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
10745 * Color management settings. We also update color properties
10746 * when a modeset is needed, to ensure it gets reprogrammed.
10748 if (dm_new_crtc_state->base.color_mgmt_changed ||
10749 drm_atomic_crtc_needs_modeset(new_crtc_state)) {
10750 ret = amdgpu_dm_update_crtc_color_mgmt(dm_new_crtc_state);
10755 /* Update Freesync settings. */
10756 get_freesync_config_for_crtc(dm_new_crtc_state,
10757 dm_new_conn_state);
10763 dc_stream_release(new_stream);
10767 static bool should_reset_plane(struct drm_atomic_state *state,
10768 struct drm_plane *plane,
10769 struct drm_plane_state *old_plane_state,
10770 struct drm_plane_state *new_plane_state)
10772 struct drm_plane *other;
10773 struct drm_plane_state *old_other_state, *new_other_state;
10774 struct drm_crtc_state *new_crtc_state;
10778 * TODO: Remove this hack once the checks below are sufficient
10779 * to determine when we need to reset all the planes on a CRTC.
10782 if (state->allow_modeset)
10785 /* Exit early if we know that we're adding or removing the plane. */
10786 if (old_plane_state->crtc != new_plane_state->crtc)
10789 /* old crtc == new_crtc == NULL, plane not in context. */
10790 if (!new_plane_state->crtc)
10794 drm_atomic_get_new_crtc_state(state, new_plane_state->crtc);
10796 if (!new_crtc_state)
10799 /* CRTC Degamma changes currently require us to recreate planes. */
10800 if (new_crtc_state->color_mgmt_changed)
10803 if (drm_atomic_crtc_needs_modeset(new_crtc_state))
10807 * If there are any new primary or overlay planes being added or
10808 * removed then the z-order can potentially change. To ensure
10809 * correct z-order and pipe acquisition the current DC architecture
10810 * requires us to remove and recreate all existing planes.
10812 * TODO: Come up with a more elegant solution for this.
10814 for_each_oldnew_plane_in_state(state, other, old_other_state, new_other_state, i) {
10815 struct amdgpu_framebuffer *old_afb, *new_afb;
10816 if (other->type == DRM_PLANE_TYPE_CURSOR)
10819 if (old_other_state->crtc != new_plane_state->crtc &&
10820 new_other_state->crtc != new_plane_state->crtc)
10823 if (old_other_state->crtc != new_other_state->crtc)
10826 /* Src/dst size and scaling updates. */
10827 if (old_other_state->src_w != new_other_state->src_w ||
10828 old_other_state->src_h != new_other_state->src_h ||
10829 old_other_state->crtc_w != new_other_state->crtc_w ||
10830 old_other_state->crtc_h != new_other_state->crtc_h)
10833 /* Rotation / mirroring updates. */
10834 if (old_other_state->rotation != new_other_state->rotation)
10837 /* Blending updates. */
10838 if (old_other_state->pixel_blend_mode !=
10839 new_other_state->pixel_blend_mode)
10842 /* Alpha updates. */
10843 if (old_other_state->alpha != new_other_state->alpha)
10846 /* Colorspace changes. */
10847 if (old_other_state->color_range != new_other_state->color_range ||
10848 old_other_state->color_encoding != new_other_state->color_encoding)
10851 /* Framebuffer checks fall at the end. */
10852 if (!old_other_state->fb || !new_other_state->fb)
10855 /* Pixel format changes can require bandwidth updates. */
10856 if (old_other_state->fb->format != new_other_state->fb->format)
10859 old_afb = (struct amdgpu_framebuffer *)old_other_state->fb;
10860 new_afb = (struct amdgpu_framebuffer *)new_other_state->fb;
10862 /* Tiling and DCC changes also require bandwidth updates. */
10863 if (old_afb->tiling_flags != new_afb->tiling_flags ||
10864 old_afb->base.modifier != new_afb->base.modifier)
10871 static int dm_check_cursor_fb(struct amdgpu_crtc *new_acrtc,
10872 struct drm_plane_state *new_plane_state,
10873 struct drm_framebuffer *fb)
10875 struct amdgpu_device *adev = drm_to_adev(new_acrtc->base.dev);
10876 struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(fb);
10877 unsigned int pitch;
10880 if (fb->width > new_acrtc->max_cursor_width ||
10881 fb->height > new_acrtc->max_cursor_height) {
10882 DRM_DEBUG_ATOMIC("Bad cursor FB size %dx%d\n",
10883 new_plane_state->fb->width,
10884 new_plane_state->fb->height);
10887 if (new_plane_state->src_w != fb->width << 16 ||
10888 new_plane_state->src_h != fb->height << 16) {
10889 DRM_DEBUG_ATOMIC("Cropping not supported for cursor plane\n");
10893 /* Pitch in pixels */
10894 pitch = fb->pitches[0] / fb->format->cpp[0];
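	/*
	 * Illustrative example (values assumed): an ARGB8888 cursor FB
	 * (cpp[0] == 4) that is 64 pixels wide with pitches[0] == 256
	 * bytes yields pitch = 256 / 4 = 64 pixels, which matches
	 * fb->width and passes the check below; a padded pitches[0] of
	 * 512 bytes would give 128 pixels and be rejected.
	 */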
10896 if (fb->width != pitch) {
10897 DRM_DEBUG_ATOMIC("Cursor FB width %d doesn't match pitch %d",
10906 /* FB pitch is supported by cursor plane */
10909 DRM_DEBUG_ATOMIC("Bad cursor FB pitch %d px\n", pitch);
10913 /* Core DRM takes care of checking FB modifiers, so we only need to
10914 * check tiling flags when the FB doesn't have a modifier. */
10915 if (!(fb->flags & DRM_MODE_FB_MODIFIERS)) {
10916 if (adev->family < AMDGPU_FAMILY_AI) {
10917 linear = AMDGPU_TILING_GET(afb->tiling_flags, ARRAY_MODE) != DC_ARRAY_2D_TILED_THIN1 &&
10918 AMDGPU_TILING_GET(afb->tiling_flags, ARRAY_MODE) != DC_ARRAY_1D_TILED_THIN1 &&
10919 AMDGPU_TILING_GET(afb->tiling_flags, MICRO_TILE_MODE) == 0;
10921 linear = AMDGPU_TILING_GET(afb->tiling_flags, SWIZZLE_MODE) == 0;
10924 DRM_DEBUG_ATOMIC("Cursor FB not linear");
10932 static int dm_update_plane_state(struct dc *dc,
10933 struct drm_atomic_state *state,
10934 struct drm_plane *plane,
10935 struct drm_plane_state *old_plane_state,
10936 struct drm_plane_state *new_plane_state,
10938 bool *lock_and_validation_needed)
10941 struct dm_atomic_state *dm_state = NULL;
10942 struct drm_crtc *new_plane_crtc, *old_plane_crtc;
10943 struct drm_crtc_state *old_crtc_state, *new_crtc_state;
10944 struct dm_crtc_state *dm_new_crtc_state, *dm_old_crtc_state;
10945 struct dm_plane_state *dm_new_plane_state, *dm_old_plane_state;
10946 struct amdgpu_crtc *new_acrtc;
10951 new_plane_crtc = new_plane_state->crtc;
10952 old_plane_crtc = old_plane_state->crtc;
10953 dm_new_plane_state = to_dm_plane_state(new_plane_state);
10954 dm_old_plane_state = to_dm_plane_state(old_plane_state);
10956 if (plane->type == DRM_PLANE_TYPE_CURSOR) {
10957 if (!enable || !new_plane_crtc ||
10958 drm_atomic_plane_disabling(plane->state, new_plane_state))
10961 new_acrtc = to_amdgpu_crtc(new_plane_crtc);
10963 if (new_plane_state->src_x != 0 || new_plane_state->src_y != 0) {
10964 DRM_DEBUG_ATOMIC("Cropping not supported for cursor plane\n");
10968 if (new_plane_state->fb) {
10969 ret = dm_check_cursor_fb(new_acrtc, new_plane_state,
10970 new_plane_state->fb);
10978 needs_reset = should_reset_plane(state, plane, old_plane_state,
10981 /* Remove any changed/removed planes */
10986 if (!old_plane_crtc)
10989 old_crtc_state = drm_atomic_get_old_crtc_state(
10990 state, old_plane_crtc);
10991 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
10993 if (!dm_old_crtc_state->stream)
10996 DRM_DEBUG_ATOMIC("Disabling DRM plane: %d on DRM crtc %d\n",
10997 plane->base.id, old_plane_crtc->base.id);
10999 ret = dm_atomic_get_state(state, &dm_state);
11003 if (!dc_remove_plane_from_context(
11005 dm_old_crtc_state->stream,
11006 dm_old_plane_state->dc_state,
11007 dm_state->context)) {
11013 dc_plane_state_release(dm_old_plane_state->dc_state);
11014 dm_new_plane_state->dc_state = NULL;
11016 *lock_and_validation_needed = true;
11018 } else { /* Add new planes */
11019 struct dc_plane_state *dc_new_plane_state;
11021 if (drm_atomic_plane_disabling(plane->state, new_plane_state))
11024 if (!new_plane_crtc)
11027 new_crtc_state = drm_atomic_get_new_crtc_state(state, new_plane_crtc);
11028 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
11030 if (!dm_new_crtc_state->stream)
11036 ret = dm_plane_helper_check_state(new_plane_state, new_crtc_state);
11040 WARN_ON(dm_new_plane_state->dc_state);
11042 dc_new_plane_state = dc_create_plane_state(dc);
11043 if (!dc_new_plane_state)
11046 DRM_DEBUG_ATOMIC("Enabling DRM plane: %d on DRM crtc %d\n",
11047 plane->base.id, new_plane_crtc->base.id);
11049 ret = fill_dc_plane_attributes(
11050 drm_to_adev(new_plane_crtc->dev),
11051 dc_new_plane_state,
11055 dc_plane_state_release(dc_new_plane_state);
11059 ret = dm_atomic_get_state(state, &dm_state);
11061 dc_plane_state_release(dc_new_plane_state);
11066 * Any atomic check errors that occur after this will
11067 * not need a release. The plane state will be attached
11068 * to the stream, and therefore part of the atomic
11069 * state. It'll be released when the atomic state is
11072 if (!dc_add_plane_to_context(
11074 dm_new_crtc_state->stream,
11075 dc_new_plane_state,
11076 dm_state->context)) {
11078 dc_plane_state_release(dc_new_plane_state);
11082 dm_new_plane_state->dc_state = dc_new_plane_state;
11084 dm_new_crtc_state->mpo_requested |= (plane->type == DRM_PLANE_TYPE_OVERLAY);
11086 /* Tell DC to do a full surface update every time there
11087 * is a plane change. Inefficient, but works for now.
11089 dm_new_plane_state->dc_state->update_flags.bits.full_update = 1;
11091 *lock_and_validation_needed = true;
11098 static void dm_get_oriented_plane_size(struct drm_plane_state *plane_state,
11099 int *src_w, int *src_h)
11101 switch (plane_state->rotation & DRM_MODE_ROTATE_MASK) {
11102 case DRM_MODE_ROTATE_90:
11103 case DRM_MODE_ROTATE_270:
11104 *src_w = plane_state->src_h >> 16;
11105 *src_h = plane_state->src_w >> 16;
11107 case DRM_MODE_ROTATE_0:
11108 case DRM_MODE_ROTATE_180:
11110 *src_w = plane_state->src_w >> 16;
11111 *src_h = plane_state->src_h >> 16;
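/*
 * Illustrative example (values assumed): plane src_w/src_h are 16.16 fixed
 * point, so a 1920x1080 source is stored as 1920 << 16 and 1080 << 16.  For
 * DRM_MODE_ROTATE_90 the helper above reports the oriented size swapped:
 *
 *	src_w = 1080;	// plane_state->src_h >> 16
 *	src_h = 1920;	// plane_state->src_w >> 16
 */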
11116 static int dm_check_crtc_cursor(struct drm_atomic_state *state,
11117 struct drm_crtc *crtc,
11118 struct drm_crtc_state *new_crtc_state)
11120 struct drm_plane *cursor = crtc->cursor, *underlying;
11121 struct drm_plane_state *new_cursor_state, *new_underlying_state;
11123 int cursor_scale_w, cursor_scale_h, underlying_scale_w, underlying_scale_h;
11124 int cursor_src_w, cursor_src_h;
11125 int underlying_src_w, underlying_src_h;
11127 /* On DCE and DCN there is no dedicated hardware cursor plane. We get a
11128 * cursor per pipe but it's going to inherit the scaling and
11129 * positioning from the underlying pipe. Check the cursor plane's
11130 * blending properties match the underlying planes'. */
11132 new_cursor_state = drm_atomic_get_new_plane_state(state, cursor);
11133 if (!new_cursor_state || !new_cursor_state->fb) {
11137 dm_get_oriented_plane_size(new_cursor_state, &cursor_src_w, &cursor_src_h);
11138 cursor_scale_w = new_cursor_state->crtc_w * 1000 / cursor_src_w;
11139 cursor_scale_h = new_cursor_state->crtc_h * 1000 / cursor_src_h;
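	/*
	 * Illustrative example (values assumed): the scale factors computed
	 * here are in units of 1/1000.  A 64x64 cursor FB shown at 64x64
	 * gives cursor_scale_w == cursor_scale_h == 1000, while an underlying
	 * plane with a 1920x1080 source scaled up to 3840x2160 gives 2000;
	 * the loop below treats that mismatch as invalid state.
	 */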
11141 for_each_new_plane_in_state_reverse(state, underlying, new_underlying_state, i) {
11142 /* Narrow down to non-cursor planes on the same CRTC as the cursor */
11143 if (new_underlying_state->crtc != crtc || underlying == crtc->cursor)
11146 /* Ignore disabled planes */
11147 if (!new_underlying_state->fb)
11150 dm_get_oriented_plane_size(new_underlying_state,
11151 &underlying_src_w, &underlying_src_h);
11152 underlying_scale_w = new_underlying_state->crtc_w * 1000 / underlying_src_w;
11153 underlying_scale_h = new_underlying_state->crtc_h * 1000 / underlying_src_h;
11155 if (cursor_scale_w != underlying_scale_w ||
11156 cursor_scale_h != underlying_scale_h) {
11157 drm_dbg_atomic(crtc->dev,
11158 "Cursor [PLANE:%d:%s] scaling doesn't match underlying [PLANE:%d:%s]\n",
11159 cursor->base.id, cursor->name, underlying->base.id, underlying->name);
11163 /* If this plane covers the whole CRTC, no need to check planes underneath */
11164 if (new_underlying_state->crtc_x <= 0 &&
11165 new_underlying_state->crtc_y <= 0 &&
11166 new_underlying_state->crtc_x + new_underlying_state->crtc_w >= new_crtc_state->mode.hdisplay &&
11167 new_underlying_state->crtc_y + new_underlying_state->crtc_h >= new_crtc_state->mode.vdisplay)
11174 #if defined(CONFIG_DRM_AMD_DC_DCN)
11175 static int add_affected_mst_dsc_crtcs(struct drm_atomic_state *state, struct drm_crtc *crtc)
11177 struct drm_connector *connector;
11178 struct drm_connector_state *conn_state, *old_conn_state;
11179 struct amdgpu_dm_connector *aconnector = NULL;
11181 for_each_oldnew_connector_in_state(state, connector, old_conn_state, conn_state, i) {
11182 if (!conn_state->crtc)
11183 conn_state = old_conn_state;
11185 if (conn_state->crtc != crtc)
11188 aconnector = to_amdgpu_dm_connector(connector);
11189 if (!aconnector->port || !aconnector->mst_port)
11198 return drm_dp_mst_add_affected_dsc_crtcs(state, &aconnector->mst_port->mst_mgr);
11203 * amdgpu_dm_atomic_check() - Atomic check implementation for AMDgpu DM.
11204 * @dev: The DRM device
11205 * @state: The atomic state to commit
11207 * Validate that the given atomic state is programmable by DC into hardware.
11208 * This involves constructing a &struct dc_state reflecting the new hardware
11209 * state we wish to commit, then querying DC to see if it is programmable. It's
11210 * important not to modify the existing DC state. Otherwise, atomic_check
11211 * may unexpectedly commit hardware changes.
11213 * When validating the DC state, it's important that the right locks are
11214 * acquired. For full updates case which removes/adds/updates streams on one
11215 * CRTC while flipping on another CRTC, acquiring global lock will guarantee
11216 * that any such full update commit will wait for completion of any outstanding
11217 * flip using DRMs synchronization events.
11219 * Note that DM adds the affected connectors for all CRTCs in state, when that
11220 * might not seem necessary. This is because DC stream creation requires the
11221 * DC sink, which is tied to the DRM connector state. Cleaning this up should
11222 * be possible but non-trivial - a possible TODO item.
11224 * Return: 0 on success, or a negative error code if validation failed.
11226 static int amdgpu_dm_atomic_check(struct drm_device *dev,
11227 struct drm_atomic_state *state)
11229 struct amdgpu_device *adev = drm_to_adev(dev);
11230 struct dm_atomic_state *dm_state = NULL;
11231 struct dc *dc = adev->dm.dc;
11232 struct drm_connector *connector;
11233 struct drm_connector_state *old_con_state, *new_con_state;
11234 struct drm_crtc *crtc;
11235 struct drm_crtc_state *old_crtc_state, *new_crtc_state;
11236 struct drm_plane *plane;
11237 struct drm_plane_state *old_plane_state, *new_plane_state;
11238 enum dc_status status;
11240 bool lock_and_validation_needed = false;
11241 struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
11242 #if defined(CONFIG_DRM_AMD_DC_DCN)
11243 struct dsc_mst_fairness_vars vars[MAX_PIPES];
11244 struct drm_dp_mst_topology_state *mst_state;
11245 struct drm_dp_mst_topology_mgr *mgr;
11248 trace_amdgpu_dm_atomic_check_begin(state);
11250 ret = drm_atomic_helper_check_modeset(dev, state);
11252 DRM_DEBUG_DRIVER("drm_atomic_helper_check_modeset() failed\n");
11256 /* Check connector changes */
11257 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
11258 struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
11259 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
11261 /* Skip connectors that are disabled or part of modeset already. */
11262 if (!old_con_state->crtc && !new_con_state->crtc)
11265 if (!new_con_state->crtc)
11268 new_crtc_state = drm_atomic_get_crtc_state(state, new_con_state->crtc);
11269 if (IS_ERR(new_crtc_state)) {
11270 DRM_DEBUG_DRIVER("drm_atomic_get_crtc_state() failed\n");
11271 ret = PTR_ERR(new_crtc_state);
11275 if (dm_old_con_state->abm_level !=
11276 dm_new_con_state->abm_level)
11277 new_crtc_state->connectors_changed = true;
11280 #if defined(CONFIG_DRM_AMD_DC_DCN)
11281 if (dc_resource_is_dsc_encoding_supported(dc)) {
11282 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
11283 if (drm_atomic_crtc_needs_modeset(new_crtc_state)) {
11284 ret = add_affected_mst_dsc_crtcs(state, crtc);
11286 DRM_DEBUG_DRIVER("add_affected_mst_dsc_crtcs() failed\n");
11291 if (!pre_validate_dsc(state, &dm_state, vars)) {
11297 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
11298 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
11300 if (!drm_atomic_crtc_needs_modeset(new_crtc_state) &&
11301 !new_crtc_state->color_mgmt_changed &&
11302 old_crtc_state->vrr_enabled == new_crtc_state->vrr_enabled &&
11303 dm_old_crtc_state->dsc_force_changed == false)
11306 ret = amdgpu_dm_verify_lut_sizes(new_crtc_state);
11308 DRM_DEBUG_DRIVER("amdgpu_dm_verify_lut_sizes() failed\n");
11312 if (!new_crtc_state->enable)
11315 ret = drm_atomic_add_affected_connectors(state, crtc);
11317 DRM_DEBUG_DRIVER("drm_atomic_add_affected_connectors() failed\n");
11321 ret = drm_atomic_add_affected_planes(state, crtc);
11323 DRM_DEBUG_DRIVER("drm_atomic_add_affected_planes() failed\n");
11327 if (dm_old_crtc_state->dsc_force_changed)
11328 new_crtc_state->mode_changed = true;
11332 * Add all primary and overlay planes on the CRTC to the state
11333 * whenever a plane is enabled to maintain correct z-ordering
11334 * and to enable fast surface updates.
11336 drm_for_each_crtc(crtc, dev) {
11337 bool modified = false;
11339 for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
11340 if (plane->type == DRM_PLANE_TYPE_CURSOR)
11343 if (new_plane_state->crtc == crtc ||
11344 old_plane_state->crtc == crtc) {
11353 drm_for_each_plane_mask(plane, state->dev, crtc->state->plane_mask) {
11354 if (plane->type == DRM_PLANE_TYPE_CURSOR)
11358 drm_atomic_get_plane_state(state, plane);
11360 if (IS_ERR(new_plane_state)) {
11361 ret = PTR_ERR(new_plane_state);
11362 DRM_DEBUG_DRIVER("new_plane_state is BAD\n");
11368 /* Remove existing planes if they are modified */
11369 for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
11370 ret = dm_update_plane_state(dc, state, plane,
11374 &lock_and_validation_needed);
11376 DRM_DEBUG_DRIVER("dm_update_plane_state() failed\n");
11381 /* Disable all crtcs which require disable */
11382 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
11383 ret = dm_update_crtc_state(&adev->dm, state, crtc,
11387 &lock_and_validation_needed);
11389 DRM_DEBUG_DRIVER("DISABLE: dm_update_crtc_state() failed\n");
11394 /* Enable all crtcs which require enable */
11395 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
11396 ret = dm_update_crtc_state(&adev->dm, state, crtc,
11400 &lock_and_validation_needed);
11402 DRM_DEBUG_DRIVER("ENABLE: dm_update_crtc_state() failed\n");
11407 /* Add new/modified planes */
11408 for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
11409 ret = dm_update_plane_state(dc, state, plane,
11413 &lock_and_validation_needed);
11415 DRM_DEBUG_DRIVER("dm_update_plane_state() failed\n");
11420 /* Run this here since we want to validate the streams we created */
11421 ret = drm_atomic_helper_check_planes(dev, state);
11423 DRM_DEBUG_DRIVER("drm_atomic_helper_check_planes() failed\n");
11427 for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
11428 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
11429 if (dm_new_crtc_state->mpo_requested)
11430 DRM_DEBUG_DRIVER("MPO enablement requested on crtc:[%p]\n", crtc);
11433 /* Check cursor planes scaling */
11434 for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
11435 ret = dm_check_crtc_cursor(state, crtc, new_crtc_state);
11437 DRM_DEBUG_DRIVER("dm_check_crtc_cursor() failed\n");
11442 if (state->legacy_cursor_update) {
11444 * This is a fast cursor update coming from the plane update
11445 * helper, check if it can be done asynchronously for better
11448 state->async_update =
11449 !drm_atomic_helper_async_check(dev, state);
11452 * Skip the remaining global validation if this is an async
11453 * update. Cursor updates can be done without affecting
11454 * state or bandwidth calcs and this avoids the performance
11455 * penalty of locking the private state object and
11456 * allocating a new dc_state.
11458 if (state->async_update)
11462 /* Check scaling and underscan changes */
11463 /* TODO Removed scaling changes validation due to inability to commit
11464 * a new stream into context without causing a full reset. Need to
11465 * decide how to handle this.
11467 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
11468 struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
11469 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
11470 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
11472 /* Skip any modesets/resets */
11473 if (!acrtc || drm_atomic_crtc_needs_modeset(
11474 drm_atomic_get_new_crtc_state(state, &acrtc->base)))
11477 /* Skip anything that is not a scaling or underscan change */
11478 if (!is_scaling_state_different(dm_new_con_state, dm_old_con_state))
11481 lock_and_validation_needed = true;
11484 #if defined(CONFIG_DRM_AMD_DC_DCN)
11485 /* set the slot info for each mst_state based on the link encoding format */
11486 for_each_new_mst_mgr_in_state(state, mgr, mst_state, i) {
11487 struct amdgpu_dm_connector *aconnector;
11488 struct drm_connector *connector;
11489 struct drm_connector_list_iter iter;
11490 u8 link_coding_cap;
11492 if (!mgr->mst_state)
11495 drm_connector_list_iter_begin(dev, &iter);
11496 drm_for_each_connector_iter(connector, &iter) {
11497 int id = connector->index;
11499 if (id == mst_state->mgr->conn_base_id) {
11500 aconnector = to_amdgpu_dm_connector(connector);
11501 link_coding_cap = dc_link_dp_mst_decide_link_encoding_format(aconnector->dc_link);
11502 drm_dp_mst_update_slots(mst_state, link_coding_cap);
11507 drm_connector_list_iter_end(&iter);
11512 * Streams and planes are reset when there are changes that affect
11513 * bandwidth. Anything that affects bandwidth needs to go through
11514 * DC global validation to ensure that the configuration can be applied to hardware.
11517 * We have to currently stall out here in atomic_check for outstanding
11518 * commits to finish in this case because our IRQ handlers reference
11519 * DRM state directly - we can end up disabling interrupts too early otherwise.
11522 * TODO: Remove this stall and drop DM state private objects.
11524 if (lock_and_validation_needed) {
11525 ret = dm_atomic_get_state(state, &dm_state);
11527 DRM_DEBUG_DRIVER("dm_atomic_get_state() failed\n");
11531 ret = do_aquire_global_lock(dev, state);
11533 DRM_DEBUG_DRIVER("do_aquire_global_lock() failed\n");
11537 #if defined(CONFIG_DRM_AMD_DC_DCN)
11538 if (!compute_mst_dsc_configs_for_state(state, dm_state->context, vars)) {
11539 DRM_DEBUG_DRIVER("compute_mst_dsc_configs_for_state() failed\n");
11544 ret = dm_update_mst_vcpi_slots_for_dsc(state, dm_state->context, vars);
11546 DRM_DEBUG_DRIVER("dm_update_mst_vcpi_slots_for_dsc() failed\n");
11552 * Perform validation of MST topology in the state:
11553 * We need to perform MST atomic check before calling
11554 * dc_validate_global_state(), or there is a chance
11555 * of getting stuck in an infinite loop and hanging eventually.
11557 ret = drm_dp_mst_atomic_check(state);
11559 DRM_DEBUG_DRIVER("drm_dp_mst_atomic_check() failed\n");
11562 status = dc_validate_global_state(dc, dm_state->context, true);
11563 if (status != DC_OK) {
11564 DRM_DEBUG_DRIVER("DC global validation failure: %s (%d)",
11565 dc_status_to_str(status), status);
11571 * The commit is a fast update. Fast updates shouldn't change
11572 * the DC context, affect global validation, and can have their
11573 * commit work done in parallel with other commits not touching
11574 * the same resource. If we have a new DC context as part of
11575 * the DM atomic state from validation we need to free it and
11576 * retain the existing one instead.
11578 * Furthermore, since the DM atomic state only contains the DC
11579 * context and can safely be annulled, we can free the state
11580 * and clear the associated private object now to free
11581 * some memory and avoid a possible use-after-free later.
11584 for (i = 0; i < state->num_private_objs; i++) {
11585 struct drm_private_obj *obj = state->private_objs[i].ptr;
11587 if (obj->funcs == adev->dm.atomic_obj.funcs) {
11588 int j = state->num_private_objs-1;
11590 dm_atomic_destroy_state(obj,
11591 state->private_objs[i].state);
11593 /* If i is not at the end of the array then the
11594 * last element needs to be moved to where i was
11595 * before the array can safely be truncated.
11598 state->private_objs[i] =
11599 state->private_objs[j];
11601 state->private_objs[j].ptr = NULL;
11602 state->private_objs[j].state = NULL;
11603 state->private_objs[j].old_state = NULL;
11604 state->private_objs[j].new_state = NULL;
11606 state->num_private_objs = j;
11612 /* Store the overall update type for use later in atomic check. */
11613 for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
11614 struct dm_crtc_state *dm_new_crtc_state =
11615 to_dm_crtc_state(new_crtc_state);
11617 dm_new_crtc_state->update_type = lock_and_validation_needed ?
11622 /* Must be success */
11625 trace_amdgpu_dm_atomic_check_finish(state, ret);
11630 if (ret == -EDEADLK)
11631 DRM_DEBUG_DRIVER("Atomic check stopped to avoid deadlock.\n");
11632 else if (ret == -EINTR || ret == -EAGAIN || ret == -ERESTARTSYS)
11633 DRM_DEBUG_DRIVER("Atomic check stopped due to signal.\n");
11635 DRM_DEBUG_DRIVER("Atomic check failed with err: %d \n", ret);
11637 trace_amdgpu_dm_atomic_check_finish(state, ret);
11642 static bool is_dp_capable_without_timing_msa(struct dc *dc,
11643 struct amdgpu_dm_connector *amdgpu_dm_connector)
11646 bool capable = false;
11648 if (amdgpu_dm_connector->dc_link &&
11649 dm_helpers_dp_read_dpcd(
11651 amdgpu_dm_connector->dc_link,
11652 DP_DOWN_STREAM_PORT_COUNT,
11654 sizeof(dpcd_data))) {
11655 capable = (dpcd_data & DP_MSA_TIMING_PAR_IGNORED) ? true : false;
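/*
 * For reference (register numbers per the DPCD definitions in the DRM DP
 * headers, assumed here): DP_DOWN_STREAM_PORT_COUNT is receiver capability
 * register 0x007 and DP_MSA_TIMING_PAR_IGNORED is bit 6 of that byte, so the
 * check above effectively reduces to
 *
 *	capable = dpcd_data & (1 << 6);
 *
 * i.e. the sink can regenerate video timing without the MSA parameters,
 * which is a prerequisite for driving it with variable refresh.
 */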
11661 static bool dm_edid_parser_send_cea(struct amdgpu_display_manager *dm,
11662 unsigned int offset,
11663 unsigned int total_length,
11665 unsigned int length,
11666 struct amdgpu_hdmi_vsdb_info *vsdb)
11669 union dmub_rb_cmd cmd;
11670 struct dmub_cmd_send_edid_cea *input;
11671 struct dmub_cmd_edid_cea_output *output;
11673 if (length > DMUB_EDID_CEA_DATA_CHUNK_BYTES)
11676 memset(&cmd, 0, sizeof(cmd));
11678 input = &cmd.edid_cea.data.input;
11680 cmd.edid_cea.header.type = DMUB_CMD__EDID_CEA;
11681 cmd.edid_cea.header.sub_type = 0;
11682 cmd.edid_cea.header.payload_bytes =
11683 sizeof(cmd.edid_cea) - sizeof(cmd.edid_cea.header);
11684 input->offset = offset;
11685 input->length = length;
11686 input->cea_total_length = total_length;
11687 memcpy(input->payload, data, length);
11689 res = dc_dmub_srv_cmd_with_reply_data(dm->dc->ctx->dmub_srv, &cmd);
11691 DRM_ERROR("EDID CEA parser failed\n");
11695 output = &cmd.edid_cea.data.output;
11697 if (output->type == DMUB_CMD__EDID_CEA_ACK) {
11698 if (!output->ack.success) {
11699 DRM_ERROR("EDID CEA ack failed at offset %d\n",
11700 output->ack.offset);
11702 } else if (output->type == DMUB_CMD__EDID_CEA_AMD_VSDB) {
11703 if (!output->amd_vsdb.vsdb_found)
11706 vsdb->freesync_supported = output->amd_vsdb.freesync_supported;
11707 vsdb->amd_vsdb_version = output->amd_vsdb.amd_vsdb_version;
11708 vsdb->min_refresh_rate_hz = output->amd_vsdb.min_frame_rate;
11709 vsdb->max_refresh_rate_hz = output->amd_vsdb.max_frame_rate;
11711 DRM_WARN("Unknown EDID CEA parser results\n");
11718 static bool parse_edid_cea_dmcu(struct amdgpu_display_manager *dm,
11719 uint8_t *edid_ext, int len,
11720 struct amdgpu_hdmi_vsdb_info *vsdb_info)
11724 /* send extension block to DMCU for parsing */
11725 for (i = 0; i < len; i += 8) {
11729 /* send 8 bytes at a time */
11730 if (!dc_edid_parser_send_cea(dm->dc, i, len, &edid_ext[i], 8))
11734 /* Complete EDID block sent, expect the result */
11735 int version, min_rate, max_rate;
11737 res = dc_edid_parser_recv_amd_vsdb(dm->dc, &version, &min_rate, &max_rate);
11739 /* amd vsdb found */
11740 vsdb_info->freesync_supported = 1;
11741 vsdb_info->amd_vsdb_version = version;
11742 vsdb_info->min_refresh_rate_hz = min_rate;
11743 vsdb_info->max_refresh_rate_hz = max_rate;
11751 res = dc_edid_parser_recv_cea_ack(dm->dc, &offset);
11759 static bool parse_edid_cea_dmub(struct amdgpu_display_manager *dm,
11760 uint8_t *edid_ext, int len,
11761 struct amdgpu_hdmi_vsdb_info *vsdb_info)
11765 /* send extension block to DMUB for parsing */
11766 for (i = 0; i < len; i += 8) {
11767 /* send 8 bytes at a time */
11768 if (!dm_edid_parser_send_cea(dm, i, len, &edid_ext[i], 8, vsdb_info))
11772 return vsdb_info->freesync_supported;
11775 static bool parse_edid_cea(struct amdgpu_dm_connector *aconnector,
11776 uint8_t *edid_ext, int len,
11777 struct amdgpu_hdmi_vsdb_info *vsdb_info)
11779 struct amdgpu_device *adev = drm_to_adev(aconnector->base.dev);
11781 if (adev->dm.dmub_srv)
11782 return parse_edid_cea_dmub(&adev->dm, edid_ext, len, vsdb_info);
11784 return parse_edid_cea_dmcu(&adev->dm, edid_ext, len, vsdb_info);
11787 static int parse_hdmi_amd_vsdb(struct amdgpu_dm_connector *aconnector,
11788 struct edid *edid, struct amdgpu_hdmi_vsdb_info *vsdb_info)
11790 uint8_t *edid_ext = NULL;
11792 bool valid_vsdb_found = false;
11794 /*----- drm_find_cea_extension() -----*/
11795 /* No EDID or EDID extensions */
11796 if (edid == NULL || edid->extensions == 0)
11799 /* Find CEA extension */
11800 for (i = 0; i < edid->extensions; i++) {
11801 edid_ext = (uint8_t *)edid + EDID_LENGTH * (i + 1);
11802 if (edid_ext[0] == CEA_EXT)
11806 if (i == edid->extensions)
11809 /*----- cea_db_offsets() -----*/
11810 if (edid_ext[0] != CEA_EXT)
11813 valid_vsdb_found = parse_edid_cea(aconnector, edid_ext, EDID_LENGTH, vsdb_info);
11815 return valid_vsdb_found ? i : -ENODEV;
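/*
 * For reference, the EDID walk above relies on the standard block layout
 * (illustrative): the base EDID is EDID_LENGTH (128) bytes, extension block i
 * starts at byte 128 * (i + 1), and a CTA-861/CEA extension is identified by
 * its first byte being CEA_EXT (0x02).  So for an EDID with one CEA extension:
 *
 *	edid_ext = (uint8_t *)edid + 128;	// extension 0
 *	edid_ext[0] == 0x02;			// CEA_EXT tag
 */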
11818 void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,
11822 struct detailed_timing *timing;
11823 struct detailed_non_pixel *data;
11824 struct detailed_data_monitor_range *range;
11825 struct amdgpu_dm_connector *amdgpu_dm_connector =
11826 to_amdgpu_dm_connector(connector);
11827 struct dm_connector_state *dm_con_state = NULL;
11828 struct dc_sink *sink;
11830 struct drm_device *dev = connector->dev;
11831 struct amdgpu_device *adev = drm_to_adev(dev);
11832 bool freesync_capable = false;
11833 struct amdgpu_hdmi_vsdb_info vsdb_info = {0};
11835 if (!connector->state) {
11836 DRM_ERROR("%s - Connector has no state", __func__);
11840 sink = amdgpu_dm_connector->dc_sink ?
11841 amdgpu_dm_connector->dc_sink :
11842 amdgpu_dm_connector->dc_em_sink;
11844 if (!edid || !sink) {
11845 dm_con_state = to_dm_connector_state(connector->state);
11847 amdgpu_dm_connector->min_vfreq = 0;
11848 amdgpu_dm_connector->max_vfreq = 0;
11849 amdgpu_dm_connector->pixel_clock_mhz = 0;
11850 connector->display_info.monitor_range.min_vfreq = 0;
11851 connector->display_info.monitor_range.max_vfreq = 0;
11852 freesync_capable = false;
11857 dm_con_state = to_dm_connector_state(connector->state);
11859 if (!adev->dm.freesync_module)
11863 if (sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT
11864 || sink->sink_signal == SIGNAL_TYPE_EDP) {
11865 bool edid_check_required = false;
11868 edid_check_required = is_dp_capable_without_timing_msa(
11870 amdgpu_dm_connector);
11873 if (edid_check_required && (edid->version > 1 ||
11874 (edid->version == 1 && edid->revision > 1))) {
11875 for (i = 0; i < 4; i++) {
11877 timing = &edid->detailed_timings[i];
11878 data = &timing->data.other_data;
11879 range = &data->data.range;
11881 * Check if the monitor has a continuous frequency mode
11883 if (data->type != EDID_DETAIL_MONITOR_RANGE)
11886 * Check for flag range limits only. If flag == 1, then
11887 * no additional timing information is provided.
11888 * Default GTF, GTF Secondary curve and CVT are not supported.
11891 if (range->flags != 1)
11894 amdgpu_dm_connector->min_vfreq = range->min_vfreq;
11895 amdgpu_dm_connector->max_vfreq = range->max_vfreq;
11896 amdgpu_dm_connector->pixel_clock_mhz =
11897 range->pixel_clock_mhz * 10;
11899 connector->display_info.monitor_range.min_vfreq = range->min_vfreq;
11900 connector->display_info.monitor_range.max_vfreq = range->max_vfreq;
11905 if (amdgpu_dm_connector->max_vfreq -
11906 amdgpu_dm_connector->min_vfreq > 10) {
11908 freesync_capable = true;
11911 } else if (edid && sink->sink_signal == SIGNAL_TYPE_HDMI_TYPE_A) {
11912 i = parse_hdmi_amd_vsdb(amdgpu_dm_connector, edid, &vsdb_info);
11913 if (i >= 0 && vsdb_info.freesync_supported) {
11914 timing = &edid->detailed_timings[i];
11915 data = &timing->data.other_data;
11917 amdgpu_dm_connector->min_vfreq = vsdb_info.min_refresh_rate_hz;
11918 amdgpu_dm_connector->max_vfreq = vsdb_info.max_refresh_rate_hz;
11919 if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10)
11920 freesync_capable = true;
11922 connector->display_info.monitor_range.min_vfreq = vsdb_info.min_refresh_rate_hz;
11923 connector->display_info.monitor_range.max_vfreq = vsdb_info.max_refresh_rate_hz;
11929 dm_con_state->freesync_capable = freesync_capable;
11931 if (connector->vrr_capable_property)
11932 drm_connector_set_vrr_capable_property(connector,
11936 void amdgpu_dm_trigger_timing_sync(struct drm_device *dev)
11938 struct amdgpu_device *adev = drm_to_adev(dev);
11939 struct dc *dc = adev->dm.dc;
11942 mutex_lock(&adev->dm.dc_lock);
11943 if (dc->current_state) {
11944 for (i = 0; i < dc->current_state->stream_count; ++i)
11945 dc->current_state->streams[i]
11946 ->triggered_crtc_reset.enabled =
11947 adev->dm.force_timing_sync;
11949 dm_enable_per_frame_crtc_master_sync(dc->current_state);
11950 dc_trigger_sync(dc, dc->current_state);
11952 mutex_unlock(&adev->dm.dc_lock);
11955 void dm_write_reg_func(const struct dc_context *ctx, uint32_t address,
11956 uint32_t value, const char *func_name)
11958 #ifdef DM_CHECK_ADDR_0
11959 if (address == 0) {
11960 DC_ERR("invalid register write. address = 0");
11964 cgs_write_register(ctx->cgs_device, address, value);
11965 trace_amdgpu_dc_wreg(&ctx->perf_trace->write_count, address, value);
11968 uint32_t dm_read_reg_func(const struct dc_context *ctx, uint32_t address,
11969 const char *func_name)
11972 #ifdef DM_CHECK_ADDR_0
11973 if (address == 0) {
11974 DC_ERR("invalid register read; address = 0\n");
11979 if (ctx->dmub_srv &&
11980 ctx->dmub_srv->reg_helper_offload.gather_in_progress &&
11981 !ctx->dmub_srv->reg_helper_offload.should_burst_write) {
11986 value = cgs_read_register(ctx->cgs_device, address);
11988 trace_amdgpu_dc_rreg(&ctx->perf_trace->read_count, address, value);
11993 static int amdgpu_dm_set_dmub_async_sync_status(bool is_cmd_aux,
11994 struct dc_context *ctx,
11995 uint8_t status_type,
11996 uint32_t *operation_result)
11998 struct amdgpu_device *adev = ctx->driver_context;
11999 int return_status = -1;
12000 struct dmub_notification *p_notify = adev->dm.dmub_notify;
12003 if (status_type == DMUB_ASYNC_TO_SYNC_ACCESS_SUCCESS) {
12004 return_status = p_notify->aux_reply.length;
12005 *operation_result = p_notify->result;
12006 } else if (status_type == DMUB_ASYNC_TO_SYNC_ACCESS_TIMEOUT) {
12007 *operation_result = AUX_RET_ERROR_TIMEOUT;
12008 } else if (status_type == DMUB_ASYNC_TO_SYNC_ACCESS_FAIL) {
12009 *operation_result = AUX_RET_ERROR_ENGINE_ACQUIRE;
12011 *operation_result = AUX_RET_ERROR_UNKNOWN;
12014 if (status_type == DMUB_ASYNC_TO_SYNC_ACCESS_SUCCESS) {
12016 *operation_result = p_notify->sc_status;
12018 *operation_result = SET_CONFIG_UNKNOWN_ERROR;
12022 return return_status;
12025 int amdgpu_dm_process_dmub_aux_transfer_sync(bool is_cmd_aux, struct dc_context *ctx,
12026 unsigned int link_index, void *cmd_payload, void *operation_result)
12028 struct amdgpu_device *adev = ctx->driver_context;
12032 dc_process_dmub_aux_transfer_async(ctx->dc,
12033 link_index, (struct aux_payload *)cmd_payload);
12034 } else if (dc_process_dmub_set_config_async(ctx->dc, link_index,
12035 (struct set_config_cmd_payload *)cmd_payload,
12036 adev->dm.dmub_notify)) {
12037 return amdgpu_dm_set_dmub_async_sync_status(is_cmd_aux,
12038 ctx, DMUB_ASYNC_TO_SYNC_ACCESS_SUCCESS,
12039 (uint32_t *)operation_result);
12042 ret = wait_for_completion_timeout(&adev->dm.dmub_aux_transfer_done, 10 * HZ);
12044 DRM_ERROR("wait_for_completion_timeout timeout!");
12045 return amdgpu_dm_set_dmub_async_sync_status(is_cmd_aux,
12046 ctx, DMUB_ASYNC_TO_SYNC_ACCESS_TIMEOUT,
12047 (uint32_t *)operation_result);
12051 if (adev->dm.dmub_notify->result == AUX_RET_SUCCESS) {
12052 struct aux_payload *payload = (struct aux_payload *)cmd_payload;
12054 payload->reply[0] = adev->dm.dmub_notify->aux_reply.command;
12055 if (!payload->write && adev->dm.dmub_notify->aux_reply.length &&
12056 payload->reply[0] == AUX_TRANSACTION_REPLY_AUX_ACK) {
12057 memcpy(payload->data, adev->dm.dmub_notify->aux_reply.data,
12058 adev->dm.dmub_notify->aux_reply.length);
12063 return amdgpu_dm_set_dmub_async_sync_status(is_cmd_aux,
12064 ctx, DMUB_ASYNC_TO_SYNC_ACCESS_SUCCESS,
12065 (uint32_t *)operation_result);
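/*
 * Minimal usage sketch for the synchronous DMUB AUX path above (hypothetical
 * caller; the local variable names and the aux_payload fields used here are
 * assumptions): read one DPCD byte through DMUB and pick up the reply that
 * was copied into the payload once the notification completes.
 *
 *	struct aux_payload payload = {
 *		.address = DP_DPCD_REV,
 *		.length  = 1,
 *		.data    = buf,
 *		.write   = false,
 *	};
 *	uint32_t op_result;
 *	int len;
 *
 *	len = amdgpu_dm_process_dmub_aux_transfer_sync(true, dc_ctx,
 *			link_index, &payload, &op_result);
 *	if (len < 0 || op_result != AUX_RET_SUCCESS)
 *		// handle the AUX failure
 */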
12069 * Check whether seamless boot is supported.
12071 * So far we only support seamless boot on CHIP_VANGOGH.
12072 * If everything goes well, we may consider expanding
12073 * seamless boot to other ASICs.
12075 bool check_seamless_boot_capability(struct amdgpu_device *adev)
12077 switch (adev->asic_type) {
12079 if (!adev->mman.keep_stolen_vga_memory)