/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: AMD
 */
/* The caprices of the preprocessor require that this be declared right here */
#define CREATE_TRACE_POINTS
#include "dm_services_types.h"
#include "dc.h"
#include "dc_link_dp.h"
#include "link_enc_cfg.h"
#include "dc/inc/core_types.h"
#include "dal_asic_id.h"
#include "dmub/dmub_srv.h"
#include "dc/inc/hw/dmcu.h"
#include "dc/inc/hw/abm.h"
#include "dc/dc_dmub_srv.h"
#include "dc/dc_edid_parser.h"
#include "dc/dc_stat.h"
#include "amdgpu_dm_trace.h"

#include "vid.h"
#include "amdgpu.h"
#include "amdgpu_display.h"
#include "amdgpu_ucode.h"
#include "atom.h"
#include "amdgpu_dm.h"
#ifdef CONFIG_DRM_AMD_DC_HDCP
#include "amdgpu_dm_hdcp.h"
#include <drm/display/drm_hdcp_helper.h>
#endif
#include "amdgpu_pm.h"
#include "amdgpu_atombios.h"

#include "amd_shared.h"
#include "amdgpu_dm_irq.h"
#include "dm_helpers.h"
#include "amdgpu_dm_mst_types.h"
#if defined(CONFIG_DEBUG_FS)
#include "amdgpu_dm_debugfs.h"
#endif
#include "amdgpu_dm_psr.h"

#include "ivsrcid/ivsrcid_vislands30.h"

#include "i2caux_interface.h"
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/types.h>
#include <linux/pm_runtime.h>
#include <linux/pci.h>
#include <linux/firmware.h>
#include <linux/component.h>
#include <linux/dmi.h>

#include <drm/display/drm_dp_mst_helper.h>
#include <drm/display/drm_hdmi_helper.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_uapi.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_edid.h>
#include <drm/drm_vblank.h>
#include <drm/drm_audio_component.h>

#include "ivsrcid/dcn/irqsrcs_dcn_1_0.h"

#include "dcn/dcn_1_0_offset.h"
#include "dcn/dcn_1_0_sh_mask.h"
#include "soc15_hw_ip.h"
#include "soc15_common.h"
#include "vega10_ip_offset.h"

#include "gc/gc_11_0_0_offset.h"
#include "gc/gc_11_0_0_sh_mask.h"

#include "modules/inc/mod_freesync.h"
#include "modules/power/power_helpers.h"
#include "modules/inc/mod_info_packet.h"
#define FIRMWARE_RENOIR_DMUB "amdgpu/renoir_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_RENOIR_DMUB);
#define FIRMWARE_SIENNA_CICHLID_DMUB "amdgpu/sienna_cichlid_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_SIENNA_CICHLID_DMUB);
#define FIRMWARE_NAVY_FLOUNDER_DMUB "amdgpu/navy_flounder_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_NAVY_FLOUNDER_DMUB);
#define FIRMWARE_GREEN_SARDINE_DMUB "amdgpu/green_sardine_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_GREEN_SARDINE_DMUB);
#define FIRMWARE_VANGOGH_DMUB "amdgpu/vangogh_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_VANGOGH_DMUB);
#define FIRMWARE_DIMGREY_CAVEFISH_DMUB "amdgpu/dimgrey_cavefish_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_DIMGREY_CAVEFISH_DMUB);
#define FIRMWARE_BEIGE_GOBY_DMUB "amdgpu/beige_goby_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_BEIGE_GOBY_DMUB);
#define FIRMWARE_YELLOW_CARP_DMUB "amdgpu/yellow_carp_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_YELLOW_CARP_DMUB);
#define FIRMWARE_DCN_314_DMUB "amdgpu/dcn_3_1_4_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_DCN_314_DMUB);
#define FIRMWARE_DCN_315_DMUB "amdgpu/dcn_3_1_5_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_DCN_315_DMUB);
#define FIRMWARE_DCN316_DMUB "amdgpu/dcn_3_1_6_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_DCN316_DMUB);

#define FIRMWARE_DCN_V3_2_0_DMCUB "amdgpu/dcn_3_2_0_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_DCN_V3_2_0_DMCUB);
#define FIRMWARE_DCN_V3_2_1_DMCUB "amdgpu/dcn_3_2_1_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_DCN_V3_2_1_DMCUB);

#define FIRMWARE_RAVEN_DMCU "amdgpu/raven_dmcu.bin"
MODULE_FIRMWARE(FIRMWARE_RAVEN_DMCU);

#define FIRMWARE_NAVI12_DMCU "amdgpu/navi12_dmcu.bin"
MODULE_FIRMWARE(FIRMWARE_NAVI12_DMCU);

/* Number of bytes in PSP header for firmware. */
#define PSP_HEADER_BYTES 0x100

/* Number of bytes in PSP footer for firmware. */
#define PSP_FOOTER_BYTES 0x100
/**
 * DOC: overview
 *
 * The AMDgpu display manager, **amdgpu_dm** (or even simpler,
 * **dm**) sits between DRM and DC. It acts as a liaison, converting DRM
 * requests into DC requests, and DC responses into DRM responses.
 *
 * The root control structure is &struct amdgpu_display_manager.
 */
/* basic init/fini API */
static int amdgpu_dm_init(struct amdgpu_device *adev);
static void amdgpu_dm_fini(struct amdgpu_device *adev);
static bool is_freesync_video_mode(const struct drm_display_mode *mode, struct amdgpu_dm_connector *aconnector);
static enum drm_mode_subconnector get_subconnector_type(struct dc_link *link)
{
	switch (link->dpcd_caps.dongle_type) {
	case DISPLAY_DONGLE_NONE:
		return DRM_MODE_SUBCONNECTOR_Native;
	case DISPLAY_DONGLE_DP_VGA_CONVERTER:
		return DRM_MODE_SUBCONNECTOR_VGA;
	case DISPLAY_DONGLE_DP_DVI_CONVERTER:
	case DISPLAY_DONGLE_DP_DVI_DONGLE:
		return DRM_MODE_SUBCONNECTOR_DVID;
	case DISPLAY_DONGLE_DP_HDMI_CONVERTER:
	case DISPLAY_DONGLE_DP_HDMI_DONGLE:
		return DRM_MODE_SUBCONNECTOR_HDMIA;
	case DISPLAY_DONGLE_DP_HDMI_MISMATCHED_DONGLE:
	default:
		return DRM_MODE_SUBCONNECTOR_Unknown;
	}
}
static void update_subconnector_property(struct amdgpu_dm_connector *aconnector)
{
	struct dc_link *link = aconnector->dc_link;
	struct drm_connector *connector = &aconnector->base;
	enum drm_mode_subconnector subconnector = DRM_MODE_SUBCONNECTOR_Unknown;

	if (connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort)
		return;

	if (aconnector->dc_sink)
		subconnector = get_subconnector_type(link);

	drm_object_property_set_value(&connector->base,
			connector->dev->mode_config.dp_subconnector_property,
			subconnector);
}
/*
 * initializes drm_device display related structures, based on the information
 * provided by DAL. The drm structures are: drm_crtc, drm_connector,
 * drm_encoder, drm_mode_config
 *
 * Returns 0 on success
 */
static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev);
/* removes and deallocates the drm structures, created by the above function */
static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm);

static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
				struct drm_plane *plane,
				unsigned long possible_crtcs,
				const struct dc_plane_cap *plane_cap);
static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
			       struct drm_plane *plane,
			       uint32_t link_index);
static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
				    struct amdgpu_dm_connector *amdgpu_dm_connector,
				    uint32_t link_index,
				    struct amdgpu_encoder *amdgpu_encoder);
static int amdgpu_dm_encoder_init(struct drm_device *dev,
				  struct amdgpu_encoder *aencoder,
				  uint32_t link_index);

static int amdgpu_dm_connector_get_modes(struct drm_connector *connector);

static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state);

static int amdgpu_dm_atomic_check(struct drm_device *dev,
				  struct drm_atomic_state *state);

static void handle_cursor_update(struct drm_plane *plane,
				 struct drm_plane_state *old_plane_state);

static const struct drm_format_info *
amd_get_format_info(const struct drm_mode_fb_cmd2 *cmd);

static void handle_hpd_irq_helper(struct amdgpu_dm_connector *aconnector);
static void handle_hpd_rx_irq(void *param);

static bool
is_timing_unchanged_for_freesync(struct drm_crtc_state *old_crtc_state,
				 struct drm_crtc_state *new_crtc_state);
/**
 * dm_vblank_get_counter
 *
 * @brief
 * Get counter for number of vertical blanks
 *
 * @param
 * struct amdgpu_device *adev - [in] desired amdgpu device
 * int disp_idx - [in] which CRTC to get the counter from
 *
 * @return
 * Counter for vertical blanks
 */
static u32 dm_vblank_get_counter(struct amdgpu_device *adev, int crtc)
{
	struct amdgpu_crtc *acrtc;

	if (crtc >= adev->mode_info.num_crtc)
		return 0;

	acrtc = adev->mode_info.crtcs[crtc];

	if (acrtc->dm_irq_params.stream == NULL) {
		DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
			  crtc);
		return 0;
	}

	return dc_stream_get_vblank_counter(acrtc->dm_irq_params.stream);
}
static int dm_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
				  u32 *vbl, u32 *position)
{
	uint32_t v_blank_start, v_blank_end, h_position, v_position;
	struct amdgpu_crtc *acrtc;

	if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc))
		return -EINVAL;

	acrtc = adev->mode_info.crtcs[crtc];

	if (acrtc->dm_irq_params.stream == NULL) {
		DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
			  crtc);
		return 0;
	}

	/*
	 * TODO rework base driver to use values directly.
	 * for now parse it back into reg-format
	 */
	dc_stream_get_scanoutpos(acrtc->dm_irq_params.stream,
				 &v_blank_start,
				 &v_blank_end,
				 &h_position,
				 &v_position);

	/* Pack the vertical value into the low 16 bits, horizontal into the high. */
	*position = v_position | (h_position << 16);
	*vbl = v_blank_start | (v_blank_end << 16);

	return 0;
}
static bool dm_is_idle(void *handle)
{
	/* XXX todo */
	return true;
}

static int dm_wait_for_idle(void *handle)
{
	/* XXX todo */
	return 0;
}

static bool dm_check_soft_reset(void *handle)
{
	return false;
}

static int dm_soft_reset(void *handle)
{
	/* XXX todo */
	return 0;
}
static struct amdgpu_crtc *
get_crtc_by_otg_inst(struct amdgpu_device *adev,
		     int otg_inst)
{
	struct drm_device *dev = adev_to_drm(adev);
	struct drm_crtc *crtc;
	struct amdgpu_crtc *amdgpu_crtc;

	if (WARN_ON(otg_inst == -1))
		return adev->mode_info.crtcs[0];

	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
		amdgpu_crtc = to_amdgpu_crtc(crtc);

		if (amdgpu_crtc->otg_inst == otg_inst)
			return amdgpu_crtc;
	}

	return NULL;
}
static inline bool amdgpu_dm_vrr_active_irq(struct amdgpu_crtc *acrtc)
{
	return acrtc->dm_irq_params.freesync_config.state ==
		       VRR_STATE_ACTIVE_VARIABLE ||
	       acrtc->dm_irq_params.freesync_config.state ==
		       VRR_STATE_ACTIVE_FIXED;
}

static inline bool amdgpu_dm_vrr_active(struct dm_crtc_state *dm_state)
{
	return dm_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE ||
	       dm_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED;
}

static inline bool is_dc_timing_adjust_needed(struct dm_crtc_state *old_state,
					      struct dm_crtc_state *new_state)
{
	if (new_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED)
		return true;
	else if (amdgpu_dm_vrr_active(old_state) != amdgpu_dm_vrr_active(new_state))
		return true;
	else
		return false;
}
/**
 * dm_pflip_high_irq() - Handle pageflip interrupt
 * @interrupt_params: ignored
 *
 * Handles the pageflip interrupt by notifying all interested parties
 * that the pageflip has been completed.
 */
static void dm_pflip_high_irq(void *interrupt_params)
{
	struct amdgpu_crtc *amdgpu_crtc;
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	unsigned long flags;
	struct drm_pending_vblank_event *e;
	uint32_t vpos, hpos, v_blank_start, v_blank_end;
	bool vrr_active;

	amdgpu_crtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_PFLIP);

	/* IRQ could occur when in initial stage */
	/* TODO work and BO cleanup */
	if (amdgpu_crtc == NULL) {
		DC_LOG_PFLIP("CRTC is null, returning.\n");
		return;
	}

	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);

	if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED) {
		DC_LOG_PFLIP("amdgpu_crtc->pflip_status = %d != AMDGPU_FLIP_SUBMITTED(%d) on crtc:%d[%p]\n",
			     amdgpu_crtc->pflip_status,
			     AMDGPU_FLIP_SUBMITTED,
			     amdgpu_crtc->crtc_id,
			     amdgpu_crtc);
		spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
		return;
	}

	/* page flip completed. */
	e = amdgpu_crtc->event;
	amdgpu_crtc->event = NULL;

	WARN_ON(!e);

	vrr_active = amdgpu_dm_vrr_active_irq(amdgpu_crtc);

	/* Fixed refresh rate, or VRR scanout position outside front-porch? */
	if (!vrr_active ||
	    !dc_stream_get_scanoutpos(amdgpu_crtc->dm_irq_params.stream, &v_blank_start,
				      &v_blank_end, &hpos, &vpos) ||
	    (vpos < v_blank_start)) {
		/* Update to correct count and vblank timestamp if racing with
		 * vblank irq. This also updates to the correct vblank timestamp
		 * even in VRR mode, as scanout is past the front-porch atm.
		 */
		drm_crtc_accurate_vblank_count(&amdgpu_crtc->base);

		/* Wake up userspace by sending the pageflip event with proper
		 * count and timestamp of vblank of flip completion.
		 */
		if (e) {
			drm_crtc_send_vblank_event(&amdgpu_crtc->base, e);

			/* Event sent, so done with vblank for this flip */
			drm_crtc_vblank_put(&amdgpu_crtc->base);
		}
	} else if (e) {
		/* VRR active and inside front-porch: vblank count and
		 * timestamp for pageflip event will only be up to date after
		 * drm_crtc_handle_vblank() has been executed from late vblank
		 * irq handler after start of back-porch (vline 0). We queue the
		 * pageflip event for send-out by drm_crtc_handle_vblank() with
		 * updated timestamp and count, once it runs after us.
		 *
		 * We need to open-code this instead of using the helper
		 * drm_crtc_arm_vblank_event(), as that helper would
		 * call drm_crtc_accurate_vblank_count(), which we must
		 * not call in VRR mode while we are in front-porch!
		 */

		/* sequence will be replaced by real count during send-out. */
		e->sequence = drm_crtc_vblank_count(&amdgpu_crtc->base);
		e->pipe = amdgpu_crtc->crtc_id;

		list_add_tail(&e->base.link, &adev_to_drm(adev)->vblank_event_list);
		e = NULL;
	}

	/* Keep track of vblank of this flip for flip throttling. We use the
	 * cooked hw counter, as that one incremented at start of this vblank
	 * of pageflip completion, so last_flip_vblank is the forbidden count
	 * for queueing new pageflips if vsync + VRR is enabled.
	 */
	amdgpu_crtc->dm_irq_params.last_flip_vblank =
		amdgpu_get_vblank_counter_kms(&amdgpu_crtc->base);

	amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE;
	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);

	DC_LOG_PFLIP("crtc:%d[%p], pflip_stat:AMDGPU_FLIP_NONE, vrr[%d]-fp %d\n",
		     amdgpu_crtc->crtc_id, amdgpu_crtc,
		     vrr_active, (int) !e);
}
static void dm_crtc_handle_vblank(struct amdgpu_crtc *acrtc)
{
	struct drm_crtc *crtc = &acrtc->base;
	struct drm_device *dev = crtc->dev;
	unsigned long flags;

	drm_crtc_handle_vblank(crtc);

	spin_lock_irqsave(&dev->event_lock, flags);

	/* Send completion event for cursor-only commits */
	if (acrtc->event && acrtc->pflip_status != AMDGPU_FLIP_SUBMITTED) {
		drm_crtc_send_vblank_event(crtc, acrtc->event);
		drm_crtc_vblank_put(crtc);
		acrtc->event = NULL;
	}

	spin_unlock_irqrestore(&dev->event_lock, flags);
}
static void dm_vupdate_high_irq(void *interrupt_params)
{
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	struct amdgpu_crtc *acrtc;
	struct drm_device *drm_dev;
	struct drm_vblank_crtc *vblank;
	ktime_t frame_duration_ns, previous_timestamp;
	unsigned long flags;
	bool vrr_active;

	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VUPDATE);
	if (!acrtc)
		return;

	vrr_active = amdgpu_dm_vrr_active_irq(acrtc);
	drm_dev = acrtc->base.dev;
	vblank = &drm_dev->vblank[acrtc->base.index];
	previous_timestamp = atomic64_read(&irq_params->previous_timestamp);
	frame_duration_ns = vblank->time - previous_timestamp;

	if (frame_duration_ns > 0) {
		trace_amdgpu_refresh_rate_track(acrtc->base.index,
						frame_duration_ns,
						ktime_divns(NSEC_PER_SEC, frame_duration_ns));
		atomic64_set(&irq_params->previous_timestamp, vblank->time);
	}

	DC_LOG_VBLANK("crtc:%d, vupdate-vrr:%d\n",
		      acrtc->crtc_id, vrr_active);

	/* Core vblank handling is done here after end of front-porch in
	 * vrr mode, as vblank timestamping will give valid results
	 * while now done after front-porch. This will also deliver
	 * page-flip completion events that have been queued to us
	 * if a pageflip happened inside front-porch.
	 */
	if (vrr_active) {
		dm_crtc_handle_vblank(acrtc);

		/* BTR processing for pre-DCE12 ASICs */
		if (acrtc->dm_irq_params.stream &&
		    adev->family < AMDGPU_FAMILY_AI) {
			spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
			mod_freesync_handle_v_update(
				adev->dm.freesync_module,
				acrtc->dm_irq_params.stream,
				&acrtc->dm_irq_params.vrr_params);

			dc_stream_adjust_vmin_vmax(
				adev->dm.dc,
				acrtc->dm_irq_params.stream,
				&acrtc->dm_irq_params.vrr_params.adjust);
			spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
		}
	}
}
/**
 * dm_crtc_high_irq() - Handles CRTC interrupt
 * @interrupt_params: used for determining the CRTC instance
 *
 * Handles the CRTC/VSYNC interrupt by notifying DRM's VBLANK
 * event handler.
 */
static void dm_crtc_high_irq(void *interrupt_params)
{
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	struct amdgpu_crtc *acrtc;
	unsigned long flags;
	bool vrr_active;

	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VBLANK);
	if (!acrtc)
		return;

	vrr_active = amdgpu_dm_vrr_active_irq(acrtc);

	DC_LOG_VBLANK("crtc:%d, vupdate-vrr:%d, planes:%d\n", acrtc->crtc_id,
		      vrr_active, acrtc->dm_irq_params.active_planes);

	/*
	 * Core vblank handling at start of front-porch is only possible
	 * in non-vrr mode, as only there vblank timestamping will give
	 * valid results while done in front-porch. Otherwise defer it
	 * to dm_vupdate_high_irq after end of front-porch.
	 */
	if (!vrr_active)
		dm_crtc_handle_vblank(acrtc);

	/*
	 * Following stuff must happen at start of vblank, for crc
	 * computation and below-the-range btr support in vrr mode.
	 */
	amdgpu_dm_crtc_handle_crc_irq(&acrtc->base);

	/* BTR updates need to happen before VUPDATE on Vega and above. */
	if (adev->family < AMDGPU_FAMILY_AI)
		return;

	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);

	if (acrtc->dm_irq_params.stream &&
	    acrtc->dm_irq_params.vrr_params.supported &&
	    acrtc->dm_irq_params.freesync_config.state ==
		    VRR_STATE_ACTIVE_VARIABLE) {
		mod_freesync_handle_v_update(adev->dm.freesync_module,
					     acrtc->dm_irq_params.stream,
					     &acrtc->dm_irq_params.vrr_params);

		dc_stream_adjust_vmin_vmax(adev->dm.dc, acrtc->dm_irq_params.stream,
					   &acrtc->dm_irq_params.vrr_params.adjust);
	}

	/*
	 * If there aren't any active_planes then DCH HUBP may be clock-gated.
	 * In that case, pageflip completion interrupts won't fire and pageflip
	 * completion events won't get delivered. Prevent this by sending
	 * pending pageflip events from here if a flip is still pending.
	 *
	 * If any planes are enabled, use dm_pflip_high_irq() instead, to
	 * avoid race conditions between flip programming and completion,
	 * which could cause too early flip completion events.
	 */
	if (adev->family >= AMDGPU_FAMILY_RV &&
	    acrtc->pflip_status == AMDGPU_FLIP_SUBMITTED &&
	    acrtc->dm_irq_params.active_planes == 0) {
		if (acrtc->event) {
			drm_crtc_send_vblank_event(&acrtc->base, acrtc->event);
			acrtc->event = NULL;
			drm_crtc_vblank_put(&acrtc->base);
		}
		acrtc->pflip_status = AMDGPU_FLIP_NONE;
	}

	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
}
#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
/**
 * dm_dcn_vertical_interrupt0_high_irq() - Handles OTG Vertical interrupt0 for
 * DCN generation ASICs
 * @interrupt_params: interrupt parameters
 *
 * Used to set crc window/read out crc value at vertical line 0 position
 */
static void dm_dcn_vertical_interrupt0_high_irq(void *interrupt_params)
{
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	struct amdgpu_crtc *acrtc;

	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VLINE0);
	if (!acrtc)
		return;

	amdgpu_dm_crtc_handle_crc_window_irq(&acrtc->base);
}
#endif /* CONFIG_DRM_AMD_SECURE_DISPLAY */
/**
 * dmub_aux_setconfig_callback - Callback for AUX or SET_CONFIG command.
 * @adev: amdgpu_device pointer
 * @notify: dmub notification structure
 *
 * Dmub AUX or SET_CONFIG command completion processing callback.
 * Copies the dmub notification to DM, where it is read by the AUX-command
 * issuing thread, and signals the event to wake up that thread.
 */
static void dmub_aux_setconfig_callback(struct amdgpu_device *adev,
					struct dmub_notification *notify)
{
	if (adev->dm.dmub_notify)
		memcpy(adev->dm.dmub_notify, notify, sizeof(struct dmub_notification));
	if (notify->type == DMUB_NOTIFICATION_AUX_REPLY)
		complete(&adev->dm.dmub_aux_transfer_done);
}
/**
 * dmub_hpd_callback - DMUB HPD interrupt processing callback.
 * @adev: amdgpu_device pointer
 * @notify: dmub notification structure
 *
 * Dmub Hpd interrupt processing callback. Looks up the connector through
 * the link index and calls the helper to do the processing.
 */
static void dmub_hpd_callback(struct amdgpu_device *adev,
			      struct dmub_notification *notify)
{
	struct amdgpu_dm_connector *aconnector;
	struct amdgpu_dm_connector *hpd_aconnector = NULL;
	struct drm_connector *connector;
	struct drm_connector_list_iter iter;
	struct dc_link *link;
	uint8_t link_index = 0;
	struct drm_device *dev;

	if (adev == NULL)
		return;

	if (notify == NULL) {
		DRM_ERROR("DMUB HPD callback notification was NULL");
		return;
	}

	if (notify->link_index > adev->dm.dc->link_count) {
		DRM_ERROR("DMUB HPD index (%u) is abnormal", notify->link_index);
		return;
	}

	link_index = notify->link_index;
	link = adev->dm.dc->links[link_index];
	dev = adev->dm.ddev;

	drm_connector_list_iter_begin(dev, &iter);
	drm_for_each_connector_iter(connector, &iter) {
		aconnector = to_amdgpu_dm_connector(connector);
		if (link && aconnector->dc_link == link) {
			DRM_INFO("DMUB HPD callback: link_index=%u\n", link_index);
			hpd_aconnector = aconnector;
			break;
		}
	}
	drm_connector_list_iter_end(&iter);

	if (hpd_aconnector) {
		if (notify->type == DMUB_NOTIFICATION_HPD)
			handle_hpd_irq_helper(hpd_aconnector);
		else if (notify->type == DMUB_NOTIFICATION_HPD_IRQ)
			handle_hpd_rx_irq(hpd_aconnector);
	}
}
/**
 * register_dmub_notify_callback - Sets callback for DMUB notify
 * @adev: amdgpu_device pointer
 * @type: Type of dmub notification
 * @callback: Dmub interrupt callback function
 * @dmub_int_thread_offload: offload indicator
 *
 * API to register a dmub callback handler for a dmub notification.
 * Also sets an indicator for whether callback processing is to be
 * offloaded to the dmub interrupt handling thread.
 * Return: true if successfully registered, false if there is an existing registration
 */
static bool register_dmub_notify_callback(struct amdgpu_device *adev,
					  enum dmub_notification_type type,
					  dmub_notify_interrupt_callback_t callback,
					  bool dmub_int_thread_offload)
{
	if (callback != NULL && type < ARRAY_SIZE(adev->dm.dmub_thread_offload)) {
		adev->dm.dmub_callback[type] = callback;
		adev->dm.dmub_thread_offload[type] = dmub_int_thread_offload;
	} else
		return false;

	return true;
}
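/*
 * Usage (see amdgpu_dm_init() below): AUX replies are handled inline from
 * the outbox IRQ path, while HPD and HPD_IRQ notifications are offloaded to
 * the delayed_hpd_wq worker:
 *
 *	register_dmub_notify_callback(adev, DMUB_NOTIFICATION_AUX_REPLY,
 *				      dmub_aux_setconfig_callback, false);
 *	register_dmub_notify_callback(adev, DMUB_NOTIFICATION_HPD,
 *				      dmub_hpd_callback, true);
 */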
static void dm_handle_hpd_work(struct work_struct *work)
{
	struct dmub_hpd_work *dmub_hpd_wrk;

	dmub_hpd_wrk = container_of(work, struct dmub_hpd_work, handle_hpd_work);

	if (!dmub_hpd_wrk->dmub_notify) {
		DRM_ERROR("dmub_hpd_wrk dmub_notify is NULL");
		return;
	}

	if (dmub_hpd_wrk->dmub_notify->type < ARRAY_SIZE(dmub_hpd_wrk->adev->dm.dmub_callback)) {
		dmub_hpd_wrk->adev->dm.dmub_callback[dmub_hpd_wrk->dmub_notify->type](dmub_hpd_wrk->adev,
										      dmub_hpd_wrk->dmub_notify);
	}

	kfree(dmub_hpd_wrk->dmub_notify);
	kfree(dmub_hpd_wrk);
}
#define DMUB_TRACE_MAX_READ 64
/**
 * dm_dmub_outbox1_low_irq() - Handles Outbox interrupt
 * @interrupt_params: used for determining the Outbox instance
 *
 * Handles the Outbox Interrupt
 * event handler.
 */
static void dm_dmub_outbox1_low_irq(void *interrupt_params)
{
	struct dmub_notification notify;
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	struct amdgpu_display_manager *dm = &adev->dm;
	struct dmcub_trace_buf_entry entry = { 0 };
	uint32_t count = 0;
	struct dmub_hpd_work *dmub_hpd_wrk;
	struct dc_link *plink = NULL;

	if (dc_enable_dmub_notifications(adev->dm.dc) &&
	    irq_params->irq_src == DC_IRQ_SOURCE_DMCUB_OUTBOX) {

		do {
			dc_stat_get_dmub_notification(adev->dm.dc, &notify);
			if (notify.type >= ARRAY_SIZE(dm->dmub_thread_offload)) {
				DRM_ERROR("DM: notify type %d invalid!", notify.type);
				continue;
			}
			if (!dm->dmub_callback[notify.type]) {
				DRM_DEBUG_DRIVER("DMUB notification skipped, no handler: type=%d\n", notify.type);
				continue;
			}
			if (dm->dmub_thread_offload[notify.type] == true) {
				dmub_hpd_wrk = kzalloc(sizeof(*dmub_hpd_wrk), GFP_ATOMIC);
				if (!dmub_hpd_wrk) {
					DRM_ERROR("Failed to allocate dmub_hpd_wrk");
					return;
				}
				dmub_hpd_wrk->dmub_notify = kzalloc(sizeof(struct dmub_notification), GFP_ATOMIC);
				if (!dmub_hpd_wrk->dmub_notify) {
					kfree(dmub_hpd_wrk);
					DRM_ERROR("Failed to allocate dmub_hpd_wrk->dmub_notify");
					return;
				}
				INIT_WORK(&dmub_hpd_wrk->handle_hpd_work, dm_handle_hpd_work);
				memcpy(dmub_hpd_wrk->dmub_notify, &notify, sizeof(struct dmub_notification));
				dmub_hpd_wrk->adev = adev;
				if (notify.type == DMUB_NOTIFICATION_HPD) {
					plink = adev->dm.dc->links[notify.link_index];
					if (plink) {
						plink->hpd_status =
							notify.hpd_status == DP_HPD_PLUG;
					}
				}
				queue_work(adev->dm.delayed_hpd_wq, &dmub_hpd_wrk->handle_hpd_work);
			} else {
				dm->dmub_callback[notify.type](adev, &notify);
			}
		} while (notify.pending_notification);
	}

	do {
		if (dc_dmub_srv_get_dmub_outbox0_msg(dm->dc, &entry)) {
			trace_amdgpu_dmub_trace_high_irq(entry.trace_code, entry.tick_count,
							 entry.param0, entry.param1);

			DRM_DEBUG_DRIVER("trace_code:%u, tick_count:%u, param0:%u, param1:%u\n",
					 entry.trace_code, entry.tick_count, entry.param0, entry.param1);
		} else
			break;

		count++;

	} while (count <= DMUB_TRACE_MAX_READ);

	if (count > DMUB_TRACE_MAX_READ)
		DRM_DEBUG_DRIVER("Warning : count > DMUB_TRACE_MAX_READ");
}
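/*
 * DMUB_TRACE_MAX_READ bounds how many trace-buffer entries one interrupt
 * will drain; presumably this keeps time spent in the low-IRQ handler
 * bounded even if the firmware produces entries faster than the host
 * consumes them, hence the debug warning above when the cap is hit.
 */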
static int dm_set_clockgating_state(void *handle,
				    enum amd_clockgating_state state)
{
	return 0;
}

static int dm_set_powergating_state(void *handle,
				    enum amd_powergating_state state)
{
	return 0;
}

/* Prototypes of private functions */
static int dm_early_init(void *handle);
/* Allocate memory for FBC compressed data  */
static void amdgpu_dm_fbc_init(struct drm_connector *connector)
{
	struct drm_device *dev = connector->dev;
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct dm_compressor_info *compressor = &adev->dm.compressor;
	struct amdgpu_dm_connector *aconn = to_amdgpu_dm_connector(connector);
	struct drm_display_mode *mode;
	unsigned long max_size = 0;

	if (adev->dm.dc->fbc_compressor == NULL)
		return;

	if (aconn->dc_link->connector_signal != SIGNAL_TYPE_EDP)
		return;

	if (compressor->bo_ptr)
		return;

	list_for_each_entry(mode, &connector->modes, head) {
		if (max_size < mode->htotal * mode->vtotal)
			max_size = mode->htotal * mode->vtotal;
	}

	if (max_size) {
		int r = amdgpu_bo_create_kernel(adev, max_size * 4, PAGE_SIZE,
			    AMDGPU_GEM_DOMAIN_GTT, &compressor->bo_ptr,
			    &compressor->gpu_addr, &compressor->cpu_addr);

		if (r)
			DRM_ERROR("DM: Failed to initialize FBC\n");
		else {
			adev->dm.dc->ctx->fbc_gpu_addr = compressor->gpu_addr;
			DRM_INFO("DM: FBC alloc %lu\n", max_size*4);
		}
	}
}
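/*
 * Sizing note: max_size above is the largest htotal * vtotal among the
 * connector's reported modes, i.e. a pixel count; the max_size * 4
 * allocation presumably assumes 4 bytes per pixel (ARGB8888) for the
 * compressor buffer.
 */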
static int amdgpu_dm_audio_component_get_eld(struct device *kdev, int port,
					     int pipe, bool *enabled,
					     unsigned char *buf, int max_bytes)
{
	struct drm_device *dev = dev_get_drvdata(kdev);
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct drm_connector *connector;
	struct drm_connector_list_iter conn_iter;
	struct amdgpu_dm_connector *aconnector;
	int ret = 0;

	*enabled = false;

	mutex_lock(&adev->dm.audio_lock);

	drm_connector_list_iter_begin(dev, &conn_iter);
	drm_for_each_connector_iter(connector, &conn_iter) {
		aconnector = to_amdgpu_dm_connector(connector);
		if (aconnector->audio_inst != port)
			continue;

		*enabled = true;
		ret = drm_eld_size(connector->eld);
		memcpy(buf, connector->eld, min(max_bytes, ret));

		break;
	}
	drm_connector_list_iter_end(&conn_iter);

	mutex_unlock(&adev->dm.audio_lock);

	DRM_DEBUG_KMS("Get ELD : idx=%d ret=%d en=%d\n", port, ret, *enabled);

	return ret;
}
static const struct drm_audio_component_ops amdgpu_dm_audio_component_ops = {
	.get_eld = amdgpu_dm_audio_component_get_eld,
};
static int amdgpu_dm_audio_component_bind(struct device *kdev,
					  struct device *hda_kdev, void *data)
{
	struct drm_device *dev = dev_get_drvdata(kdev);
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct drm_audio_component *acomp = data;

	acomp->ops = &amdgpu_dm_audio_component_ops;
	acomp->dev = kdev;
	adev->dm.audio_component = acomp;

	return 0;
}
static void amdgpu_dm_audio_component_unbind(struct device *kdev,
					     struct device *hda_kdev, void *data)
{
	struct drm_device *dev = dev_get_drvdata(kdev);
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct drm_audio_component *acomp = data;

	acomp->ops = NULL;
	acomp->dev = NULL;
	adev->dm.audio_component = NULL;
}

static const struct component_ops amdgpu_dm_audio_component_bind_ops = {
	.bind	= amdgpu_dm_audio_component_bind,
	.unbind	= amdgpu_dm_audio_component_unbind,
};
static int amdgpu_dm_audio_init(struct amdgpu_device *adev)
{
	int i, ret;

	if (!amdgpu_audio)
		return 0;

	adev->mode_info.audio.enabled = true;

	adev->mode_info.audio.num_pins = adev->dm.dc->res_pool->audio_count;

	for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
		adev->mode_info.audio.pin[i].channels = -1;
		adev->mode_info.audio.pin[i].rate = -1;
		adev->mode_info.audio.pin[i].bits_per_sample = -1;
		adev->mode_info.audio.pin[i].status_bits = 0;
		adev->mode_info.audio.pin[i].category_code = 0;
		adev->mode_info.audio.pin[i].connected = false;
		adev->mode_info.audio.pin[i].id =
			adev->dm.dc->res_pool->audios[i]->inst;
		adev->mode_info.audio.pin[i].offset = 0;
	}

	ret = component_add(adev->dev, &amdgpu_dm_audio_component_bind_ops);
	if (ret < 0)
		return ret;

	adev->dm.audio_registered = true;

	return 0;
}
static void amdgpu_dm_audio_fini(struct amdgpu_device *adev)
{
	if (!amdgpu_audio)
		return;

	if (!adev->mode_info.audio.enabled)
		return;

	if (adev->dm.audio_registered) {
		component_del(adev->dev, &amdgpu_dm_audio_component_bind_ops);
		adev->dm.audio_registered = false;
	}

	/* TODO: Disable audio? */

	adev->mode_info.audio.enabled = false;
}
static void amdgpu_dm_audio_eld_notify(struct amdgpu_device *adev, int pin)
{
	struct drm_audio_component *acomp = adev->dm.audio_component;

	if (acomp && acomp->audio_ops && acomp->audio_ops->pin_eld_notify) {
		DRM_DEBUG_KMS("Notify ELD: %d\n", pin);

		acomp->audio_ops->pin_eld_notify(acomp->audio_ops->audio_ptr,
						 pin, -1);
	}
}
static int dm_dmub_hw_init(struct amdgpu_device *adev)
{
	const struct dmcub_firmware_header_v1_0 *hdr;
	struct dmub_srv *dmub_srv = adev->dm.dmub_srv;
	struct dmub_srv_fb_info *fb_info = adev->dm.dmub_fb_info;
	const struct firmware *dmub_fw = adev->dm.dmub_fw;
	struct dmcu *dmcu = adev->dm.dc->res_pool->dmcu;
	struct abm *abm = adev->dm.dc->res_pool->abm;
	struct dmub_srv_hw_params hw_params;
	enum dmub_status status;
	const unsigned char *fw_inst_const, *fw_bss_data;
	uint32_t i, fw_inst_const_size, fw_bss_data_size;
	bool has_hw_support;

	if (!dmub_srv)
		/* DMUB isn't supported on the ASIC. */
		return 0;

	if (!fb_info) {
		DRM_ERROR("No framebuffer info for DMUB service.\n");
		return -EINVAL;
	}

	if (!dmub_fw) {
		/* Firmware required for DMUB support. */
		DRM_ERROR("No firmware provided for DMUB.\n");
		return -EINVAL;
	}

	status = dmub_srv_has_hw_support(dmub_srv, &has_hw_support);
	if (status != DMUB_STATUS_OK) {
		DRM_ERROR("Error checking HW support for DMUB: %d\n", status);
		return -EINVAL;
	}

	if (!has_hw_support) {
		DRM_INFO("DMUB unsupported on ASIC\n");
		return 0;
	}

	/* Reset DMCUB if it was previously running - before we overwrite its memory. */
	status = dmub_srv_hw_reset(dmub_srv);
	if (status != DMUB_STATUS_OK)
		DRM_WARN("Error resetting DMUB HW: %d\n", status);

	hdr = (const struct dmcub_firmware_header_v1_0 *)dmub_fw->data;

	fw_inst_const = dmub_fw->data +
			le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
			PSP_HEADER_BYTES;

	fw_bss_data = dmub_fw->data +
		      le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
		      le32_to_cpu(hdr->inst_const_bytes);

	/* Copy firmware and bios info into FB memory. */
	fw_inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
			     PSP_HEADER_BYTES - PSP_FOOTER_BYTES;

	fw_bss_data_size = le32_to_cpu(hdr->bss_data_bytes);

	/* if adev->firmware.load_type == AMDGPU_FW_LOAD_PSP,
	 * amdgpu_ucode_init_single_fw will load dmub firmware
	 * fw_inst_const part to cw0; otherwise, the firmware back door load
	 * will be done by dm_dmub_hw_init
	 */
	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
		memcpy(fb_info->fb[DMUB_WINDOW_0_INST_CONST].cpu_addr, fw_inst_const,
		       fw_inst_const_size);
	}

	if (fw_bss_data_size)
		memcpy(fb_info->fb[DMUB_WINDOW_2_BSS_DATA].cpu_addr,
		       fw_bss_data, fw_bss_data_size);

	/* Copy firmware bios info into FB memory. */
	memcpy(fb_info->fb[DMUB_WINDOW_3_VBIOS].cpu_addr, adev->bios,
	       adev->bios_size);

	/* Reset regions that need to be reset. */
	memset(fb_info->fb[DMUB_WINDOW_4_MAILBOX].cpu_addr, 0,
	       fb_info->fb[DMUB_WINDOW_4_MAILBOX].size);

	memset(fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].cpu_addr, 0,
	       fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].size);

	memset(fb_info->fb[DMUB_WINDOW_6_FW_STATE].cpu_addr, 0,
	       fb_info->fb[DMUB_WINDOW_6_FW_STATE].size);

	/* Initialize hardware. */
	memset(&hw_params, 0, sizeof(hw_params));
	hw_params.fb_base = adev->gmc.fb_start;
	hw_params.fb_offset = adev->gmc.aper_base;

	/* backdoor load firmware and trigger dmub running */
	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
		hw_params.load_inst_const = true;

	if (dmcu)
		hw_params.psp_version = dmcu->psp_version;

	for (i = 0; i < fb_info->num_fb; ++i)
		hw_params.fb[i] = &fb_info->fb[i];

	switch (adev->ip_versions[DCE_HWIP][0]) {
	case IP_VERSION(3, 1, 3): /* Only for this asic hw internal rev B0 */
		hw_params.dpia_supported = true;
		hw_params.disable_dpia = adev->dm.dc->debug.dpia_debug.bits.disable_dpia;
		break;
	default:
		break;
	}

	status = dmub_srv_hw_init(dmub_srv, &hw_params);
	if (status != DMUB_STATUS_OK) {
		DRM_ERROR("Error initializing DMUB HW: %d\n", status);
		return -EINVAL;
	}

	/* Wait for firmware load to finish. */
	status = dmub_srv_wait_for_auto_load(dmub_srv, 100000);
	if (status != DMUB_STATUS_OK)
		DRM_WARN("Wait for DMUB auto-load failed: %d\n", status);

	/* Init DMCU and ABM if available. */
	if (dmcu && abm) {
		dmcu->funcs->dmcu_init(dmcu);
		abm->dmcu_is_running = dmcu->funcs->is_dmcu_initialized(dmcu);
	}

	if (!adev->dm.dc->ctx->dmub_srv)
		adev->dm.dc->ctx->dmub_srv = dc_dmub_srv_create(adev->dm.dc, dmub_srv);
	if (!adev->dm.dc->ctx->dmub_srv) {
		DRM_ERROR("Couldn't allocate DC DMUB server!\n");
		return -ENOMEM;
	}

	DRM_INFO("DMUB hardware initialized: version=0x%08X\n",
		 adev->dm.dmcub_fw_version);

	return 0;
}
static void dm_dmub_hw_resume(struct amdgpu_device *adev)
{
	struct dmub_srv *dmub_srv = adev->dm.dmub_srv;
	enum dmub_status status;
	bool init;

	if (!dmub_srv)
		/* DMUB isn't supported on the ASIC. */
		return;

	status = dmub_srv_is_hw_init(dmub_srv, &init);
	if (status != DMUB_STATUS_OK)
		DRM_WARN("DMUB hardware init check failed: %d\n", status);

	if (status == DMUB_STATUS_OK && init) {
		/* Wait for firmware load to finish. */
		status = dmub_srv_wait_for_auto_load(dmub_srv, 100000);
		if (status != DMUB_STATUS_OK)
			DRM_WARN("Wait for DMUB auto-load failed: %d\n", status);
	} else {
		/* Perform the full hardware initialization. */
		dm_dmub_hw_init(adev);
	}
}
static void mmhub_read_system_context(struct amdgpu_device *adev, struct dc_phy_addr_space_config *pa_config)
{
	uint64_t pt_base;
	uint32_t logical_addr_low;
	uint32_t logical_addr_high;
	uint32_t agp_base, agp_bot, agp_top;
	PHYSICAL_ADDRESS_LOC page_table_start, page_table_end, page_table_base;

	memset(pa_config, 0, sizeof(*pa_config));

	logical_addr_low = min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18;
	pt_base = amdgpu_gmc_pd_addr(adev->gart.bo);

	if (adev->apu_flags & AMD_APU_IS_RAVEN2)
		/*
		 * Raven2 has a HW issue that it is unable to use the vram which
		 * is out of MC_VM_SYSTEM_APERTURE_HIGH_ADDR. So here is the
		 * workaround that increases the system aperture high address
		 * (add 1) to get rid of the VM fault and hardware hang.
		 */
		logical_addr_high = max((adev->gmc.fb_end >> 18) + 0x1, adev->gmc.agp_end >> 18);
	else
		logical_addr_high = max(adev->gmc.fb_end, adev->gmc.agp_end) >> 18;

	agp_base = 0;
	agp_bot = adev->gmc.agp_start >> 24;
	agp_top = adev->gmc.agp_end >> 24;

	page_table_start.high_part = (u32)(adev->gmc.gart_start >> 44) & 0xF;
	page_table_start.low_part = (u32)(adev->gmc.gart_start >> 12);
	page_table_end.high_part = (u32)(adev->gmc.gart_end >> 44) & 0xF;
	page_table_end.low_part = (u32)(adev->gmc.gart_end >> 12);
	page_table_base.high_part = upper_32_bits(pt_base) & 0xF;
	page_table_base.low_part = lower_32_bits(pt_base);

	pa_config->system_aperture.start_addr = (uint64_t)logical_addr_low << 18;
	pa_config->system_aperture.end_addr = (uint64_t)logical_addr_high << 18;

	pa_config->system_aperture.agp_base = (uint64_t)agp_base << 24;
	pa_config->system_aperture.agp_bot = (uint64_t)agp_bot << 24;
	pa_config->system_aperture.agp_top = (uint64_t)agp_top << 24;

	pa_config->system_aperture.fb_base = adev->gmc.fb_start;
	pa_config->system_aperture.fb_offset = adev->gmc.aper_base;
	pa_config->system_aperture.fb_top = adev->gmc.fb_end;

	pa_config->gart_config.page_table_start_addr = page_table_start.quad_part << 12;
	pa_config->gart_config.page_table_end_addr = page_table_end.quad_part << 12;
	pa_config->gart_config.page_table_base_addr = page_table_base.quad_part;

	pa_config->is_hvm_enabled = 0;
}
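/*
 * Note on the shift widths above: they mirror hardware programming
 * granularity. The system aperture registers work in 256 KiB units
 * (1 << 18), the AGP aperture in 16 MiB units (1 << 24), and the GART
 * page-table addresses in 4 KiB pages (1 << 12); each value is shifted
 * down to register format and shifted back up to a byte address for DC.
 */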
static void vblank_control_worker(struct work_struct *work)
{
	struct vblank_control_work *vblank_work =
		container_of(work, struct vblank_control_work, work);
	struct amdgpu_display_manager *dm = vblank_work->dm;

	mutex_lock(&dm->dc_lock);

	if (vblank_work->enable)
		dm->active_vblank_irq_count++;
	else if (dm->active_vblank_irq_count)
		dm->active_vblank_irq_count--;

	dc_allow_idle_optimizations(dm->dc, dm->active_vblank_irq_count == 0);

	DRM_DEBUG_KMS("Allow idle optimizations (MALL): %d\n", dm->active_vblank_irq_count == 0);

	/*
	 * Control PSR based on vblank requirements from OS
	 *
	 * If panel supports PSR SU, there's no need to disable PSR when OS is
	 * submitting fast atomic commits (we infer this by whether the OS
	 * requests vblank events). Fast atomic commits will simply trigger a
	 * full-frame-update (FFU); a specific case of selective-update (SU)
	 * where the SU region is the full hactive*vactive region. See
	 * fill_dc_dirty_rects().
	 */
	if (vblank_work->stream && vblank_work->stream->link) {
		if (vblank_work->enable) {
			if (vblank_work->stream->link->psr_settings.psr_version < DC_PSR_VERSION_SU_1 &&
			    vblank_work->stream->link->psr_settings.psr_allow_active)
				amdgpu_dm_psr_disable(vblank_work->stream);
		} else if (vblank_work->stream->link->psr_settings.psr_feature_enabled &&
			   !vblank_work->stream->link->psr_settings.psr_allow_active &&
			   vblank_work->acrtc->dm_irq_params.allow_psr_entry) {
			amdgpu_dm_psr_enable(vblank_work->stream);
		}
	}

	mutex_unlock(&dm->dc_lock);

	dc_stream_release(vblank_work->stream);

	kfree(vblank_work);
}
static void dm_handle_hpd_rx_offload_work(struct work_struct *work)
{
	struct hpd_rx_irq_offload_work *offload_work;
	struct amdgpu_dm_connector *aconnector;
	struct dc_link *dc_link;
	struct amdgpu_device *adev;
	enum dc_connection_type new_connection_type = dc_connection_none;
	unsigned long flags;

	offload_work = container_of(work, struct hpd_rx_irq_offload_work, work);
	aconnector = offload_work->offload_wq->aconnector;

	if (!aconnector) {
		DRM_ERROR("Can't retrieve aconnector in hpd_rx_irq_offload_work");
		goto skip;
	}

	adev = drm_to_adev(aconnector->base.dev);
	dc_link = aconnector->dc_link;

	mutex_lock(&aconnector->hpd_lock);
	if (!dc_link_detect_sink(dc_link, &new_connection_type))
		DRM_ERROR("KMS: Failed to detect connector\n");
	mutex_unlock(&aconnector->hpd_lock);

	if (new_connection_type == dc_connection_none)
		goto skip;

	if (amdgpu_in_reset(adev))
		goto skip;

	mutex_lock(&adev->dm.dc_lock);
	if (offload_work->data.bytes.device_service_irq.bits.AUTOMATED_TEST)
		dc_link_dp_handle_automated_test(dc_link);
	else if ((dc_link->connector_signal != SIGNAL_TYPE_EDP) &&
		 hpd_rx_irq_check_link_loss_status(dc_link, &offload_work->data) &&
		 dc_link_dp_allow_hpd_rx_irq(dc_link)) {
		dc_link_dp_handle_link_loss(dc_link);
		spin_lock_irqsave(&offload_work->offload_wq->offload_lock, flags);
		offload_work->offload_wq->is_handling_link_loss = false;
		spin_unlock_irqrestore(&offload_work->offload_wq->offload_lock, flags);
	}
	mutex_unlock(&adev->dm.dc_lock);

skip:
	kfree(offload_work);
}
static struct hpd_rx_irq_offload_work_queue *hpd_rx_irq_create_workqueue(struct dc *dc)
{
	int max_caps = dc->caps.max_links;
	int i = 0;
	struct hpd_rx_irq_offload_work_queue *hpd_rx_offload_wq = NULL;

	hpd_rx_offload_wq = kcalloc(max_caps, sizeof(*hpd_rx_offload_wq), GFP_KERNEL);

	if (!hpd_rx_offload_wq)
		return NULL;

	for (i = 0; i < max_caps; i++) {
		hpd_rx_offload_wq[i].wq =
			create_singlethread_workqueue("amdgpu_dm_hpd_rx_offload_wq");

		if (hpd_rx_offload_wq[i].wq == NULL) {
			DRM_ERROR("create amdgpu_dm_hpd_rx_offload_wq fail!");
			goto out_err;
		}

		spin_lock_init(&hpd_rx_offload_wq[i].offload_lock);
	}

	return hpd_rx_offload_wq;

out_err:
	/* Free any workqueues created before the failure to avoid leaking them. */
	while (--i >= 0)
		destroy_workqueue(hpd_rx_offload_wq[i].wq);
	kfree(hpd_rx_offload_wq);
	return NULL;
}
struct amdgpu_stutter_quirk {
	u16 chip_vendor;
	u16 chip_device;
	u16 subsys_vendor;
	u16 subsys_device;
	u8 revision;
};

static const struct amdgpu_stutter_quirk amdgpu_stutter_quirk_list[] = {
	/* https://bugzilla.kernel.org/show_bug.cgi?id=214417 */
	{ 0x1002, 0x15dd, 0x1002, 0x15dd, 0xc8 },
	{ 0, 0, 0, 0, 0 },
};
static bool dm_should_disable_stutter(struct pci_dev *pdev)
{
	const struct amdgpu_stutter_quirk *p = amdgpu_stutter_quirk_list;

	while (p && p->chip_device != 0) {
		if (pdev->vendor == p->chip_vendor &&
		    pdev->device == p->chip_device &&
		    pdev->subsystem_vendor == p->subsys_vendor &&
		    pdev->subsystem_device == p->subsys_device &&
		    pdev->revision == p->revision) {
			return true;
		}
		++p;
	}
	return false;
}
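/*
 * Quirk entries are matched against the full PCI identity (vendor/device,
 * subsystem vendor/device and revision); the single entry above covers the
 * Raven system from the bugzilla report linked in the table, where stutter
 * mode has to stay disabled.
 */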
static const struct dmi_system_id hpd_disconnect_quirk_table[] = {
	{
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
			DMI_MATCH(DMI_PRODUCT_NAME, "Precision 3660"),
		},
	},
	{
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
			DMI_MATCH(DMI_PRODUCT_NAME, "Precision 3260"),
		},
	},
	{
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
			DMI_MATCH(DMI_PRODUCT_NAME, "Precision 3460"),
		},
	},
	{}
};
static void retrieve_dmi_info(struct amdgpu_display_manager *dm)
{
	const struct dmi_system_id *dmi_id;

	dm->aux_hpd_discon_quirk = false;

	dmi_id = dmi_first_match(hpd_disconnect_quirk_table);

	if (dmi_id) {
		dm->aux_hpd_discon_quirk = true;
		DRM_INFO("aux_hpd_discon_quirk attached\n");
	}
}
static int amdgpu_dm_init(struct amdgpu_device *adev)
{
	struct dc_init_data init_data;
#ifdef CONFIG_DRM_AMD_DC_HDCP
	struct dc_callback_init init_params;
#endif
	int r;

	adev->dm.ddev = adev_to_drm(adev);
	adev->dm.adev = adev;

	/* Zero all the fields */
	memset(&init_data, 0, sizeof(init_data));
#ifdef CONFIG_DRM_AMD_DC_HDCP
	memset(&init_params, 0, sizeof(init_params));
#endif

	mutex_init(&adev->dm.dc_lock);
	mutex_init(&adev->dm.audio_lock);
	spin_lock_init(&adev->dm.vblank_lock);

	if (amdgpu_dm_irq_init(adev)) {
		DRM_ERROR("amdgpu: failed to initialize DM IRQ support.\n");
		goto error;
	}
	init_data.asic_id.chip_family = adev->family;

	init_data.asic_id.pci_revision_id = adev->pdev->revision;
	init_data.asic_id.hw_internal_rev = adev->external_rev_id;
	init_data.asic_id.chip_id = adev->pdev->device;

	init_data.asic_id.vram_width = adev->gmc.vram_width;
	/* TODO: initialize init_data.asic_id.vram_type here!!!! */
	init_data.asic_id.atombios_base_address =
		adev->mode_info.atom_context->bios;

	init_data.driver = adev;

	adev->dm.cgs_device = amdgpu_cgs_create_device(adev);

	if (!adev->dm.cgs_device) {
		DRM_ERROR("amdgpu: failed to create cgs device.\n");
		goto error;
	}

	init_data.cgs_device = adev->dm.cgs_device;

	init_data.dce_environment = DCE_ENV_PRODUCTION_DRV;
	switch (adev->ip_versions[DCE_HWIP][0]) {
	case IP_VERSION(2, 1, 0):
		switch (adev->dm.dmcub_fw_version) {
		case 0: /* development */
		case 0x1: /* linux-firmware.git hash 6d9f399 */
		case 0x01000000: /* linux-firmware.git hash 9a0b0f4 */
			init_data.flags.disable_dmcu = false;
			break;
		default:
			init_data.flags.disable_dmcu = true;
		}
		break;
	case IP_VERSION(2, 0, 3):
		init_data.flags.disable_dmcu = true;
		break;
	default:
		break;
	}

	switch (adev->asic_type) {
	case CHIP_CARRIZO:
	case CHIP_STONEY:
		init_data.flags.gpu_vm_support = true;
		break;
	default:
		switch (adev->ip_versions[DCE_HWIP][0]) {
		case IP_VERSION(1, 0, 0):
		case IP_VERSION(1, 0, 1):
			/* enable S/G on PCO and RV2 */
			if ((adev->apu_flags & AMD_APU_IS_RAVEN2) ||
			    (adev->apu_flags & AMD_APU_IS_PICASSO))
				init_data.flags.gpu_vm_support = true;
			break;
		case IP_VERSION(2, 1, 0):
		case IP_VERSION(3, 0, 1):
		case IP_VERSION(3, 1, 2):
		case IP_VERSION(3, 1, 3):
		case IP_VERSION(3, 1, 5):
		case IP_VERSION(3, 1, 6):
			init_data.flags.gpu_vm_support = true;
			break;
		default:
			break;
		}
		break;
	}
	if (init_data.flags.gpu_vm_support)
		adev->mode_info.gpu_vm_support = true;

	if (amdgpu_dc_feature_mask & DC_FBC_MASK)
		init_data.flags.fbc_support = true;

	if (amdgpu_dc_feature_mask & DC_MULTI_MON_PP_MCLK_SWITCH_MASK)
		init_data.flags.multi_mon_pp_mclk_switch = true;

	if (amdgpu_dc_feature_mask & DC_DISABLE_FRACTIONAL_PWM_MASK)
		init_data.flags.disable_fractional_pwm = true;

	if (amdgpu_dc_feature_mask & DC_EDP_NO_POWER_SEQUENCING)
		init_data.flags.edp_no_power_sequencing = true;

	if (amdgpu_dc_feature_mask & DC_DISABLE_LTTPR_DP1_4A)
		init_data.flags.allow_lttpr_non_transparent_mode.bits.DP1_4A = true;
	if (amdgpu_dc_feature_mask & DC_DISABLE_LTTPR_DP2_0)
		init_data.flags.allow_lttpr_non_transparent_mode.bits.DP2_0 = true;

	init_data.flags.seamless_boot_edp_requested = false;

	if (check_seamless_boot_capability(adev)) {
		init_data.flags.seamless_boot_edp_requested = true;
		init_data.flags.allow_seamless_boot_optimization = true;
		DRM_INFO("Seamless boot condition check passed\n");
	}

	init_data.flags.enable_mipi_converter_optimization = true;

	init_data.dcn_reg_offsets = adev->reg_offset[DCE_HWIP][0];
	init_data.nbio_reg_offsets = adev->reg_offset[NBIO_HWIP][0];

	INIT_LIST_HEAD(&adev->dm.da_list);

	retrieve_dmi_info(&adev->dm);
	/* Display Core create. */
	adev->dm.dc = dc_create(&init_data);

	if (adev->dm.dc) {
		DRM_INFO("Display Core initialized with v%s!\n", DC_VER);
	} else {
		DRM_INFO("Display Core failed to initialize with v%s!\n", DC_VER);
		goto error;
	}

	if (amdgpu_dc_debug_mask & DC_DISABLE_PIPE_SPLIT) {
		adev->dm.dc->debug.force_single_disp_pipe_split = false;
		adev->dm.dc->debug.pipe_split_policy = MPC_SPLIT_AVOID;
	}

	if (adev->asic_type != CHIP_CARRIZO && adev->asic_type != CHIP_STONEY)
		adev->dm.dc->debug.disable_stutter = amdgpu_pp_feature_mask & PP_STUTTER_MODE ? false : true;
	if (dm_should_disable_stutter(adev->pdev))
		adev->dm.dc->debug.disable_stutter = true;

	if (amdgpu_dc_debug_mask & DC_DISABLE_STUTTER)
		adev->dm.dc->debug.disable_stutter = true;

	if (amdgpu_dc_debug_mask & DC_DISABLE_DSC) {
		adev->dm.dc->debug.disable_dsc = true;
		adev->dm.dc->debug.disable_dsc_edp = true;
	}

	if (amdgpu_dc_debug_mask & DC_DISABLE_CLOCK_GATING)
		adev->dm.dc->debug.disable_clock_gate = true;

	if (amdgpu_dc_debug_mask & DC_FORCE_SUBVP_MCLK_SWITCH)
		adev->dm.dc->debug.force_subvp_mclk_switch = true;

	r = dm_dmub_hw_init(adev);
	if (r) {
		DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
		goto error;
	}

	dc_hardware_init(adev->dm.dc);
	adev->dm.hpd_rx_offload_wq = hpd_rx_irq_create_workqueue(adev->dm.dc);
	if (!adev->dm.hpd_rx_offload_wq) {
		DRM_ERROR("amdgpu: failed to create hpd rx offload workqueue.\n");
		goto error;
	}

	if ((adev->flags & AMD_IS_APU) && (adev->asic_type >= CHIP_CARRIZO)) {
		struct dc_phy_addr_space_config pa_config;

		mmhub_read_system_context(adev, &pa_config);

		// Call the DC init_memory func
		dc_setup_system_context(adev->dm.dc, &pa_config);
	}

	adev->dm.freesync_module = mod_freesync_create(adev->dm.dc);
	if (!adev->dm.freesync_module) {
		DRM_ERROR(
		"amdgpu: failed to initialize freesync_module.\n");
	} else
		DRM_DEBUG_DRIVER("amdgpu: freesync_module init done %p.\n",
				 adev->dm.freesync_module);

	amdgpu_dm_init_color_mod();

	if (adev->dm.dc->caps.max_links > 0) {
		adev->dm.vblank_control_workqueue =
			create_singlethread_workqueue("dm_vblank_control_workqueue");
		if (!adev->dm.vblank_control_workqueue)
			DRM_ERROR("amdgpu: failed to initialize vblank_workqueue.\n");
	}
#ifdef CONFIG_DRM_AMD_DC_HDCP
	if (adev->dm.dc->caps.max_links > 0 && adev->family >= AMDGPU_FAMILY_RV) {
		adev->dm.hdcp_workqueue = hdcp_create_workqueue(adev, &init_params.cp_psp, adev->dm.dc);

		if (!adev->dm.hdcp_workqueue)
			DRM_ERROR("amdgpu: failed to initialize hdcp_workqueue.\n");
		else
			DRM_DEBUG_DRIVER("amdgpu: hdcp_workqueue init done %p.\n", adev->dm.hdcp_workqueue);

		dc_init_callbacks(adev->dm.dc, &init_params);
	}
#endif
#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
	adev->dm.crc_rd_wrk = amdgpu_dm_crtc_secure_display_create_work();
#endif
	if (dc_is_dmub_outbox_supported(adev->dm.dc)) {
		init_completion(&adev->dm.dmub_aux_transfer_done);
		adev->dm.dmub_notify = kzalloc(sizeof(struct dmub_notification), GFP_KERNEL);
		if (!adev->dm.dmub_notify) {
			DRM_INFO("amdgpu: fail to allocate adev->dm.dmub_notify");
			goto error;
		}

		adev->dm.delayed_hpd_wq = create_singlethread_workqueue("amdgpu_dm_hpd_wq");
		if (!adev->dm.delayed_hpd_wq) {
			DRM_ERROR("amdgpu: failed to create hpd offload workqueue.\n");
			goto error;
		}

		amdgpu_dm_outbox_init(adev);
		if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_AUX_REPLY,
						   dmub_aux_setconfig_callback, false)) {
			DRM_ERROR("amdgpu: fail to register dmub aux callback");
			goto error;
		}
		if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_HPD, dmub_hpd_callback, true)) {
			DRM_ERROR("amdgpu: fail to register dmub hpd callback");
			goto error;
		}
		if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_HPD_IRQ, dmub_hpd_callback, true)) {
			DRM_ERROR("amdgpu: fail to register dmub hpd callback");
			goto error;
		}
	}
	if (amdgpu_dm_initialize_drm_device(adev)) {
		DRM_ERROR(
		"amdgpu: failed to initialize sw for display support.\n");
		goto error;
	}

	/* Enable outbox notification only after IRQ handlers are registered and DMUB is alive.
	 * It is expected that DMUB will resend any pending notifications at this point, for
	 * example HPD from DPIA.
	 */
	if (dc_is_dmub_outbox_supported(adev->dm.dc))
		dc_enable_dmub_outbox(adev->dm.dc);

	/* create fake encoders for MST */
	dm_dp_create_fake_mst_encoders(adev);

	/* TODO: Add_display_info? */

	/* TODO use dynamic cursor width */
	adev_to_drm(adev)->mode_config.cursor_width = adev->dm.dc->caps.max_cursor_size;
	adev_to_drm(adev)->mode_config.cursor_height = adev->dm.dc->caps.max_cursor_size;

	if (drm_vblank_init(adev_to_drm(adev), adev->dm.display_indexes_num)) {
		DRM_ERROR(
		"amdgpu: failed to initialize sw for display support.\n");
		goto error;
	}

	DRM_DEBUG_DRIVER("KMS initialized.\n");

	return 0;

error:
	amdgpu_dm_fini(adev);

	return -EINVAL;
}
static int amdgpu_dm_early_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	amdgpu_dm_audio_fini(adev);

	return 0;
}
static void amdgpu_dm_fini(struct amdgpu_device *adev)
{
	int i;

	if (adev->dm.vblank_control_workqueue) {
		destroy_workqueue(adev->dm.vblank_control_workqueue);
		adev->dm.vblank_control_workqueue = NULL;
	}

	for (i = 0; i < adev->dm.display_indexes_num; i++) {
		drm_encoder_cleanup(&adev->dm.mst_encoders[i].base);
	}

	amdgpu_dm_destroy_drm_device(&adev->dm);

#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
	if (adev->dm.crc_rd_wrk) {
		flush_work(&adev->dm.crc_rd_wrk->notify_ta_work);
		kfree(adev->dm.crc_rd_wrk);
		adev->dm.crc_rd_wrk = NULL;
	}
#endif
#ifdef CONFIG_DRM_AMD_DC_HDCP
	if (adev->dm.hdcp_workqueue) {
		hdcp_destroy(&adev->dev->kobj, adev->dm.hdcp_workqueue);
		adev->dm.hdcp_workqueue = NULL;
	}

	if (adev->dm.dc)
		dc_deinit_callbacks(adev->dm.dc);
#endif

	dc_dmub_srv_destroy(&adev->dm.dc->ctx->dmub_srv);

	if (dc_enable_dmub_notifications(adev->dm.dc)) {
		kfree(adev->dm.dmub_notify);
		adev->dm.dmub_notify = NULL;
		destroy_workqueue(adev->dm.delayed_hpd_wq);
		adev->dm.delayed_hpd_wq = NULL;
	}

	if (adev->dm.dmub_bo)
		amdgpu_bo_free_kernel(&adev->dm.dmub_bo,
				      &adev->dm.dmub_bo_gpu_addr,
				      &adev->dm.dmub_bo_cpu_addr);

	if (adev->dm.hpd_rx_offload_wq) {
		for (i = 0; i < adev->dm.dc->caps.max_links; i++) {
			if (adev->dm.hpd_rx_offload_wq[i].wq) {
				destroy_workqueue(adev->dm.hpd_rx_offload_wq[i].wq);
				adev->dm.hpd_rx_offload_wq[i].wq = NULL;
			}
		}

		kfree(adev->dm.hpd_rx_offload_wq);
		adev->dm.hpd_rx_offload_wq = NULL;
	}

	/* DC Destroy TODO: Replace destroy DAL */
	if (adev->dm.dc)
		dc_destroy(&adev->dm.dc);
	/*
	 * TODO: pageflip, vblank interrupt
	 *
	 * amdgpu_dm_irq_fini(adev);
	 */

	if (adev->dm.cgs_device) {
		amdgpu_cgs_destroy_device(adev->dm.cgs_device);
		adev->dm.cgs_device = NULL;
	}
	if (adev->dm.freesync_module) {
		mod_freesync_destroy(adev->dm.freesync_module);
		adev->dm.freesync_module = NULL;
	}

	mutex_destroy(&adev->dm.audio_lock);
	mutex_destroy(&adev->dm.dc_lock);
}
static int load_dmcu_fw(struct amdgpu_device *adev)
{
	const char *fw_name_dmcu = NULL;
	int r;
	const struct dmcu_firmware_header_v1_0 *hdr;

	switch (adev->asic_type) {
#if defined(CONFIG_DRM_AMD_DC_SI)
	case CHIP_TAHITI:
	case CHIP_PITCAIRN:
	case CHIP_VERDE:
	case CHIP_OLAND:
#endif
	case CHIP_BONAIRE:
	case CHIP_HAWAII:
	case CHIP_KAVERI:
	case CHIP_KABINI:
	case CHIP_MULLINS:
	case CHIP_TONGA:
	case CHIP_FIJI:
	case CHIP_CARRIZO:
	case CHIP_STONEY:
	case CHIP_POLARIS11:
	case CHIP_POLARIS10:
	case CHIP_POLARIS12:
	case CHIP_VEGAM:
	case CHIP_VEGA10:
	case CHIP_VEGA12:
	case CHIP_VEGA20:
		return 0;
	case CHIP_NAVI12:
		fw_name_dmcu = FIRMWARE_NAVI12_DMCU;
		break;
	case CHIP_RAVEN:
		if (ASICREV_IS_PICASSO(adev->external_rev_id))
			fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
		else if (ASICREV_IS_RAVEN2(adev->external_rev_id))
			fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
		else
			return 0;
		break;
	default:
		switch (adev->ip_versions[DCE_HWIP][0]) {
		case IP_VERSION(2, 0, 2):
		case IP_VERSION(2, 0, 3):
		case IP_VERSION(2, 0, 0):
		case IP_VERSION(2, 1, 0):
		case IP_VERSION(3, 0, 0):
		case IP_VERSION(3, 0, 2):
		case IP_VERSION(3, 0, 3):
		case IP_VERSION(3, 0, 1):
		case IP_VERSION(3, 1, 2):
		case IP_VERSION(3, 1, 3):
		case IP_VERSION(3, 1, 5):
		case IP_VERSION(3, 1, 6):
		case IP_VERSION(3, 2, 0):
		case IP_VERSION(3, 2, 1):
			return 0;
		default:
			break;
		}
		DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
		return -EINVAL;
	}

	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
		DRM_DEBUG_KMS("dm: DMCU firmware not supported on direct or SMU loading\n");
		return 0;
	}

	r = request_firmware_direct(&adev->dm.fw_dmcu, fw_name_dmcu, adev->dev);
	if (r == -ENOENT) {
		/* DMCU firmware is not necessary, so don't raise a fuss if it's missing */
		DRM_DEBUG_KMS("dm: DMCU firmware not found\n");
		adev->dm.fw_dmcu = NULL;
		return 0;
	}
	if (r) {
		dev_err(adev->dev, "amdgpu_dm: Can't load firmware \"%s\"\n",
			fw_name_dmcu);
		return r;
	}

	r = amdgpu_ucode_validate(adev->dm.fw_dmcu);
	if (r) {
		dev_err(adev->dev, "amdgpu_dm: Can't validate firmware \"%s\"\n",
			fw_name_dmcu);
		release_firmware(adev->dm.fw_dmcu);
		adev->dm.fw_dmcu = NULL;
		return r;
	}

	hdr = (const struct dmcu_firmware_header_v1_0 *)adev->dm.fw_dmcu->data;
	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].ucode_id = AMDGPU_UCODE_ID_DMCU_ERAM;
	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].fw = adev->dm.fw_dmcu;
	adev->firmware.fw_size +=
		ALIGN(le32_to_cpu(hdr->header.ucode_size_bytes) - le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);

	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].ucode_id = AMDGPU_UCODE_ID_DMCU_INTV;
	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].fw = adev->dm.fw_dmcu;
	adev->firmware.fw_size +=
		ALIGN(le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);

	adev->dm.dmcu_fw_version = le32_to_cpu(hdr->header.ucode_version);

	DRM_DEBUG_KMS("PSP loading DMCU firmware\n");

	return 0;
}
static uint32_t amdgpu_dm_dmub_reg_read(void *ctx, uint32_t address)
{
	struct amdgpu_device *adev = ctx;

	return dm_read_reg(adev->dm.dc->ctx, address);
}

static void amdgpu_dm_dmub_reg_write(void *ctx, uint32_t address,
				     uint32_t value)
{
	struct amdgpu_device *adev = ctx;

	return dm_write_reg(adev->dm.dc->ctx, address, value);
}
1969 static int dm_dmub_sw_init(struct amdgpu_device *adev)
1971 struct dmub_srv_create_params create_params;
1972 struct dmub_srv_region_params region_params;
1973 struct dmub_srv_region_info region_info;
1974 struct dmub_srv_fb_params fb_params;
1975 struct dmub_srv_fb_info *fb_info;
1976 struct dmub_srv *dmub_srv;
1977 const struct dmcub_firmware_header_v1_0 *hdr;
1978 const char *fw_name_dmub;
1979 enum dmub_asic dmub_asic;
1980 enum dmub_status status;
1983 switch (adev->ip_versions[DCE_HWIP][0]) {
1984 case IP_VERSION(2, 1, 0):
1985 dmub_asic = DMUB_ASIC_DCN21;
1986 fw_name_dmub = FIRMWARE_RENOIR_DMUB;
1987 if (ASICREV_IS_GREEN_SARDINE(adev->external_rev_id))
1988 fw_name_dmub = FIRMWARE_GREEN_SARDINE_DMUB;
1990 case IP_VERSION(3, 0, 0):
1991 if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(10, 3, 0)) {
1992 dmub_asic = DMUB_ASIC_DCN30;
1993 fw_name_dmub = FIRMWARE_SIENNA_CICHLID_DMUB;
1995 dmub_asic = DMUB_ASIC_DCN30;
1996 fw_name_dmub = FIRMWARE_NAVY_FLOUNDER_DMUB;
1999 case IP_VERSION(3, 0, 1):
2000 dmub_asic = DMUB_ASIC_DCN301;
2001 fw_name_dmub = FIRMWARE_VANGOGH_DMUB;
2003 case IP_VERSION(3, 0, 2):
2004 dmub_asic = DMUB_ASIC_DCN302;
2005 fw_name_dmub = FIRMWARE_DIMGREY_CAVEFISH_DMUB;
2007 case IP_VERSION(3, 0, 3):
2008 dmub_asic = DMUB_ASIC_DCN303;
2009 fw_name_dmub = FIRMWARE_BEIGE_GOBY_DMUB;
2011 case IP_VERSION(3, 1, 2):
2012 case IP_VERSION(3, 1, 3):
2013 dmub_asic = (adev->external_rev_id == YELLOW_CARP_B0) ? DMUB_ASIC_DCN31B : DMUB_ASIC_DCN31;
2014 fw_name_dmub = FIRMWARE_YELLOW_CARP_DMUB;
2016 case IP_VERSION(3, 1, 4):
2017 dmub_asic = DMUB_ASIC_DCN314;
2018 fw_name_dmub = FIRMWARE_DCN_314_DMUB;
2020 case IP_VERSION(3, 1, 5):
2021 dmub_asic = DMUB_ASIC_DCN315;
2022 fw_name_dmub = FIRMWARE_DCN_315_DMUB;
2024 case IP_VERSION(3, 1, 6):
2025 dmub_asic = DMUB_ASIC_DCN316;
2026 fw_name_dmub = FIRMWARE_DCN316_DMUB;
2028 case IP_VERSION(3, 2, 0):
2029 dmub_asic = DMUB_ASIC_DCN32;
2030 fw_name_dmub = FIRMWARE_DCN_V3_2_0_DMCUB;
2032 case IP_VERSION(3, 2, 1):
2033 dmub_asic = DMUB_ASIC_DCN321;
2034 fw_name_dmub = FIRMWARE_DCN_V3_2_1_DMCUB;
2037 /* ASIC doesn't support DMUB. */
2041 r = request_firmware_direct(&adev->dm.dmub_fw, fw_name_dmub, adev->dev);
2043 DRM_ERROR("DMUB firmware loading failed: %d\n", r);
2047 r = amdgpu_ucode_validate(adev->dm.dmub_fw);
2049 DRM_ERROR("Couldn't validate DMUB firmware: %d\n", r);
2053 hdr = (const struct dmcub_firmware_header_v1_0 *)adev->dm.dmub_fw->data;
2054 adev->dm.dmcub_fw_version = le32_to_cpu(hdr->header.ucode_version);
2056 if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
2057 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].ucode_id =
2058 AMDGPU_UCODE_ID_DMCUB;
2059 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].fw =
2061 adev->firmware.fw_size +=
2062 ALIGN(le32_to_cpu(hdr->inst_const_bytes), PAGE_SIZE);
2064 DRM_INFO("Loading DMUB firmware via PSP: version=0x%08X\n",
2065 adev->dm.dmcub_fw_version);
2069 adev->dm.dmub_srv = kzalloc(sizeof(*adev->dm.dmub_srv), GFP_KERNEL);
2070 dmub_srv = adev->dm.dmub_srv;
2073 DRM_ERROR("Failed to allocate DMUB service!\n");
2077 memset(&create_params, 0, sizeof(create_params));
2078 create_params.user_ctx = adev;
2079 create_params.funcs.reg_read = amdgpu_dm_dmub_reg_read;
2080 create_params.funcs.reg_write = amdgpu_dm_dmub_reg_write;
2081 create_params.asic = dmub_asic;
2083 /* Create the DMUB service. */
2084 status = dmub_srv_create(dmub_srv, &create_params);
2085 if (status != DMUB_STATUS_OK) {
2086 DRM_ERROR("Error creating DMUB service: %d\n", status);
2090 /* Calculate the size of all the regions for the DMUB service. */
2091 memset(&region_params, 0, sizeof(region_params));
2093 region_params.inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
2094 PSP_HEADER_BYTES - PSP_FOOTER_BYTES;
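/* The PSP signing header and footer are excluded here so that only the
 * raw instruction-constant payload is measured for the region.
 */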
2095 region_params.bss_data_size = le32_to_cpu(hdr->bss_data_bytes);
2096 region_params.vbios_size = adev->bios_size;
2097 region_params.fw_bss_data = region_params.bss_data_size ?
2098 adev->dm.dmub_fw->data +
2099 le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
2100 le32_to_cpu(hdr->inst_const_bytes) : NULL;
2101 region_params.fw_inst_const =
2102 adev->dm.dmub_fw->data +
2103 le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
2106 status = dmub_srv_calc_region_info(dmub_srv, &region_params,
2109 if (status != DMUB_STATUS_OK) {
2110 DRM_ERROR("Error calculating DMUB region info: %d\n", status);
2115 * Allocate a framebuffer based on the total size of all the regions.
2116 * TODO: Move this into GART.
2118 r = amdgpu_bo_create_kernel(adev, region_info.fb_size, PAGE_SIZE,
2119 AMDGPU_GEM_DOMAIN_VRAM, &adev->dm.dmub_bo,
2120 &adev->dm.dmub_bo_gpu_addr,
2121 &adev->dm.dmub_bo_cpu_addr);
2125 /* Rebase the regions on the framebuffer address. */
2126 memset(&fb_params, 0, sizeof(fb_params));
2127 fb_params.cpu_addr = adev->dm.dmub_bo_cpu_addr;
2128 fb_params.gpu_addr = adev->dm.dmub_bo_gpu_addr;
2129 fb_params.region_info = &region_info;
2131 adev->dm.dmub_fb_info =
2132 kzalloc(sizeof(*adev->dm.dmub_fb_info), GFP_KERNEL);
2133 fb_info = adev->dm.dmub_fb_info;
2137 "Failed to allocate framebuffer info for DMUB service!\n");
2141 status = dmub_srv_calc_fb_info(dmub_srv, &fb_params, fb_info);
2142 if (status != DMUB_STATUS_OK) {
2143 DRM_ERROR("Error calculating DMUB FB info: %d\n", status);
2150 static int dm_sw_init(void *handle)
2152 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2155 r = dm_dmub_sw_init(adev);
2159 return load_dmcu_fw(adev);
2162 static int dm_sw_fini(void *handle)
2164 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2166 kfree(adev->dm.dmub_fb_info);
2167 adev->dm.dmub_fb_info = NULL;
2169 if (adev->dm.dmub_srv) {
2170 dmub_srv_destroy(adev->dm.dmub_srv);
2171 adev->dm.dmub_srv = NULL;
2174 release_firmware(adev->dm.dmub_fw);
2175 adev->dm.dmub_fw = NULL;
2177 release_firmware(adev->dm.fw_dmcu);
2178 adev->dm.fw_dmcu = NULL;
2183 static int detect_mst_link_for_all_connectors(struct drm_device *dev)
2185 struct amdgpu_dm_connector *aconnector;
2186 struct drm_connector *connector;
2187 struct drm_connector_list_iter iter;
2190 drm_connector_list_iter_begin(dev, &iter);
2191 drm_for_each_connector_iter(connector, &iter) {
2192 aconnector = to_amdgpu_dm_connector(connector);
2193 if (aconnector->dc_link->type == dc_connection_mst_branch &&
2194 aconnector->mst_mgr.aux) {
2195 DRM_DEBUG_DRIVER("DM_MST: starting TM on aconnector: %p [id: %d]\n",
2197 aconnector->base.base.id);
2199 ret = drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, true);
2201 DRM_ERROR("DM_MST: Failed to start MST\n");
2202 aconnector->dc_link->type =
2203 dc_connection_single;
2208 drm_connector_list_iter_end(&iter);
2213 static int dm_late_init(void *handle)
2215 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2217 struct dmcu_iram_parameters params;
2218 unsigned int linear_lut[16];
2220 struct dmcu *dmcu = NULL;
2222 dmcu = adev->dm.dc->res_pool->dmcu;
2224 for (i = 0; i < 16; i++)
2225 linear_lut[i] = 0xFFFF * i / 15;
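/* Illustrative note: 0xFFFF / 15 == 0x1111 exactly, so this loop yields
 * linear_lut[i] == 0x1111 * i, e.g. linear_lut[7] == 0x7777 and
 * linear_lut[15] == 0xFFFF.
 */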
2228 params.backlight_ramping_override = false;
2229 params.backlight_ramping_start = 0xCCCC;
2230 params.backlight_ramping_reduction = 0xCCCCCCCC;
2231 params.backlight_lut_array_size = 16;
2232 params.backlight_lut_array = linear_lut;
2234 /* Min backlight level after ABM reduction; don't allow below 1%.
2235 * 0xFFFF * 0.01 = 0x28F (655 decimal)
2237 params.min_abm_backlight = 0x28F;
2238 /* In the case where ABM is implemented on DMCUB,
2239 * the dmcu object will be NULL.
2240 * ABM 2.4 and up are implemented on DMCUB.
2243 if (!dmcu_load_iram(dmcu, params))
2245 } else if (adev->dm.dc->ctx->dmub_srv) {
2246 struct dc_link *edp_links[MAX_NUM_EDP];
2249 get_edp_links(adev->dm.dc, edp_links, &edp_num);
2250 for (i = 0; i < edp_num; i++) {
2251 if (!dmub_init_abm_config(adev->dm.dc->res_pool, params, i))
2256 return detect_mst_link_for_all_connectors(adev_to_drm(adev));
2259 static void s3_handle_mst(struct drm_device *dev, bool suspend)
2261 struct amdgpu_dm_connector *aconnector;
2262 struct drm_connector *connector;
2263 struct drm_connector_list_iter iter;
2264 struct drm_dp_mst_topology_mgr *mgr;
2266 bool need_hotplug = false;
2268 drm_connector_list_iter_begin(dev, &iter);
2269 drm_for_each_connector_iter(connector, &iter) {
2270 aconnector = to_amdgpu_dm_connector(connector);
2271 if (aconnector->dc_link->type != dc_connection_mst_branch ||
2272 aconnector->mst_port)
2275 mgr = &aconnector->mst_mgr;
2278 drm_dp_mst_topology_mgr_suspend(mgr);
2280 ret = drm_dp_mst_topology_mgr_resume(mgr, true);
2282 dm_helpers_dp_mst_stop_top_mgr(aconnector->dc_link->ctx,
2283 aconnector->dc_link);
2284 need_hotplug = true;
2288 drm_connector_list_iter_end(&iter);
2291 drm_kms_helper_hotplug_event(dev);
2294 static int amdgpu_dm_smu_write_watermarks_table(struct amdgpu_device *adev)
2298 /* This interface is for dGPU Navi1x. The Linux dc-pplib interface depends
2299 * on the Windows driver dc implementation.
2300 * For Navi1x, clock settings of dcn watermarks are fixed. The settings
2301 * should be passed to smu during boot up and resume from s3.
2302 * boot up: dc calculates dcn watermark clock settings within dc_create,
2303 * dcn20_resource_construct
2304 * then calls the pplib functions below to pass the settings to smu:
2305 * smu_set_watermarks_for_clock_ranges
2306 * smu_set_watermarks_table
2307 * navi10_set_watermarks_table
2308 * smu_write_watermarks_table
2310 * For Renoir, clock settings of dcn watermark are also fixed values.
2311 * dc has implemented a different flow for the Windows driver:
2312 * dc_hardware_init / dc_set_power_state
2317 * smu_set_watermarks_for_clock_ranges
2318 * renoir_set_watermarks_table
2319 * smu_write_watermarks_table
2322 * dc_hardware_init -> amdgpu_dm_init
2323 * dc_set_power_state --> dm_resume
2325 * Therefore, this function applies to navi10/12/14 but not Renoir.
2328 switch (adev->ip_versions[DCE_HWIP][0]) {
2329 case IP_VERSION(2, 0, 2):
2330 case IP_VERSION(2, 0, 0):
2336 ret = amdgpu_dpm_write_watermarks_table(adev);
2338 DRM_ERROR("Failed to update WMTABLE!\n");
2346 * dm_hw_init() - Initialize DC device
2347 * @handle: The base driver device containing the amdgpu_dm device.
2349 * Initialize the &struct amdgpu_display_manager device. This involves calling
2350 * the initializers of each DM component, then populating the struct with them.
2352 * Although the function implies hardware initialization, both hardware and
2353 * software are initialized here. Splitting them out to their relevant init
2354 * hooks is a future TODO item.
2356 * Some notable things that are initialized here:
2358 * - Display Core, both software and hardware
2359 * - DC modules that we need (freesync and color management)
2360 * - DRM software states
2361 * - Interrupt sources and handlers
2363 * - Debug FS entries, if enabled
2365 static int dm_hw_init(void *handle)
2367 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2368 /* Create DAL display manager */
2369 amdgpu_dm_init(adev);
2370 amdgpu_dm_hpd_init(adev);
2376 * dm_hw_fini() - Teardown DC device
2377 * @handle: The base driver device containing the amdgpu_dm device.
2379 * Teardown components within &struct amdgpu_display_manager that require
2380 * cleanup. This involves cleaning up the DRM device, DC, and any modules that
2381 * were loaded. Also flush IRQ workqueues and disable them.
2383 static int dm_hw_fini(void *handle)
2385 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2387 amdgpu_dm_hpd_fini(adev);
2389 amdgpu_dm_irq_fini(adev);
2390 amdgpu_dm_fini(adev);
2395 static int dm_enable_vblank(struct drm_crtc *crtc);
2396 static void dm_disable_vblank(struct drm_crtc *crtc);
2398 static void dm_gpureset_toggle_interrupts(struct amdgpu_device *adev,
2399 struct dc_state *state, bool enable)
2401 enum dc_irq_source irq_source;
2402 struct amdgpu_crtc *acrtc;
2406 for (i = 0; i < state->stream_count; i++) {
2407 acrtc = get_crtc_by_otg_inst(
2408 adev, state->stream_status[i].primary_otg_inst);
2410 if (acrtc && state->stream_status[i].plane_count != 0) {
2411 irq_source = IRQ_TYPE_PFLIP + acrtc->otg_inst;
2412 rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
2413 DRM_DEBUG_VBL("crtc %d - pflip irq %sabling: r=%d\n",
2414 acrtc->crtc_id, enable ? "en" : "dis", rc);
2416 DRM_WARN("Failed to %s pflip interrupts\n",
2417 enable ? "enable" : "disable");
2420 rc = dm_enable_vblank(&acrtc->base);
2422 DRM_WARN("Failed to enable vblank interrupts\n");
2424 dm_disable_vblank(&acrtc->base);
2432 static enum dc_status amdgpu_dm_commit_zero_streams(struct dc *dc)
2434 struct dc_state *context = NULL;
2435 enum dc_status res = DC_ERROR_UNEXPECTED;
2437 struct dc_stream_state *del_streams[MAX_PIPES];
2438 int del_streams_count = 0;
2440 memset(del_streams, 0, sizeof(del_streams));
2442 context = dc_create_state(dc);
2443 if (context == NULL)
2444 goto context_alloc_fail;
2446 dc_resource_state_copy_construct_current(dc, context);
2448 /* First, remove all streams from the context */
2449 for (i = 0; i < context->stream_count; i++) {
2450 struct dc_stream_state *stream = context->streams[i];
2452 del_streams[del_streams_count++] = stream;
2455 /* Remove all planes for removed streams and then remove the streams */
2456 for (i = 0; i < del_streams_count; i++) {
2457 if (!dc_rem_all_planes_for_stream(dc, del_streams[i], context)) {
2458 res = DC_FAIL_DETACH_SURFACES;
2462 res = dc_remove_stream_from_ctx(dc, context, del_streams[i]);
2467 res = dc_commit_state(dc, context);
2470 dc_release_state(context);
2476 static void hpd_rx_irq_work_suspend(struct amdgpu_display_manager *dm)
2480 if (dm->hpd_rx_offload_wq) {
2481 for (i = 0; i < dm->dc->caps.max_links; i++)
2482 flush_workqueue(dm->hpd_rx_offload_wq[i].wq);
2486 static int dm_suspend(void *handle)
2488 struct amdgpu_device *adev = handle;
2489 struct amdgpu_display_manager *dm = &adev->dm;
2492 if (amdgpu_in_reset(adev)) {
2493 mutex_lock(&dm->dc_lock);
2495 dc_allow_idle_optimizations(adev->dm.dc, false);
2497 dm->cached_dc_state = dc_copy_state(dm->dc->current_state);
2499 dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, false);
2501 amdgpu_dm_commit_zero_streams(dm->dc);
2503 amdgpu_dm_irq_suspend(adev);
2505 hpd_rx_irq_work_suspend(dm);
2510 WARN_ON(adev->dm.cached_state);
2511 adev->dm.cached_state = drm_atomic_helper_suspend(adev_to_drm(adev));
2513 s3_handle_mst(adev_to_drm(adev), true);
2515 amdgpu_dm_irq_suspend(adev);
2517 hpd_rx_irq_work_suspend(dm);
2519 dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D3);
2524 struct amdgpu_dm_connector *
2525 amdgpu_dm_find_first_crtc_matching_connector(struct drm_atomic_state *state,
2526 struct drm_crtc *crtc)
2529 struct drm_connector_state *new_con_state;
2530 struct drm_connector *connector;
2531 struct drm_crtc *crtc_from_state;
2533 for_each_new_connector_in_state(state, connector, new_con_state, i) {
2534 crtc_from_state = new_con_state->crtc;
2536 if (crtc_from_state == crtc)
2537 return to_amdgpu_dm_connector(connector);
2543 static void emulated_link_detect(struct dc_link *link)
2545 struct dc_sink_init_data sink_init_data = { 0 };
2546 struct display_sink_capability sink_caps = { 0 };
2547 enum dc_edid_status edid_status;
2548 struct dc_context *dc_ctx = link->ctx;
2549 struct dc_sink *sink = NULL;
2550 struct dc_sink *prev_sink = NULL;
2552 link->type = dc_connection_none;
2553 prev_sink = link->local_sink;
2556 dc_sink_release(prev_sink);
2558 switch (link->connector_signal) {
2559 case SIGNAL_TYPE_HDMI_TYPE_A: {
2560 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2561 sink_caps.signal = SIGNAL_TYPE_HDMI_TYPE_A;
2565 case SIGNAL_TYPE_DVI_SINGLE_LINK: {
2566 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2567 sink_caps.signal = SIGNAL_TYPE_DVI_SINGLE_LINK;
2571 case SIGNAL_TYPE_DVI_DUAL_LINK: {
2572 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2573 sink_caps.signal = SIGNAL_TYPE_DVI_DUAL_LINK;
2577 case SIGNAL_TYPE_LVDS: {
2578 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2579 sink_caps.signal = SIGNAL_TYPE_LVDS;
2583 case SIGNAL_TYPE_EDP: {
2584 sink_caps.transaction_type =
2585 DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
2586 sink_caps.signal = SIGNAL_TYPE_EDP;
2590 case SIGNAL_TYPE_DISPLAY_PORT: {
2591 sink_caps.transaction_type =
2592 DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
2593 sink_caps.signal = SIGNAL_TYPE_VIRTUAL;
2598 DC_ERROR("Invalid connector type! signal:%d\n",
2599 link->connector_signal);
2603 sink_init_data.link = link;
2604 sink_init_data.sink_signal = sink_caps.signal;
2606 sink = dc_sink_create(&sink_init_data);
2608 DC_ERROR("Failed to create sink!\n");
2612 /* dc_sink_create returns a new reference */
2613 link->local_sink = sink;
2615 edid_status = dm_helpers_read_local_edid(
2620 if (edid_status != EDID_OK)
2621 DC_ERROR("Failed to read EDID");
2625 static void dm_gpureset_commit_state(struct dc_state *dc_state,
2626 struct amdgpu_display_manager *dm)
2629 struct dc_surface_update surface_updates[MAX_SURFACES];
2630 struct dc_plane_info plane_infos[MAX_SURFACES];
2631 struct dc_scaling_info scaling_infos[MAX_SURFACES];
2632 struct dc_flip_addrs flip_addrs[MAX_SURFACES];
2633 struct dc_stream_update stream_update;
2637 bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
2640 dm_error("Failed to allocate update bundle\n");
2644 for (k = 0; k < dc_state->stream_count; k++) {
2645 bundle->stream_update.stream = dc_state->streams[k];
2647 for (m = 0; m < dc_state->stream_status->plane_count; m++) {
2648 bundle->surface_updates[m].surface =
2649 dc_state->stream_status->plane_states[m];
2650 bundle->surface_updates[m].surface->force_full_update =
2653 dc_commit_updates_for_stream(
2654 dm->dc, bundle->surface_updates,
2655 dc_state->stream_status->plane_count,
2656 dc_state->streams[k], &bundle->stream_update, dc_state);
2665 static int dm_resume(void *handle)
2667 struct amdgpu_device *adev = handle;
2668 struct drm_device *ddev = adev_to_drm(adev);
2669 struct amdgpu_display_manager *dm = &adev->dm;
2670 struct amdgpu_dm_connector *aconnector;
2671 struct drm_connector *connector;
2672 struct drm_connector_list_iter iter;
2673 struct drm_crtc *crtc;
2674 struct drm_crtc_state *new_crtc_state;
2675 struct dm_crtc_state *dm_new_crtc_state;
2676 struct drm_plane *plane;
2677 struct drm_plane_state *new_plane_state;
2678 struct dm_plane_state *dm_new_plane_state;
2679 struct dm_atomic_state *dm_state = to_dm_atomic_state(dm->atomic_obj.state);
2680 enum dc_connection_type new_connection_type = dc_connection_none;
2681 struct dc_state *dc_state;
2684 if (amdgpu_in_reset(adev)) {
2685 dc_state = dm->cached_dc_state;
2688 * The dc->current_state is backed up into dm->cached_dc_state
2689 * before we commit 0 streams.
2691 * DC will clear link encoder assignments on the real state
2692 * but the changes won't propagate over to the copy we made
2693 * before the 0 streams commit.
2695 * DC expects that link encoder assignments are *not* valid
2696 * when committing a state, so as a workaround we can copy
2697 * off of the current state.
2699 * We lose the previous assignments, but we had already
2700 * committed 0 streams anyway.
2702 link_enc_cfg_copy(adev->dm.dc->current_state, dc_state);
2704 r = dm_dmub_hw_init(adev);
2706 DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
2708 dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
2711 amdgpu_dm_irq_resume_early(adev);
2713 for (i = 0; i < dc_state->stream_count; i++) {
2714 dc_state->streams[i]->mode_changed = true;
2715 for (j = 0; j < dc_state->stream_status[i].plane_count; j++) {
2716 dc_state->stream_status[i].plane_states[j]->update_flags.raw
2721 if (dc_is_dmub_outbox_supported(adev->dm.dc)) {
2722 amdgpu_dm_outbox_init(adev);
2723 dc_enable_dmub_outbox(adev->dm.dc);
2726 WARN_ON(!dc_commit_state(dm->dc, dc_state));
2728 dm_gpureset_commit_state(dm->cached_dc_state, dm);
2730 dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, true);
2732 dc_release_state(dm->cached_dc_state);
2733 dm->cached_dc_state = NULL;
2735 amdgpu_dm_irq_resume_late(adev);
2737 mutex_unlock(&dm->dc_lock);
2741 /* Recreate dc_state - DC invalidates it when setting power state to S3. */
2742 dc_release_state(dm_state->context);
2743 dm_state->context = dc_create_state(dm->dc);
2744 /* TODO: Remove dc_state->dccg, use dc->dccg directly. */
2745 dc_resource_state_construct(dm->dc, dm_state->context);
2747 /* Before powering on DC we need to re-initialize DMUB. */
2748 dm_dmub_hw_resume(adev);
2750 /* Re-enable outbox interrupts for DPIA. */
2751 if (dc_is_dmub_outbox_supported(adev->dm.dc)) {
2752 amdgpu_dm_outbox_init(adev);
2753 dc_enable_dmub_outbox(adev->dm.dc);
2756 /* power on hardware */
2757 dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
2759 /* program HPD filter */
2763 * Enable HPD Rx IRQ early; this should be done before set mode, as
2764 * short-pulse interrupts are used for MST.
2766 amdgpu_dm_irq_resume_early(adev);
2768 /* On resume we need to rewrite the MSTM control bits to enable MST */
2769 s3_handle_mst(ddev, false);
2772 drm_connector_list_iter_begin(ddev, &iter);
2773 drm_for_each_connector_iter(connector, &iter) {
2774 aconnector = to_amdgpu_dm_connector(connector);
2777 * this is the case when traversing through already created
2778 * MST connectors; those should be skipped
2780 if (aconnector->dc_link &&
2781 aconnector->dc_link->type == dc_connection_mst_branch)
2784 mutex_lock(&aconnector->hpd_lock);
2785 if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
2786 DRM_ERROR("KMS: Failed to detect connector\n");
2788 if (aconnector->base.force && new_connection_type == dc_connection_none) {
2789 emulated_link_detect(aconnector->dc_link);
2791 mutex_lock(&dm->dc_lock);
2792 dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD);
2793 mutex_unlock(&dm->dc_lock);
2796 if (aconnector->fake_enable && aconnector->dc_link->local_sink)
2797 aconnector->fake_enable = false;
2799 if (aconnector->dc_sink)
2800 dc_sink_release(aconnector->dc_sink);
2801 aconnector->dc_sink = NULL;
2802 amdgpu_dm_update_connector_after_detect(aconnector);
2803 mutex_unlock(&aconnector->hpd_lock);
2805 drm_connector_list_iter_end(&iter);
2807 /* Force mode set in atomic commit */
2808 for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i)
2809 new_crtc_state->active_changed = true;
2812 * atomic_check is expected to create the dc states. We need to release
2813 * them here, since they were duplicated as part of the suspend procedure.
2816 for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i) {
2817 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
2818 if (dm_new_crtc_state->stream) {
2819 WARN_ON(kref_read(&dm_new_crtc_state->stream->refcount) > 1);
2820 dc_stream_release(dm_new_crtc_state->stream);
2821 dm_new_crtc_state->stream = NULL;
2825 for_each_new_plane_in_state(dm->cached_state, plane, new_plane_state, i) {
2826 dm_new_plane_state = to_dm_plane_state(new_plane_state);
2827 if (dm_new_plane_state->dc_state) {
2828 WARN_ON(kref_read(&dm_new_plane_state->dc_state->refcount) > 1);
2829 dc_plane_state_release(dm_new_plane_state->dc_state);
2830 dm_new_plane_state->dc_state = NULL;
2834 drm_atomic_helper_resume(ddev, dm->cached_state);
2836 dm->cached_state = NULL;
2838 amdgpu_dm_irq_resume_late(adev);
2840 amdgpu_dm_smu_write_watermarks_table(adev);
2848 * DM (and consequently DC) is registered in the amdgpu base driver as an IP
2849 * block. When CONFIG_DRM_AMD_DC is enabled, the DM device IP block is added to
2850 * the base driver's device list to be initialized and torn down accordingly.
2852 * The functions to do so are provided as hooks in &struct amd_ip_funcs.
2855 static const struct amd_ip_funcs amdgpu_dm_funcs = {
2857 .early_init = dm_early_init,
2858 .late_init = dm_late_init,
2859 .sw_init = dm_sw_init,
2860 .sw_fini = dm_sw_fini,
2861 .early_fini = amdgpu_dm_early_fini,
2862 .hw_init = dm_hw_init,
2863 .hw_fini = dm_hw_fini,
2864 .suspend = dm_suspend,
2865 .resume = dm_resume,
2866 .is_idle = dm_is_idle,
2867 .wait_for_idle = dm_wait_for_idle,
2868 .check_soft_reset = dm_check_soft_reset,
2869 .soft_reset = dm_soft_reset,
2870 .set_clockgating_state = dm_set_clockgating_state,
2871 .set_powergating_state = dm_set_powergating_state,
2874 const struct amdgpu_ip_block_version dm_ip_block =
2876 .type = AMD_IP_BLOCK_TYPE_DCE,
2880 .funcs = &amdgpu_dm_funcs,
2890 static const struct drm_mode_config_funcs amdgpu_dm_mode_funcs = {
2891 .fb_create = amdgpu_display_user_framebuffer_create,
2892 .get_format_info = amd_get_format_info,
2893 .output_poll_changed = drm_fb_helper_output_poll_changed,
2894 .atomic_check = amdgpu_dm_atomic_check,
2895 .atomic_commit = drm_atomic_helper_commit,
2898 static struct drm_mode_config_helper_funcs amdgpu_dm_mode_config_helperfuncs = {
2899 .atomic_commit_tail = amdgpu_dm_atomic_commit_tail
2902 static void update_connector_ext_caps(struct amdgpu_dm_connector *aconnector)
2904 u32 max_avg, min_cll, max, min, q, r;
2905 struct amdgpu_dm_backlight_caps *caps;
2906 struct amdgpu_display_manager *dm;
2907 struct drm_connector *conn_base;
2908 struct amdgpu_device *adev;
2909 struct dc_link *link = NULL;
2910 static const u8 pre_computed_values[] = {
2911 50, 51, 52, 53, 55, 56, 57, 58, 59, 61, 62, 63, 65, 66, 68, 69,
2912 71, 72, 74, 75, 77, 79, 81, 82, 84, 86, 88, 90, 92, 94, 96, 98};
2915 if (!aconnector || !aconnector->dc_link)
2918 link = aconnector->dc_link;
2919 if (link->connector_signal != SIGNAL_TYPE_EDP)
2922 conn_base = &aconnector->base;
2923 adev = drm_to_adev(conn_base->dev);
2925 for (i = 0; i < dm->num_of_edps; i++) {
2926 if (link == dm->backlight_link[i])
2929 if (i >= dm->num_of_edps)
2931 caps = &dm->backlight_caps[i];
2932 caps->ext_caps = &aconnector->dc_link->dpcd_sink_ext_caps;
2933 caps->aux_support = false;
2934 max_avg = conn_base->hdr_sink_metadata.hdmi_type1.max_fall;
2935 min_cll = conn_base->hdr_sink_metadata.hdmi_type1.min_cll;
2937 if (caps->ext_caps->bits.oled == 1 /*||
2938 caps->ext_caps->bits.sdr_aux_backlight_control == 1 ||
2939 caps->ext_caps->bits.hdr_aux_backlight_control == 1*/)
2940 caps->aux_support = true;
2942 if (amdgpu_backlight == 0)
2943 caps->aux_support = false;
2944 else if (amdgpu_backlight == 1)
2945 caps->aux_support = true;
2947 /* From the specification (CTA-861-G), the maximum luminance is
2948 * calculated as:
2949 * Luminance = 50*2**(CV/32)
2950 * where CV is a one-byte value.
2951 * Evaluating this expression directly would require floating point
2952 * precision; to avoid that complexity, we take advantage of the fact
2953 * that CV is divided by a constant. From Euclid's division algorithm,
2954 * we know that CV can be written as CV = 32*q + r. Substituting this
2955 * into the Luminance expression gives 50*(2**q)*(2**(r/32)), so we
2956 * only need to pre-compute 50*2**(r/32) for r in 0..31, which was
2957 * done with the following Ruby line:
2958 * (0...32).each {|cv| puts (50*2**(cv/32.0)).round}
2959 * The results of the above expression can be verified against
2960 * pre_computed_values.
2964 max = (1 << q) * pre_computed_values[r];
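/* Worked example (illustrative): CV = 100 gives q = 3 and r = 4, so
 * max = (1 << 3) * pre_computed_values[4] = 8 * 55 = 440, which
 * approximates 50 * 2^(100/32) ~= 436 nits.
 */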
2966 // min luminance: maxLum * (CV/255)^2 / 100
2967 q = DIV_ROUND_CLOSEST(min_cll, 255);
2968 min = max * DIV_ROUND_CLOSEST((q * q), 100);
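/* Worked arithmetic note: min_cll is a one-byte CV, so q is 0 or 1 and
 * DIV_ROUND_CLOSEST(q * q, 100) evaluates to 0; e.g. min_cll = 200
 * gives q = 1 and min = max * 0 = 0.
 */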
2970 caps->aux_max_input_signal = max;
2971 caps->aux_min_input_signal = min;
2974 void amdgpu_dm_update_connector_after_detect(
2975 struct amdgpu_dm_connector *aconnector)
2977 struct drm_connector *connector = &aconnector->base;
2978 struct drm_device *dev = connector->dev;
2979 struct dc_sink *sink;
2981 /* MST handled by drm_mst framework */
2982 if (aconnector->mst_mgr.mst_state == true)
2985 sink = aconnector->dc_link->local_sink;
2987 dc_sink_retain(sink);
2990 * An EDID-managed connector gets its first update only in the mode_valid
2991 * hook; the connector sink is then set to either a fake or a physical
2992 * sink depending on link status. Skip if already done during boot.
2994 if (aconnector->base.force != DRM_FORCE_UNSPECIFIED
2995 && aconnector->dc_em_sink) {
2998 * For S3 resume with headless, use the emulated sink (dc_em_sink) to
2999 * fake the stream, because on resume connector->sink is set to NULL.
3001 mutex_lock(&dev->mode_config.mutex);
3004 if (aconnector->dc_sink) {
3005 amdgpu_dm_update_freesync_caps(connector, NULL);
3007 * The retain and release below bump up the sink's refcount because
3008 * the link doesn't point to it anymore after disconnect; otherwise,
3009 * the next crtc-to-connector reshuffle by the UMD would trigger an
3010 * unwanted dc_sink release.
3012 dc_sink_release(aconnector->dc_sink);
3014 aconnector->dc_sink = sink;
3015 dc_sink_retain(aconnector->dc_sink);
3016 amdgpu_dm_update_freesync_caps(connector,
3019 amdgpu_dm_update_freesync_caps(connector, NULL);
3020 if (!aconnector->dc_sink) {
3021 aconnector->dc_sink = aconnector->dc_em_sink;
3022 dc_sink_retain(aconnector->dc_sink);
3026 mutex_unlock(&dev->mode_config.mutex);
3029 dc_sink_release(sink);
3034 * TODO: temporary guard while looking for a proper fix;
3035 * if this sink is an MST sink, we should not do anything.
3037 if (sink && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
3038 dc_sink_release(sink);
3042 if (aconnector->dc_sink == sink) {
3044 * We got a DP short pulse (Link Loss, DP CTS, etc...).
3047 DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: dc_sink didn't change.\n",
3048 aconnector->connector_id);
3050 dc_sink_release(sink);
3054 DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: Old sink=%p New sink=%p\n",
3055 aconnector->connector_id, aconnector->dc_sink, sink);
3057 mutex_lock(&dev->mode_config.mutex);
3060 * 1. Update status of the drm connector
3061 * 2. Send an event and let userspace tell us what to do
3065 * TODO: check if we still need the S3 mode update workaround.
3066 * If yes, put it here.
3068 if (aconnector->dc_sink) {
3069 amdgpu_dm_update_freesync_caps(connector, NULL);
3070 dc_sink_release(aconnector->dc_sink);
3073 aconnector->dc_sink = sink;
3074 dc_sink_retain(aconnector->dc_sink);
3075 if (sink->dc_edid.length == 0) {
3076 aconnector->edid = NULL;
3077 if (aconnector->dc_link->aux_mode) {
3078 drm_dp_cec_unset_edid(
3079 &aconnector->dm_dp_aux.aux);
3083 (struct edid *)sink->dc_edid.raw_edid;
3085 if (aconnector->dc_link->aux_mode)
3086 drm_dp_cec_set_edid(&aconnector->dm_dp_aux.aux,
3090 drm_connector_update_edid_property(connector, aconnector->edid);
3091 amdgpu_dm_update_freesync_caps(connector, aconnector->edid);
3092 update_connector_ext_caps(aconnector);
3094 drm_dp_cec_unset_edid(&aconnector->dm_dp_aux.aux);
3095 amdgpu_dm_update_freesync_caps(connector, NULL);
3096 drm_connector_update_edid_property(connector, NULL);
3097 aconnector->num_modes = 0;
3098 dc_sink_release(aconnector->dc_sink);
3099 aconnector->dc_sink = NULL;
3100 aconnector->edid = NULL;
3101 #ifdef CONFIG_DRM_AMD_DC_HDCP
3102 /* Set CP to DESIRED if it was ENABLED, so we can re-enable it again on hotplug */
3103 if (connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
3104 connector->state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
3108 mutex_unlock(&dev->mode_config.mutex);
3110 update_subconnector_property(aconnector);
3113 dc_sink_release(sink);
3116 static void handle_hpd_irq_helper(struct amdgpu_dm_connector *aconnector)
3118 struct drm_connector *connector = &aconnector->base;
3119 struct drm_device *dev = connector->dev;
3120 enum dc_connection_type new_connection_type = dc_connection_none;
3121 struct amdgpu_device *adev = drm_to_adev(dev);
3122 #ifdef CONFIG_DRM_AMD_DC_HDCP
3123 struct dm_connector_state *dm_con_state = to_dm_connector_state(connector->state);
3127 if (adev->dm.disable_hpd_irq)
3131 * In case of failure or MST, there is no need to update the connector
3132 * status or notify the OS, since (for MST) MST does this in its own context.
3134 mutex_lock(&aconnector->hpd_lock);
3136 #ifdef CONFIG_DRM_AMD_DC_HDCP
3137 if (adev->dm.hdcp_workqueue) {
3138 hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
3139 dm_con_state->update_hdcp = true;
3142 if (aconnector->fake_enable)
3143 aconnector->fake_enable = false;
3145 if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
3146 DRM_ERROR("KMS: Failed to detect connector\n");
3148 if (aconnector->base.force && new_connection_type == dc_connection_none) {
3149 emulated_link_detect(aconnector->dc_link);
3151 drm_modeset_lock_all(dev);
3152 dm_restore_drm_connector_state(dev, connector);
3153 drm_modeset_unlock_all(dev);
3155 if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
3156 drm_kms_helper_connector_hotplug_event(connector);
3158 mutex_lock(&adev->dm.dc_lock);
3159 ret = dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD);
3160 mutex_unlock(&adev->dm.dc_lock);
3162 amdgpu_dm_update_connector_after_detect(aconnector);
3164 drm_modeset_lock_all(dev);
3165 dm_restore_drm_connector_state(dev, connector);
3166 drm_modeset_unlock_all(dev);
3168 if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
3169 drm_kms_helper_connector_hotplug_event(connector);
3172 mutex_unlock(&aconnector->hpd_lock);
3176 static void handle_hpd_irq(void *param)
3178 struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
3180 handle_hpd_irq_helper(aconnector);
3184 static void dm_handle_mst_sideband_msg(struct amdgpu_dm_connector *aconnector)
3186 uint8_t esi[DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI] = { 0 };
3188 bool new_irq_handled = false;
3190 int dpcd_bytes_to_read;
3192 const int max_process_count = 30;
3193 int process_count = 0;
3195 const struct dc_link_status *link_status = dc_link_get_status(aconnector->dc_link);
3197 if (link_status->dpcd_caps->dpcd_rev.raw < 0x12) {
3198 dpcd_bytes_to_read = DP_LANE0_1_STATUS - DP_SINK_COUNT;
3199 /* DPCD 0x200 - 0x201 for downstream IRQ */
3200 dpcd_addr = DP_SINK_COUNT;
3202 dpcd_bytes_to_read = DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI;
3203 /* DPCD 0x2002 - 0x2005 for downstream IRQ */
3204 dpcd_addr = DP_SINK_COUNT_ESI;
3207 dret = drm_dp_dpcd_read(
3208 &aconnector->dm_dp_aux.aux,
3211 dpcd_bytes_to_read);
3213 while (dret == dpcd_bytes_to_read &&
3214 process_count < max_process_count) {
3220 DRM_DEBUG_DRIVER("ESI %02x %02x %02x\n", esi[0], esi[1], esi[2]);
3221 /* handle HPD short pulse irq */
3222 if (aconnector->mst_mgr.mst_state)
3224 &aconnector->mst_mgr,
3228 if (new_irq_handled) {
3229 /* ACK at DPCD to notify downstream */
3230 const int ack_dpcd_bytes_to_write =
3231 dpcd_bytes_to_read - 1;
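/* One byte fewer than was read: the ACK write covers only the
 * ESI bytes that follow the initial sink-count byte.
 */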
3233 for (retry = 0; retry < 3; retry++) {
3236 wret = drm_dp_dpcd_write(
3237 &aconnector->dm_dp_aux.aux,
3240 ack_dpcd_bytes_to_write);
3241 if (wret == ack_dpcd_bytes_to_write)
3245 /* check if there is a new irq to be handled */
3246 dret = drm_dp_dpcd_read(
3247 &aconnector->dm_dp_aux.aux,
3250 dpcd_bytes_to_read);
3252 new_irq_handled = false;
3258 if (process_count == max_process_count)
3259 DRM_DEBUG_DRIVER("Loop exceeded max iterations\n");
3262 static void schedule_hpd_rx_offload_work(struct hpd_rx_irq_offload_work_queue *offload_wq,
3263 union hpd_irq_data hpd_irq_data)
3265 struct hpd_rx_irq_offload_work *offload_work =
3266 kzalloc(sizeof(*offload_work), GFP_KERNEL);
3268 if (!offload_work) {
3269 DRM_ERROR("Failed to allocate hpd_rx_irq_offload_work.\n");
3273 INIT_WORK(&offload_work->work, dm_handle_hpd_rx_offload_work);
3274 offload_work->data = hpd_irq_data;
3275 offload_work->offload_wq = offload_wq;
3277 queue_work(offload_wq->wq, &offload_work->work);
3278 DRM_DEBUG_KMS("queue work to handle hpd_rx offload work");
3281 static void handle_hpd_rx_irq(void *param)
3283 struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
3284 struct drm_connector *connector = &aconnector->base;
3285 struct drm_device *dev = connector->dev;
3286 struct dc_link *dc_link = aconnector->dc_link;
3287 bool is_mst_root_connector = aconnector->mst_mgr.mst_state;
3288 bool result = false;
3289 enum dc_connection_type new_connection_type = dc_connection_none;
3290 struct amdgpu_device *adev = drm_to_adev(dev);
3291 union hpd_irq_data hpd_irq_data;
3292 bool link_loss = false;
3293 bool has_left_work = false;
3294 int idx = aconnector->base.index;
3295 struct hpd_rx_irq_offload_work_queue *offload_wq = &adev->dm.hpd_rx_offload_wq[idx];
3297 memset(&hpd_irq_data, 0, sizeof(hpd_irq_data));
3299 if (adev->dm.disable_hpd_irq)
3303 * TODO: Temporarily add a mutex to protect the hpd interrupt from gpio
3304 * conflicts; after the i2c helper is implemented, this mutex should be
3305 * retired.
3307 mutex_lock(&aconnector->hpd_lock);
3309 result = dc_link_handle_hpd_rx_irq(dc_link, &hpd_irq_data,
3310 &link_loss, true, &has_left_work);
3315 if (hpd_irq_data.bytes.device_service_irq.bits.AUTOMATED_TEST) {
3316 schedule_hpd_rx_offload_work(offload_wq, hpd_irq_data);
3320 if (dc_link_dp_allow_hpd_rx_irq(dc_link)) {
3321 if (hpd_irq_data.bytes.device_service_irq.bits.UP_REQ_MSG_RDY ||
3322 hpd_irq_data.bytes.device_service_irq.bits.DOWN_REP_MSG_RDY) {
3323 dm_handle_mst_sideband_msg(aconnector);
3330 spin_lock(&offload_wq->offload_lock);
3331 skip = offload_wq->is_handling_link_loss;
3334 offload_wq->is_handling_link_loss = true;
3336 spin_unlock(&offload_wq->offload_lock);
3339 schedule_hpd_rx_offload_work(offload_wq, hpd_irq_data);
3346 if (result && !is_mst_root_connector) {
3347 /* Downstream Port status changed. */
3348 if (!dc_link_detect_sink(dc_link, &new_connection_type))
3349 DRM_ERROR("KMS: Failed to detect connector\n");
3351 if (aconnector->base.force && new_connection_type == dc_connection_none) {
3352 emulated_link_detect(dc_link);
3354 if (aconnector->fake_enable)
3355 aconnector->fake_enable = false;
3357 amdgpu_dm_update_connector_after_detect(aconnector);
3360 drm_modeset_lock_all(dev);
3361 dm_restore_drm_connector_state(dev, connector);
3362 drm_modeset_unlock_all(dev);
3364 drm_kms_helper_connector_hotplug_event(connector);
3368 mutex_lock(&adev->dm.dc_lock);
3369 ret = dc_link_detect(dc_link, DETECT_REASON_HPDRX);
3370 mutex_unlock(&adev->dm.dc_lock);
3373 if (aconnector->fake_enable)
3374 aconnector->fake_enable = false;
3376 amdgpu_dm_update_connector_after_detect(aconnector);
3378 drm_modeset_lock_all(dev);
3379 dm_restore_drm_connector_state(dev, connector);
3380 drm_modeset_unlock_all(dev);
3382 drm_kms_helper_connector_hotplug_event(connector);
3386 #ifdef CONFIG_DRM_AMD_DC_HDCP
3387 if (hpd_irq_data.bytes.device_service_irq.bits.CP_IRQ) {
3388 if (adev->dm.hdcp_workqueue)
3389 hdcp_handle_cpirq(adev->dm.hdcp_workqueue, aconnector->base.index);
3393 if (dc_link->type != dc_connection_mst_branch)
3394 drm_dp_cec_irq(&aconnector->dm_dp_aux.aux);
3396 mutex_unlock(&aconnector->hpd_lock);
3399 static void register_hpd_handlers(struct amdgpu_device *adev)
3401 struct drm_device *dev = adev_to_drm(adev);
3402 struct drm_connector *connector;
3403 struct amdgpu_dm_connector *aconnector;
3404 const struct dc_link *dc_link;
3405 struct dc_interrupt_params int_params = {0};
3407 int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3408 int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3410 list_for_each_entry(connector,
3411 &dev->mode_config.connector_list, head) {
3413 aconnector = to_amdgpu_dm_connector(connector);
3414 dc_link = aconnector->dc_link;
3416 if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd) {
3417 int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
3418 int_params.irq_source = dc_link->irq_source_hpd;
3420 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3422 (void *) aconnector);
3425 if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd_rx) {
3427 /* Also register for DP short pulse (hpd_rx). */
3428 int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
3429 int_params.irq_source = dc_link->irq_source_hpd_rx;
3431 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3433 (void *) aconnector);
3435 if (adev->dm.hpd_rx_offload_wq)
3436 adev->dm.hpd_rx_offload_wq[connector->index].aconnector =
3442 #if defined(CONFIG_DRM_AMD_DC_SI)
3443 /* Register IRQ sources and initialize IRQ callbacks */
3444 static int dce60_register_irq_handlers(struct amdgpu_device *adev)
3446 struct dc *dc = adev->dm.dc;
3447 struct common_irq_params *c_irq_params;
3448 struct dc_interrupt_params int_params = {0};
3451 unsigned int client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
3453 int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3454 int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3457 * Actions of amdgpu_irq_add_id():
3458 * 1. Register a set() function with base driver.
3459 * Base driver will call set() function to enable/disable an
3460 * interrupt in DC hardware.
3461 * 2. Register amdgpu_dm_irq_handler().
3462 * Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
3463 * coming from DC hardware.
3464 * amdgpu_dm_irq_handler() will re-direct the interrupt to DC
3465 * for acknowledging and handling. */
3467 /* Use VBLANK interrupt */
3468 for (i = 0; i < adev->mode_info.num_crtc; i++) {
3469 r = amdgpu_irq_add_id(adev, client_id, i + 1, &adev->crtc_irq);
3471 DRM_ERROR("Failed to add crtc irq id!\n");
3475 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3476 int_params.irq_source =
3477 dc_interrupt_to_irq_source(dc, i + 1, 0);
3479 c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
3481 c_irq_params->adev = adev;
3482 c_irq_params->irq_src = int_params.irq_source;
3484 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3485 dm_crtc_high_irq, c_irq_params);
3488 /* Use GRPH_PFLIP interrupt */
3489 for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
3490 i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
3491 r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
3493 DRM_ERROR("Failed to add page flip irq id!\n");
3497 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3498 int_params.irq_source =
3499 dc_interrupt_to_irq_source(dc, i, 0);
3501 c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
3503 c_irq_params->adev = adev;
3504 c_irq_params->irq_src = int_params.irq_source;
3506 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3507 dm_pflip_high_irq, c_irq_params);
3512 r = amdgpu_irq_add_id(adev, client_id,
3513 VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
3515 DRM_ERROR("Failed to add hpd irq id!\n");
3519 register_hpd_handlers(adev);
3525 /* Register IRQ sources and initialize IRQ callbacks */
3526 static int dce110_register_irq_handlers(struct amdgpu_device *adev)
3528 struct dc *dc = adev->dm.dc;
3529 struct common_irq_params *c_irq_params;
3530 struct dc_interrupt_params int_params = {0};
3533 unsigned int client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
3535 if (adev->family >= AMDGPU_FAMILY_AI)
3536 client_id = SOC15_IH_CLIENTID_DCE;
3538 int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3539 int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3542 * Actions of amdgpu_irq_add_id():
3543 * 1. Register a set() function with base driver.
3544 * Base driver will call set() function to enable/disable an
3545 * interrupt in DC hardware.
3546 * 2. Register amdgpu_dm_irq_handler().
3547 * Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
3548 * coming from DC hardware.
3549 * amdgpu_dm_irq_handler() will re-direct the interrupt to DC
3550 * for acknowledging and handling. */
3552 /* Use VBLANK interrupt */
3553 for (i = VISLANDS30_IV_SRCID_D1_VERTICAL_INTERRUPT0; i <= VISLANDS30_IV_SRCID_D6_VERTICAL_INTERRUPT0; i++) {
3554 r = amdgpu_irq_add_id(adev, client_id, i, &adev->crtc_irq);
3556 DRM_ERROR("Failed to add crtc irq id!\n");
3560 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3561 int_params.irq_source =
3562 dc_interrupt_to_irq_source(dc, i, 0);
3564 c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
3566 c_irq_params->adev = adev;
3567 c_irq_params->irq_src = int_params.irq_source;
3569 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3570 dm_crtc_high_irq, c_irq_params);
3573 /* Use VUPDATE interrupt */
3574 for (i = VISLANDS30_IV_SRCID_D1_V_UPDATE_INT; i <= VISLANDS30_IV_SRCID_D6_V_UPDATE_INT; i += 2) {
3575 r = amdgpu_irq_add_id(adev, client_id, i, &adev->vupdate_irq);
3577 DRM_ERROR("Failed to add vupdate irq id!\n");
3581 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3582 int_params.irq_source =
3583 dc_interrupt_to_irq_source(dc, i, 0);
3585 c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
3587 c_irq_params->adev = adev;
3588 c_irq_params->irq_src = int_params.irq_source;
3590 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3591 dm_vupdate_high_irq, c_irq_params);
3594 /* Use GRPH_PFLIP interrupt */
3595 for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
3596 i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
3597 r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
3599 DRM_ERROR("Failed to add page flip irq id!\n");
3603 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3604 int_params.irq_source =
3605 dc_interrupt_to_irq_source(dc, i, 0);
3607 c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
3609 c_irq_params->adev = adev;
3610 c_irq_params->irq_src = int_params.irq_source;
3612 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3613 dm_pflip_high_irq, c_irq_params);
3618 r = amdgpu_irq_add_id(adev, client_id,
3619 VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
3621 DRM_ERROR("Failed to add hpd irq id!\n");
3625 register_hpd_handlers(adev);
3630 /* Register IRQ sources and initialize IRQ callbacks */
3631 static int dcn10_register_irq_handlers(struct amdgpu_device *adev)
3633 struct dc *dc = adev->dm.dc;
3634 struct common_irq_params *c_irq_params;
3635 struct dc_interrupt_params int_params = {0};
3638 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
3639 static const unsigned int vrtl_int_srcid[] = {
3640 DCN_1_0__SRCID__OTG1_VERTICAL_INTERRUPT0_CONTROL,
3641 DCN_1_0__SRCID__OTG2_VERTICAL_INTERRUPT0_CONTROL,
3642 DCN_1_0__SRCID__OTG3_VERTICAL_INTERRUPT0_CONTROL,
3643 DCN_1_0__SRCID__OTG4_VERTICAL_INTERRUPT0_CONTROL,
3644 DCN_1_0__SRCID__OTG5_VERTICAL_INTERRUPT0_CONTROL,
3645 DCN_1_0__SRCID__OTG6_VERTICAL_INTERRUPT0_CONTROL
3649 int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3650 int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3653 * Actions of amdgpu_irq_add_id():
3654 * 1. Register a set() function with base driver.
3655 * Base driver will call set() function to enable/disable an
3656 * interrupt in DC hardware.
3657 * 2. Register amdgpu_dm_irq_handler().
3658 * Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
3659 * coming from DC hardware.
3660 * amdgpu_dm_irq_handler() will re-direct the interrupt to DC
3661 * for acknowledging and handling.
3664 /* Use VSTARTUP interrupt */
3665 for (i = DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP;
3666 i <= DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP + adev->mode_info.num_crtc - 1;
3668 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->crtc_irq);
3671 DRM_ERROR("Failed to add crtc irq id!\n");
3675 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3676 int_params.irq_source =
3677 dc_interrupt_to_irq_source(dc, i, 0);
3679 c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
3681 c_irq_params->adev = adev;
3682 c_irq_params->irq_src = int_params.irq_source;
3684 amdgpu_dm_irq_register_interrupt(
3685 adev, &int_params, dm_crtc_high_irq, c_irq_params);
3688 /* Use otg vertical line interrupt */
3689 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
3690 for (i = 0; i <= adev->mode_info.num_crtc - 1; i++) {
3691 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE,
3692 vrtl_int_srcid[i], &adev->vline0_irq);
3695 DRM_ERROR("Failed to add vline0 irq id!\n");
3699 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3700 int_params.irq_source =
3701 dc_interrupt_to_irq_source(dc, vrtl_int_srcid[i], 0);
3703 if (int_params.irq_source == DC_IRQ_SOURCE_INVALID) {
3704 DRM_ERROR("Failed to register vline0 irq %d!\n", vrtl_int_srcid[i]);
3708 c_irq_params = &adev->dm.vline0_params[int_params.irq_source
3709 - DC_IRQ_SOURCE_DC1_VLINE0];
3711 c_irq_params->adev = adev;
3712 c_irq_params->irq_src = int_params.irq_source;
3714 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3715 dm_dcn_vertical_interrupt0_high_irq, c_irq_params);
3719 /* Use VUPDATE_NO_LOCK interrupt on DCN, which seems to correspond to
3720 * the regular VUPDATE interrupt on DCE. We want DC_IRQ_SOURCE_VUPDATEx
3721 * to trigger at end of each vblank, regardless of state of the lock,
3722 * matching DCE behaviour.
3724 for (i = DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT;
3725 i <= DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT + adev->mode_info.num_crtc - 1;
3727 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->vupdate_irq);
3730 DRM_ERROR("Failed to add vupdate irq id!\n");
3734 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3735 int_params.irq_source =
3736 dc_interrupt_to_irq_source(dc, i, 0);
3738 c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
3740 c_irq_params->adev = adev;
3741 c_irq_params->irq_src = int_params.irq_source;
3743 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3744 dm_vupdate_high_irq, c_irq_params);
3747 /* Use GRPH_PFLIP interrupt */
3748 for (i = DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT;
3749 i <= DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT + dc->caps.max_otg_num - 1;
3751 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->pageflip_irq);
3753 DRM_ERROR("Failed to add page flip irq id!\n");
3757 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3758 int_params.irq_source =
3759 dc_interrupt_to_irq_source(dc, i, 0);
3761 c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
3763 c_irq_params->adev = adev;
3764 c_irq_params->irq_src = int_params.irq_source;
3766 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3767 dm_pflip_high_irq, c_irq_params);
3772 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DC_HPD1_INT,
3775 DRM_ERROR("Failed to add hpd irq id!\n");
3779 register_hpd_handlers(adev);
3783 /* Register Outbox IRQ sources and initialize IRQ callbacks */
3784 static int register_outbox_irq_handlers(struct amdgpu_device *adev)
3786 struct dc *dc = adev->dm.dc;
3787 struct common_irq_params *c_irq_params;
3788 struct dc_interrupt_params int_params = {0};
3791 int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3792 int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3794 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DMCUB_OUTBOX_LOW_PRIORITY_READY_INT,
3795 &adev->dmub_outbox_irq);
3797 DRM_ERROR("Failed to add outbox irq id!\n");
3801 if (dc->ctx->dmub_srv) {
3802 i = DCN_1_0__SRCID__DMCUB_OUTBOX_LOW_PRIORITY_READY_INT;
3803 int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
3804 int_params.irq_source =
3805 dc_interrupt_to_irq_source(dc, i, 0);
3807 c_irq_params = &adev->dm.dmub_outbox_params[0];
3809 c_irq_params->adev = adev;
3810 c_irq_params->irq_src = int_params.irq_source;
3812 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3813 dm_dmub_outbox1_low_irq, c_irq_params);
3820 * Acquires the lock for the atomic state object and returns
3821 * the new atomic state.
3823 * This should only be called during atomic check.
3825 int dm_atomic_get_state(struct drm_atomic_state *state,
3826 struct dm_atomic_state **dm_state)
3828 struct drm_device *dev = state->dev;
3829 struct amdgpu_device *adev = drm_to_adev(dev);
3830 struct amdgpu_display_manager *dm = &adev->dm;
3831 struct drm_private_state *priv_state;
3836 priv_state = drm_atomic_get_private_obj_state(state, &dm->atomic_obj);
3837 if (IS_ERR(priv_state))
3838 return PTR_ERR(priv_state);
3840 *dm_state = to_dm_atomic_state(priv_state);
3845 static struct dm_atomic_state *
3846 dm_atomic_get_new_state(struct drm_atomic_state *state)
3848 struct drm_device *dev = state->dev;
3849 struct amdgpu_device *adev = drm_to_adev(dev);
3850 struct amdgpu_display_manager *dm = &adev->dm;
3851 struct drm_private_obj *obj;
3852 struct drm_private_state *new_obj_state;
3855 for_each_new_private_obj_in_state(state, obj, new_obj_state, i) {
3856 if (obj->funcs == dm->atomic_obj.funcs)
3857 return to_dm_atomic_state(new_obj_state);
3863 static struct drm_private_state *
3864 dm_atomic_duplicate_state(struct drm_private_obj *obj)
3866 struct dm_atomic_state *old_state, *new_state;
3868 new_state = kzalloc(sizeof(*new_state), GFP_KERNEL);
3872 __drm_atomic_helper_private_obj_duplicate_state(obj, &new_state->base);
3874 old_state = to_dm_atomic_state(obj->state);
3876 if (old_state && old_state->context)
3877 new_state->context = dc_copy_state(old_state->context);
3879 if (!new_state->context) {
3884 return &new_state->base;
3887 static void dm_atomic_destroy_state(struct drm_private_obj *obj,
3888 struct drm_private_state *state)
3890 struct dm_atomic_state *dm_state = to_dm_atomic_state(state);
3892 if (dm_state && dm_state->context)
3893 dc_release_state(dm_state->context);
3898 static struct drm_private_state_funcs dm_atomic_state_funcs = {
3899 .atomic_duplicate_state = dm_atomic_duplicate_state,
3900 .atomic_destroy_state = dm_atomic_destroy_state,
3903 static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev)
3905 struct dm_atomic_state *state;
3908 adev->mode_info.mode_config_initialized = true;
3910 adev_to_drm(adev)->mode_config.funcs = (void *)&amdgpu_dm_mode_funcs;
3911 adev_to_drm(adev)->mode_config.helper_private = &amdgpu_dm_mode_config_helperfuncs;
3913 adev_to_drm(adev)->mode_config.max_width = 16384;
3914 adev_to_drm(adev)->mode_config.max_height = 16384;
3916 adev_to_drm(adev)->mode_config.preferred_depth = 24;
3917 /* disable prefer shadow for now due to hibernation issues */
3918 adev_to_drm(adev)->mode_config.prefer_shadow = 0;
3919 /* indicates support for immediate flip */
3920 adev_to_drm(adev)->mode_config.async_page_flip = true;
3922 adev_to_drm(adev)->mode_config.fb_base = adev->gmc.aper_base;
3924 state = kzalloc(sizeof(*state), GFP_KERNEL);
3928 state->context = dc_create_state(adev->dm.dc);
3929 if (!state->context) {
3934 dc_resource_state_copy_construct_current(adev->dm.dc, state->context);
3936 drm_atomic_private_obj_init(adev_to_drm(adev),
3937 &adev->dm.atomic_obj,
3939 &dm_atomic_state_funcs);
3941 r = amdgpu_display_modeset_create_props(adev);
3943 dc_release_state(state->context);
3948 r = amdgpu_dm_audio_init(adev);
3950 dc_release_state(state->context);
3958 #define AMDGPU_DM_DEFAULT_MIN_BACKLIGHT 12
3959 #define AMDGPU_DM_DEFAULT_MAX_BACKLIGHT 255
3960 #define AUX_BL_DEFAULT_TRANSITION_TIME_MS 50
3962 static void amdgpu_dm_update_backlight_caps(struct amdgpu_display_manager *dm,
3965 #if defined(CONFIG_ACPI)
3966 struct amdgpu_dm_backlight_caps caps;
3968 memset(&caps, 0, sizeof(caps));
3970 if (dm->backlight_caps[bl_idx].caps_valid)
3973 amdgpu_acpi_get_backlight_caps(&caps);
3974 if (caps.caps_valid) {
3975 dm->backlight_caps[bl_idx].caps_valid = true;
3976 if (caps.aux_support)
3978 dm->backlight_caps[bl_idx].min_input_signal = caps.min_input_signal;
3979 dm->backlight_caps[bl_idx].max_input_signal = caps.max_input_signal;
3981 dm->backlight_caps[bl_idx].min_input_signal =
3982 AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
3983 dm->backlight_caps[bl_idx].max_input_signal =
3984 AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
3987 if (dm->backlight_caps[bl_idx].aux_support)
3990 dm->backlight_caps[bl_idx].min_input_signal = AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
3991 dm->backlight_caps[bl_idx].max_input_signal = AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
3995 static int get_brightness_range(const struct amdgpu_dm_backlight_caps *caps,
3996 unsigned *min, unsigned *max)
4001 if (caps->aux_support) {
4002 // Firmware limits are in nits, DC API wants millinits.
4003 *max = 1000 * caps->aux_max_input_signal;
4004 *min = 1000 * caps->aux_min_input_signal;
4006 // Firmware limits are 8-bit, PWM control is 16-bit.
4007 *max = 0x101 * caps->max_input_signal;
4008 *min = 0x101 * caps->min_input_signal;
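// Illustrative note: multiplying by 0x101 maps the 8-bit range exactly
// onto the 16-bit range, since 0xFF * 0x101 == 0xFFFF.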
4013 static u32 convert_brightness_from_user(const struct amdgpu_dm_backlight_caps *caps,
4014 uint32_t brightness)
4018 if (!get_brightness_range(caps, &min, &max))
4021 // Rescale 0..255 to min..max
4022 return min + DIV_ROUND_CLOSEST((max - min) * brightness,
4023 AMDGPU_MAX_BL_LEVEL);
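// Worked example (illustrative), assuming the default PWM caps of
// min_input_signal = 12 and max_input_signal = 255: min = 12 * 0x101 =
// 3084 and max = 0xFFFF, so a user brightness of 128 maps to
// 3084 + DIV_ROUND_CLOSEST((65535 - 3084) * 128, 255) = 34432.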
4026 static u32 convert_brightness_to_user(const struct amdgpu_dm_backlight_caps *caps,
4027 uint32_t brightness)
4031 if (!get_brightness_range(caps, &min, &max))
4034 if (brightness < min)
4036 // Rescale min..max to 0..255
4037 return DIV_ROUND_CLOSEST(AMDGPU_MAX_BL_LEVEL * (brightness - min),
4041 static void amdgpu_dm_backlight_set_level(struct amdgpu_display_manager *dm,
4043 u32 user_brightness)
4045 struct amdgpu_dm_backlight_caps caps;
4046 struct dc_link *link;
4050 amdgpu_dm_update_backlight_caps(dm, bl_idx);
4051 caps = dm->backlight_caps[bl_idx];
4053 dm->brightness[bl_idx] = user_brightness;
4054 /* update scratch register */
4056 amdgpu_atombios_scratch_regs_set_backlight_level(dm->adev, dm->brightness[bl_idx]);
4057 brightness = convert_brightness_from_user(&caps, dm->brightness[bl_idx]);
4058 link = (struct dc_link *)dm->backlight_link[bl_idx];
4060 /* Change brightness based on AUX property */
4061 if (caps.aux_support) {
4062 rc = dc_link_set_backlight_level_nits(link, true, brightness,
4063 AUX_BL_DEFAULT_TRANSITION_TIME_MS);
4065 DRM_DEBUG("DM: Failed to update backlight via AUX on eDP[%d]\n", bl_idx);
4067 rc = dc_link_set_backlight_level(link, brightness, 0);
4069 DRM_DEBUG("DM: Failed to update backlight on eDP[%d]\n", bl_idx);
4073 dm->actual_brightness[bl_idx] = user_brightness;
4076 static int amdgpu_dm_backlight_update_status(struct backlight_device *bd)
4078 struct amdgpu_display_manager *dm = bl_get_data(bd);
4081 for (i = 0; i < dm->num_of_edps; i++) {
4082 if (bd == dm->backlight_dev[i])
4085 if (i >= AMDGPU_DM_MAX_NUM_EDP)
4087 amdgpu_dm_backlight_set_level(dm, i, bd->props.brightness);
4092 static u32 amdgpu_dm_backlight_get_level(struct amdgpu_display_manager *dm,
4093 int bl_idx)
4094 {
4095 struct amdgpu_dm_backlight_caps caps;
4096 struct dc_link *link = (struct dc_link *)dm->backlight_link[bl_idx];
4098 amdgpu_dm_update_backlight_caps(dm, bl_idx);
4099 caps = dm->backlight_caps[bl_idx];
4101 if (caps.aux_support) {
4105 rc = dc_link_get_backlight_level_nits(link, &avg, &peak);
4107 return dm->brightness[bl_idx];
4108 return convert_brightness_to_user(&caps, avg);
4110 int ret = dc_link_get_backlight_level(link);
4112 if (ret == DC_ERROR_UNEXPECTED)
4113 return dm->brightness[bl_idx];
4114 return convert_brightness_to_user(&caps, ret);
4118 static int amdgpu_dm_backlight_get_brightness(struct backlight_device *bd)
4120 struct amdgpu_display_manager *dm = bl_get_data(bd);
4123 for (i = 0; i < dm->num_of_edps; i++) {
4124 if (bd == dm->backlight_dev[i])
4127 if (i >= AMDGPU_DM_MAX_NUM_EDP)
4129 return amdgpu_dm_backlight_get_level(dm, i);
4132 static const struct backlight_ops amdgpu_dm_backlight_ops = {
4133 .options = BL_CORE_SUSPENDRESUME,
4134 .get_brightness = amdgpu_dm_backlight_get_brightness,
4135 .update_status = amdgpu_dm_backlight_update_status,
4139 amdgpu_dm_register_backlight_device(struct amdgpu_display_manager *dm)
4142 struct backlight_properties props = { 0 };
4144 amdgpu_dm_update_backlight_caps(dm, dm->num_of_edps);
4145 dm->brightness[dm->num_of_edps] = AMDGPU_MAX_BL_LEVEL;
4147 props.max_brightness = AMDGPU_MAX_BL_LEVEL;
4148 props.brightness = AMDGPU_MAX_BL_LEVEL;
4149 props.type = BACKLIGHT_RAW;
4151 snprintf(bl_name, sizeof(bl_name), "amdgpu_bl%d",
4152 adev_to_drm(dm->adev)->primary->index + dm->num_of_edps);
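/* e.g. the first panel on the primary DRM minor registers as "amdgpu_bl0"
 * (illustrative; the index is the minor index plus the eDP count). */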
4154 dm->backlight_dev[dm->num_of_edps] = backlight_device_register(bl_name,
4155 adev_to_drm(dm->adev)->dev,
4157 &amdgpu_dm_backlight_ops,
4160 if (IS_ERR(dm->backlight_dev[dm->num_of_edps]))
4161 DRM_ERROR("DM: Backlight registration failed!\n");
4163 DRM_DEBUG_DRIVER("DM: Registered Backlight device: %s\n", bl_name);
4166 static int initialize_plane(struct amdgpu_display_manager *dm,
4167 struct amdgpu_mode_info *mode_info, int plane_id,
4168 enum drm_plane_type plane_type,
4169 const struct dc_plane_cap *plane_cap)
4171 struct drm_plane *plane;
4172 unsigned long possible_crtcs;
4175 plane = kzalloc(sizeof(struct drm_plane), GFP_KERNEL);
4177 DRM_ERROR("KMS: Failed to allocate plane\n");
4180 plane->type = plane_type;
4183 * HACK: IGT tests expect that the primary plane for a CRTC
4184 * can only have one possible CRTC. Only expose support for
4185 * any CRTC if the plane is not going to be used as a primary
4186 * plane for a CRTC - like overlay or underlay planes.
4188 possible_crtcs = 1 << plane_id;
4189 if (plane_id >= dm->dc->caps.max_streams)
4190 possible_crtcs = 0xff;
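/*
 * In other words: each primary plane gets exactly its own CRTC bit,
 * while planes with an id past max_streams (overlay/underlay) advertise
 * all CRTCs via the 0xff mask.
 */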
4192 ret = amdgpu_dm_plane_init(dm, plane, possible_crtcs, plane_cap);
4195 DRM_ERROR("KMS: Failed to initialize plane\n");
4201 mode_info->planes[plane_id] = plane;
4207 static void register_backlight_device(struct amdgpu_display_manager *dm,
4208 struct dc_link *link)
4210 if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
4211 link->type != dc_connection_none) {
4213 * Even if registration fails, we should continue with
4214 * DM initialization because not having a backlight control
4215 * is better than a black screen.
4217 if (!dm->backlight_dev[dm->num_of_edps])
4218 amdgpu_dm_register_backlight_device(dm);
4220 if (dm->backlight_dev[dm->num_of_edps]) {
4221 dm->backlight_link[dm->num_of_edps] = link;
4229 * In this architecture, the association
4230 * connector -> encoder -> crtc
4231 * is not really required. The crtc and connector will hold the
4232 * display_index as an abstraction to use with the DAL component
4234 * Returns 0 on success
4236 static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
4238 struct amdgpu_display_manager *dm = &adev->dm;
4240 struct amdgpu_dm_connector *aconnector = NULL;
4241 struct amdgpu_encoder *aencoder = NULL;
4242 struct amdgpu_mode_info *mode_info = &adev->mode_info;
4244 int32_t primary_planes;
4245 enum dc_connection_type new_connection_type = dc_connection_none;
4246 const struct dc_plane_cap *plane;
4247 bool psr_feature_enabled = false;
4249 dm->display_indexes_num = dm->dc->caps.max_streams;
4250 /* Update the actual number of CRTCs in use */
4251 adev->mode_info.num_crtc = adev->dm.display_indexes_num;
4253 link_cnt = dm->dc->caps.max_links;
4254 if (amdgpu_dm_mode_config_init(dm->adev)) {
4255 DRM_ERROR("DM: Failed to initialize mode config\n");
4259 /* There is one primary plane per CRTC */
4260 primary_planes = dm->dc->caps.max_streams;
4261 ASSERT(primary_planes <= AMDGPU_MAX_PLANES);
4264 * Initialize primary planes, implicit planes for legacy IOCTLs.
4265 * Order is reversed to match iteration order in atomic check.
4267 for (i = (primary_planes - 1); i >= 0; i--) {
4268 plane = &dm->dc->caps.planes[i];
4270 if (initialize_plane(dm, mode_info, i,
4271 DRM_PLANE_TYPE_PRIMARY, plane)) {
4272 DRM_ERROR("KMS: Failed to initialize primary plane\n");
4278 * Initialize overlay planes, index starting after primary planes.
4279 * These planes have a higher DRM index than the primary planes since
4280 * they should be considered as having a higher z-order.
4281 * Order is reversed to match iteration order in atomic check.
4283 * Only support DCN for now, and only expose one so we don't encourage
4284 * userspace to use up all the pipes.
4286 for (i = 0; i < dm->dc->caps.max_planes; ++i) {
4287 struct dc_plane_cap *plane = &dm->dc->caps.planes[i];
4289 if (plane->type != DC_PLANE_TYPE_DCN_UNIVERSAL)
4292 if (!plane->blends_with_above || !plane->blends_with_below)
4295 if (!plane->pixel_format_support.argb8888)
4298 if (initialize_plane(dm, NULL, primary_planes + i,
4299 DRM_PLANE_TYPE_OVERLAY, plane)) {
4300 DRM_ERROR("KMS: Failed to initialize overlay plane\n");
4304 /* Only create one overlay plane. */
4308 for (i = 0; i < dm->dc->caps.max_streams; i++)
4309 if (amdgpu_dm_crtc_init(dm, mode_info->planes[i], i)) {
4310 DRM_ERROR("KMS: Failed to initialize crtc\n");
4314 /* Use Outbox interrupt */
4315 switch (adev->ip_versions[DCE_HWIP][0]) {
4316 case IP_VERSION(3, 0, 0):
4317 case IP_VERSION(3, 1, 2):
4318 case IP_VERSION(3, 1, 3):
4319 case IP_VERSION(3, 1, 4):
4320 case IP_VERSION(3, 1, 5):
4321 case IP_VERSION(3, 1, 6):
4322 case IP_VERSION(3, 2, 0):
4323 case IP_VERSION(3, 2, 1):
4324 case IP_VERSION(2, 1, 0):
4325 if (register_outbox_irq_handlers(dm->adev)) {
4326 DRM_ERROR("DM: Failed to initialize IRQ\n");
4331 DRM_DEBUG_KMS("Unsupported DCN IP version for outbox: 0x%X\n",
4332 adev->ip_versions[DCE_HWIP][0]);
4335 /* Determine whether to enable PSR support by default. */
4336 if (!(amdgpu_dc_debug_mask & DC_DISABLE_PSR)) {
4337 switch (adev->ip_versions[DCE_HWIP][0]) {
4338 case IP_VERSION(3, 1, 2):
4339 case IP_VERSION(3, 1, 3):
4340 case IP_VERSION(3, 1, 4):
4341 case IP_VERSION(3, 1, 5):
4342 case IP_VERSION(3, 1, 6):
4343 case IP_VERSION(3, 2, 0):
4344 case IP_VERSION(3, 2, 1):
4345 psr_feature_enabled = true;
4348 psr_feature_enabled = amdgpu_dc_feature_mask & DC_PSR_MASK;
4353 /* loops over all connectors on the board */
4354 for (i = 0; i < link_cnt; i++) {
4355 struct dc_link *link = NULL;
4357 if (i > AMDGPU_DM_MAX_DISPLAY_INDEX) {
4359 "KMS: Cannot support more than %d display indexes\n",
4360 AMDGPU_DM_MAX_DISPLAY_INDEX);
4364 aconnector = kzalloc(sizeof(*aconnector), GFP_KERNEL);
4368 aencoder = kzalloc(sizeof(*aencoder), GFP_KERNEL);
4372 if (amdgpu_dm_encoder_init(dm->ddev, aencoder, i)) {
4373 DRM_ERROR("KMS: Failed to initialize encoder\n");
4377 if (amdgpu_dm_connector_init(dm, aconnector, i, aencoder)) {
4378 DRM_ERROR("KMS: Failed to initialize connector\n");
4382 link = dc_get_link_at_index(dm->dc, i);
4384 if (!dc_link_detect_sink(link, &new_connection_type))
4385 DRM_ERROR("KMS: Failed to detect connector\n");
4387 if (aconnector->base.force && new_connection_type == dc_connection_none) {
4388 emulated_link_detect(link);
4389 amdgpu_dm_update_connector_after_detect(aconnector);
4393 mutex_lock(&dm->dc_lock);
4394 ret = dc_link_detect(link, DETECT_REASON_BOOT);
4395 mutex_unlock(&dm->dc_lock);
4398 amdgpu_dm_update_connector_after_detect(aconnector);
4399 register_backlight_device(dm, link);
4401 if (dm->num_of_edps)
4402 update_connector_ext_caps(aconnector);
4404 if (psr_feature_enabled)
4405 amdgpu_dm_set_psr_caps(link);
4407 /* TODO: Fix vblank control helpers to delay PSR entry to allow this when
4408 * PSR is also supported.
4410 if (link->psr_settings.psr_feature_enabled)
4411 adev_to_drm(adev)->vblank_disable_immediate = false;
4416 /* Software is initialized. Now we can register interrupt handlers. */
4417 switch (adev->asic_type) {
4418 #if defined(CONFIG_DRM_AMD_DC_SI)
4423 if (dce60_register_irq_handlers(dm->adev)) {
4424 DRM_ERROR("DM: Failed to initialize IRQ\n");
4438 case CHIP_POLARIS11:
4439 case CHIP_POLARIS10:
4440 case CHIP_POLARIS12:
4445 if (dce110_register_irq_handlers(dm->adev)) {
4446 DRM_ERROR("DM: Failed to initialize IRQ\n");
4451 switch (adev->ip_versions[DCE_HWIP][0]) {
4452 case IP_VERSION(1, 0, 0):
4453 case IP_VERSION(1, 0, 1):
4454 case IP_VERSION(2, 0, 2):
4455 case IP_VERSION(2, 0, 3):
4456 case IP_VERSION(2, 0, 0):
4457 case IP_VERSION(2, 1, 0):
4458 case IP_VERSION(3, 0, 0):
4459 case IP_VERSION(3, 0, 2):
4460 case IP_VERSION(3, 0, 3):
4461 case IP_VERSION(3, 0, 1):
4462 case IP_VERSION(3, 1, 2):
4463 case IP_VERSION(3, 1, 3):
4464 case IP_VERSION(3, 1, 4):
4465 case IP_VERSION(3, 1, 5):
4466 case IP_VERSION(3, 1, 6):
4467 case IP_VERSION(3, 2, 0):
4468 case IP_VERSION(3, 2, 1):
4469 if (dcn10_register_irq_handlers(dm->adev)) {
4470 DRM_ERROR("DM: Failed to initialize IRQ\n");
4475 DRM_ERROR("Unsupported DCE IP versions: 0x%X\n",
4476 adev->ip_versions[DCE_HWIP][0]);
4490 static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm)
4492 drm_atomic_private_obj_fini(&dm->atomic_obj);
4496 /******************************************************************************
4497 * amdgpu_display_funcs functions
4498 *****************************************************************************/
4501 * dm_bandwidth_update - program display watermarks
4503 * @adev: amdgpu_device pointer
4505 * Calculate and program the display watermarks and line buffer allocation.
4507 static void dm_bandwidth_update(struct amdgpu_device *adev)
4509 /* TODO: implement later */
4512 static const struct amdgpu_display_funcs dm_display_funcs = {
4513 .bandwidth_update = dm_bandwidth_update, /* called unconditionally */
4514 .vblank_get_counter = dm_vblank_get_counter,/* called unconditionally */
4515 .backlight_set_level = NULL, /* never called for DC */
4516 .backlight_get_level = NULL, /* never called for DC */
4517 .hpd_sense = NULL,/* called unconditionally */
4518 .hpd_set_polarity = NULL, /* called unconditionally */
4519 .hpd_get_gpio_reg = NULL, /* VBIOS parsing. DAL does it. */
4520 .page_flip_get_scanoutpos =
4521 dm_crtc_get_scanoutpos,/* called unconditionally */
4522 .add_encoder = NULL, /* VBIOS parsing. DAL does it. */
4523 .add_connector = NULL, /* VBIOS parsing. DAL does it. */
4526 #if defined(CONFIG_DEBUG_KERNEL_DC)
4528 static ssize_t s3_debug_store(struct device *device,
4529 struct device_attribute *attr,
4535 struct drm_device *drm_dev = dev_get_drvdata(device);
4536 struct amdgpu_device *adev = drm_to_adev(drm_dev);
4538 ret = kstrtoint(buf, 0, &s3_state);
4543 drm_kms_helper_hotplug_event(adev_to_drm(adev));
4548 return ret == 0 ? count : 0;
4551 DEVICE_ATTR_WO(s3_debug);
4555 static int dm_early_init(void *handle)
4557 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
4559 switch (adev->asic_type) {
4560 #if defined(CONFIG_DRM_AMD_DC_SI)
4564 adev->mode_info.num_crtc = 6;
4565 adev->mode_info.num_hpd = 6;
4566 adev->mode_info.num_dig = 6;
4569 adev->mode_info.num_crtc = 2;
4570 adev->mode_info.num_hpd = 2;
4571 adev->mode_info.num_dig = 2;
4576 adev->mode_info.num_crtc = 6;
4577 adev->mode_info.num_hpd = 6;
4578 adev->mode_info.num_dig = 6;
4581 adev->mode_info.num_crtc = 4;
4582 adev->mode_info.num_hpd = 6;
4583 adev->mode_info.num_dig = 7;
4587 adev->mode_info.num_crtc = 2;
4588 adev->mode_info.num_hpd = 6;
4589 adev->mode_info.num_dig = 6;
4593 adev->mode_info.num_crtc = 6;
4594 adev->mode_info.num_hpd = 6;
4595 adev->mode_info.num_dig = 7;
4598 adev->mode_info.num_crtc = 3;
4599 adev->mode_info.num_hpd = 6;
4600 adev->mode_info.num_dig = 9;
4603 adev->mode_info.num_crtc = 2;
4604 adev->mode_info.num_hpd = 6;
4605 adev->mode_info.num_dig = 9;
4607 case CHIP_POLARIS11:
4608 case CHIP_POLARIS12:
4609 adev->mode_info.num_crtc = 5;
4610 adev->mode_info.num_hpd = 5;
4611 adev->mode_info.num_dig = 5;
4613 case CHIP_POLARIS10:
4615 adev->mode_info.num_crtc = 6;
4616 adev->mode_info.num_hpd = 6;
4617 adev->mode_info.num_dig = 6;
4622 adev->mode_info.num_crtc = 6;
4623 adev->mode_info.num_hpd = 6;
4624 adev->mode_info.num_dig = 6;
4628 switch (adev->ip_versions[DCE_HWIP][0]) {
4629 case IP_VERSION(2, 0, 2):
4630 case IP_VERSION(3, 0, 0):
4631 adev->mode_info.num_crtc = 6;
4632 adev->mode_info.num_hpd = 6;
4633 adev->mode_info.num_dig = 6;
4635 case IP_VERSION(2, 0, 0):
4636 case IP_VERSION(3, 0, 2):
4637 adev->mode_info.num_crtc = 5;
4638 adev->mode_info.num_hpd = 5;
4639 adev->mode_info.num_dig = 5;
4641 case IP_VERSION(2, 0, 3):
4642 case IP_VERSION(3, 0, 3):
4643 adev->mode_info.num_crtc = 2;
4644 adev->mode_info.num_hpd = 2;
4645 adev->mode_info.num_dig = 2;
4647 case IP_VERSION(1, 0, 0):
4648 case IP_VERSION(1, 0, 1):
4649 case IP_VERSION(3, 0, 1):
4650 case IP_VERSION(2, 1, 0):
4651 case IP_VERSION(3, 1, 2):
4652 case IP_VERSION(3, 1, 3):
4653 case IP_VERSION(3, 1, 4):
4654 case IP_VERSION(3, 1, 5):
4655 case IP_VERSION(3, 1, 6):
4656 case IP_VERSION(3, 2, 0):
4657 case IP_VERSION(3, 2, 1):
4658 adev->mode_info.num_crtc = 4;
4659 adev->mode_info.num_hpd = 4;
4660 adev->mode_info.num_dig = 4;
4663 DRM_ERROR("Unsupported DCE IP versions: 0x%x\n",
4664 adev->ip_versions[DCE_HWIP][0]);
4670 amdgpu_dm_set_irq_funcs(adev);
4672 if (adev->mode_info.funcs == NULL)
4673 adev->mode_info.funcs = &dm_display_funcs;
4676 * Note: Do NOT change adev->audio_endpt_rreg and
4677 * adev->audio_endpt_wreg because they are initialised in
4678 * amdgpu_device_init()
4680 #if defined(CONFIG_DEBUG_KERNEL_DC)
4682 adev_to_drm(adev)->dev,
4683 &dev_attr_s3_debug);
4689 static bool modeset_required(struct drm_crtc_state *crtc_state,
4690 struct dc_stream_state *new_stream,
4691 struct dc_stream_state *old_stream)
4693 return crtc_state->active && drm_atomic_crtc_needs_modeset(crtc_state);
4696 static bool modereset_required(struct drm_crtc_state *crtc_state)
4698 return !crtc_state->active && drm_atomic_crtc_needs_modeset(crtc_state);
4701 static void amdgpu_dm_encoder_destroy(struct drm_encoder *encoder)
4703 drm_encoder_cleanup(encoder);
4707 static const struct drm_encoder_funcs amdgpu_dm_encoder_funcs = {
4708 .destroy = amdgpu_dm_encoder_destroy,
4712 static void get_min_max_dc_plane_scaling(struct drm_device *dev,
4713 struct drm_framebuffer *fb,
4714 int *min_downscale, int *max_upscale)
4716 struct amdgpu_device *adev = drm_to_adev(dev);
4717 struct dc *dc = adev->dm.dc;
4718 /* Caps for all supported planes are the same on DCE and DCN 1 - 3 */
4719 struct dc_plane_cap *plane_cap = &dc->caps.planes[0];
4721 switch (fb->format->format) {
4722 case DRM_FORMAT_P010:
4723 case DRM_FORMAT_NV12:
4724 case DRM_FORMAT_NV21:
4725 *max_upscale = plane_cap->max_upscale_factor.nv12;
4726 *min_downscale = plane_cap->max_downscale_factor.nv12;
4729 case DRM_FORMAT_XRGB16161616F:
4730 case DRM_FORMAT_ARGB16161616F:
4731 case DRM_FORMAT_XBGR16161616F:
4732 case DRM_FORMAT_ABGR16161616F:
4733 *max_upscale = plane_cap->max_upscale_factor.fp16;
4734 *min_downscale = plane_cap->max_downscale_factor.fp16;
4738 *max_upscale = plane_cap->max_upscale_factor.argb8888;
4739 *min_downscale = plane_cap->max_downscale_factor.argb8888;
4744 * A factor of 1 in the plane_cap means scaling is not allowed, i.e. use a
4745 * scaling factor of 1.0 == 1000 units.
4747 if (*max_upscale == 1)
4748 *max_upscale = 1000;
4750 if (*min_downscale == 1)
4751 *min_downscale = 1000;
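/*
 * Note: factors are expressed in 1/1000 units, so e.g. a min_downscale
 * of 250 permits shrinking to 1/4 size and a max_upscale of 16000
 * permits 16x enlargement (compare the 250/16000 fallbacks in
 * fill_dc_scaling_info).
 */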
4755 static int fill_dc_scaling_info(struct amdgpu_device *adev,
4756 const struct drm_plane_state *state,
4757 struct dc_scaling_info *scaling_info)
4759 int scale_w, scale_h, min_downscale, max_upscale;
4761 memset(scaling_info, 0, sizeof(*scaling_info));
4763 /* Source is fixed 16.16 but we ignore the fractional part for now... */
4764 scaling_info->src_rect.x = state->src_x >> 16;
4765 scaling_info->src_rect.y = state->src_y >> 16;
4768 * For reasons we don't (yet) fully understand, a non-zero
4769 * src_y coordinate into an NV12 buffer can cause a
4770 * system hang on DCN1x.
4771 * To avoid hangs (and maybe be overly cautious)
4772 * let's reject both non-zero src_x and src_y.
4774 * We currently know of only one use-case to reproduce a
4775 * scenario with non-zero src_x and src_y for NV12, which
4776 * is to gesture the YouTube Android app into full screen
4779 if (((adev->ip_versions[DCE_HWIP][0] == IP_VERSION(1, 0, 0)) ||
4780 (adev->ip_versions[DCE_HWIP][0] == IP_VERSION(1, 0, 1))) &&
4781 (state->fb && state->fb->format->format == DRM_FORMAT_NV12 &&
4782 (scaling_info->src_rect.x != 0 || scaling_info->src_rect.y != 0)))
4785 scaling_info->src_rect.width = state->src_w >> 16;
4786 if (scaling_info->src_rect.width == 0)
4789 scaling_info->src_rect.height = state->src_h >> 16;
4790 if (scaling_info->src_rect.height == 0)
4793 scaling_info->dst_rect.x = state->crtc_x;
4794 scaling_info->dst_rect.y = state->crtc_y;
4796 if (state->crtc_w == 0)
4799 scaling_info->dst_rect.width = state->crtc_w;
4801 if (state->crtc_h == 0)
4804 scaling_info->dst_rect.height = state->crtc_h;
4806 /* DRM doesn't specify clipping on destination output. */
4807 scaling_info->clip_rect = scaling_info->dst_rect;
4809 /* Validate scaling per-format with DC plane caps */
4810 if (state->plane && state->plane->dev && state->fb) {
4811 get_min_max_dc_plane_scaling(state->plane->dev, state->fb,
4812 &min_downscale, &max_upscale);
4814 min_downscale = 250;
4815 max_upscale = 16000;
4818 scale_w = scaling_info->dst_rect.width * 1000 /
4819 scaling_info->src_rect.width;
4821 if (scale_w < min_downscale || scale_w > max_upscale)
4824 scale_h = scaling_info->dst_rect.height * 1000 /
4825 scaling_info->src_rect.height;
4827 if (scale_h < min_downscale || scale_h > max_upscale)
4831 * The "scaling_quality" can be ignored for now, quality = 0 has DC
4832 * assume reasonable defaults based on the format.
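/*
 * Worked example for the checks above: stretching a 960x540 source to a
 * 1920x1080 destination gives scale_w = 1920 * 1000 / 960 = 2000 (2.0x),
 * which passes as long as max_upscale >= 2000.
 */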
4839 fill_gfx8_tiling_info_from_flags(union dc_tiling_info *tiling_info,
4840 uint64_t tiling_flags)
4842 /* Fill GFX8 params */
4843 if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == DC_ARRAY_2D_TILED_THIN1) {
4844 unsigned int bankw, bankh, mtaspect, tile_split, num_banks;
4846 bankw = AMDGPU_TILING_GET(tiling_flags, BANK_WIDTH);
4847 bankh = AMDGPU_TILING_GET(tiling_flags, BANK_HEIGHT);
4848 mtaspect = AMDGPU_TILING_GET(tiling_flags, MACRO_TILE_ASPECT);
4849 tile_split = AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT);
4850 num_banks = AMDGPU_TILING_GET(tiling_flags, NUM_BANKS);
4852 /* XXX fix me for VI */
4853 tiling_info->gfx8.num_banks = num_banks;
4854 tiling_info->gfx8.array_mode =
4855 DC_ARRAY_2D_TILED_THIN1;
4856 tiling_info->gfx8.tile_split = tile_split;
4857 tiling_info->gfx8.bank_width = bankw;
4858 tiling_info->gfx8.bank_height = bankh;
4859 tiling_info->gfx8.tile_aspect = mtaspect;
4860 tiling_info->gfx8.tile_mode =
4861 DC_ADDR_SURF_MICRO_TILING_DISPLAY;
4862 } else if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE)
4863 == DC_ARRAY_1D_TILED_THIN1) {
4864 tiling_info->gfx8.array_mode = DC_ARRAY_1D_TILED_THIN1;
4867 tiling_info->gfx8.pipe_config =
4868 AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG);
4872 fill_gfx9_tiling_info_from_device(const struct amdgpu_device *adev,
4873 union dc_tiling_info *tiling_info)
4875 tiling_info->gfx9.num_pipes =
4876 adev->gfx.config.gb_addr_config_fields.num_pipes;
4877 tiling_info->gfx9.num_banks =
4878 adev->gfx.config.gb_addr_config_fields.num_banks;
4879 tiling_info->gfx9.pipe_interleave =
4880 adev->gfx.config.gb_addr_config_fields.pipe_interleave_size;
4881 tiling_info->gfx9.num_shader_engines =
4882 adev->gfx.config.gb_addr_config_fields.num_se;
4883 tiling_info->gfx9.max_compressed_frags =
4884 adev->gfx.config.gb_addr_config_fields.max_compress_frags;
4885 tiling_info->gfx9.num_rb_per_se =
4886 adev->gfx.config.gb_addr_config_fields.num_rb_per_se;
4887 tiling_info->gfx9.shaderEnable = 1;
4888 if (adev->ip_versions[GC_HWIP][0] >= IP_VERSION(10, 3, 0))
4889 tiling_info->gfx9.num_pkrs = adev->gfx.config.gb_addr_config_fields.num_pkrs;
4893 validate_dcc(struct amdgpu_device *adev,
4894 const enum surface_pixel_format format,
4895 const enum dc_rotation_angle rotation,
4896 const union dc_tiling_info *tiling_info,
4897 const struct dc_plane_dcc_param *dcc,
4898 const struct dc_plane_address *address,
4899 const struct plane_size *plane_size)
4901 struct dc *dc = adev->dm.dc;
4902 struct dc_dcc_surface_param input;
4903 struct dc_surface_dcc_cap output;
4905 memset(&input, 0, sizeof(input));
4906 memset(&output, 0, sizeof(output));
4911 if (format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN ||
4912 !dc->cap_funcs.get_dcc_compression_cap)
4915 input.format = format;
4916 input.surface_size.width = plane_size->surface_size.width;
4917 input.surface_size.height = plane_size->surface_size.height;
4918 input.swizzle_mode = tiling_info->gfx9.swizzle;
4920 if (rotation == ROTATION_ANGLE_0 || rotation == ROTATION_ANGLE_180)
4921 input.scan = SCAN_DIRECTION_HORIZONTAL;
4922 else if (rotation == ROTATION_ANGLE_90 || rotation == ROTATION_ANGLE_270)
4923 input.scan = SCAN_DIRECTION_VERTICAL;
4925 if (!dc->cap_funcs.get_dcc_compression_cap(dc, &input, &output))
4928 if (!output.capable)
4931 if (dcc->independent_64b_blks == 0 &&
4932 output.grph.rgb.independent_64b_blks != 0)
4939 modifier_has_dcc(uint64_t modifier)
4941 return IS_AMD_FMT_MOD(modifier) && AMD_FMT_MOD_GET(DCC, modifier);
4945 modifier_gfx9_swizzle_mode(uint64_t modifier)
4947 if (modifier == DRM_FORMAT_MOD_LINEAR)
4950 return AMD_FMT_MOD_GET(TILE, modifier);
4953 static const struct drm_format_info *
4954 amd_get_format_info(const struct drm_mode_fb_cmd2 *cmd)
4956 return amdgpu_lookup_format_info(cmd->pixel_format, cmd->modifier[0]);
4960 fill_gfx9_tiling_info_from_modifier(const struct amdgpu_device *adev,
4961 union dc_tiling_info *tiling_info,
4962 uint64_t modifier)
4963 {
4964 unsigned int mod_bank_xor_bits = AMD_FMT_MOD_GET(BANK_XOR_BITS, modifier);
4965 unsigned int mod_pipe_xor_bits = AMD_FMT_MOD_GET(PIPE_XOR_BITS, modifier);
4966 unsigned int pkrs_log2 = AMD_FMT_MOD_GET(PACKERS, modifier);
4967 unsigned int pipes_log2;
4969 pipes_log2 = min(5u, mod_pipe_xor_bits);
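/* Cap at 2^5 = 32 pipes; any pipe-XOR bits beyond the cap are
 * attributed to shader engines below. */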
4971 fill_gfx9_tiling_info_from_device(adev, tiling_info);
4973 if (!IS_AMD_FMT_MOD(modifier))
4976 tiling_info->gfx9.num_pipes = 1u << pipes_log2;
4977 tiling_info->gfx9.num_shader_engines = 1u << (mod_pipe_xor_bits - pipes_log2);
4979 if (adev->family >= AMDGPU_FAMILY_NV) {
4980 tiling_info->gfx9.num_pkrs = 1u << pkrs_log2;
4982 tiling_info->gfx9.num_banks = 1u << mod_bank_xor_bits;
4984 /* for DCC we know it isn't rb aligned, so rb_per_se doesn't matter. */
4988 enum dm_micro_swizzle {
4989 MICRO_SWIZZLE_Z = 0,
4990 MICRO_SWIZZLE_S = 1,
4991 MICRO_SWIZZLE_D = 2,
4995 static bool dm_plane_format_mod_supported(struct drm_plane *plane,
4996 uint32_t format,
4997 uint64_t modifier)
4998 {
4999 struct amdgpu_device *adev = drm_to_adev(plane->dev);
5000 const struct drm_format_info *info = drm_format_info(format);
5003 enum dm_micro_swizzle microtile = modifier_gfx9_swizzle_mode(modifier) & 3;
5009 * We always have to allow these modifiers:
5010 * 1. Core DRM checks for LINEAR support if userspace does not provide modifiers.
5011 * 2. Not passing any modifiers is the same as explicitly passing INVALID.
5013 if (modifier == DRM_FORMAT_MOD_LINEAR ||
5014 modifier == DRM_FORMAT_MOD_INVALID) {
5018 /* Check that the modifier is on the list of the plane's supported modifiers. */
5019 for (i = 0; i < plane->modifier_count; i++) {
5020 if (modifier == plane->modifiers[i])
5023 if (i == plane->modifier_count)
5027 * For D swizzle the canonical modifier depends on the bpp, so check
5030 if (AMD_FMT_MOD_GET(TILE_VERSION, modifier) == AMD_FMT_MOD_TILE_VER_GFX9 &&
5031 adev->family >= AMDGPU_FAMILY_NV) {
5032 if (microtile == MICRO_SWIZZLE_D && info->cpp[0] == 4)
5036 if (adev->family >= AMDGPU_FAMILY_RV && microtile == MICRO_SWIZZLE_D &&
5040 if (modifier_has_dcc(modifier)) {
5041 /* Per radeonsi comments 16/64 bpp are more complicated. */
5042 if (info->cpp[0] != 4)
5044 /* We support multi-planar formats, but not when combined with
5045 * additional DCC metadata planes. */
5046 if (info->num_planes > 1)
5054 add_modifier(uint64_t **mods, uint64_t *size, uint64_t *cap, uint64_t mod)
5059 if (*cap - *size < 1) {
5060 uint64_t new_cap = *cap * 2;
5061 uint64_t *new_mods = kmalloc(new_cap * sizeof(uint64_t), GFP_KERNEL);
5069 memcpy(new_mods, *mods, sizeof(uint64_t) * *size);
5075 (*mods)[*size] = mod;
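/* Capacity grows by doubling, so appends stay amortized O(1) and the
 * initial guess of 128 entries (see get_plane_modifiers) rarely needs
 * to grow. */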
5080 add_gfx9_modifiers(const struct amdgpu_device *adev,
5081 uint64_t **mods, uint64_t *size, uint64_t *capacity)
5083 int pipes = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
5084 int pipe_xor_bits = min(8, pipes +
5085 ilog2(adev->gfx.config.gb_addr_config_fields.num_se));
5086 int bank_xor_bits = min(8 - pipe_xor_bits,
5087 ilog2(adev->gfx.config.gb_addr_config_fields.num_banks));
5088 int rb = ilog2(adev->gfx.config.gb_addr_config_fields.num_se) +
5089 ilog2(adev->gfx.config.gb_addr_config_fields.num_rb_per_se);
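/* Illustrative numbers: with 4 pipes, 1 SE, 4 banks and 1 RB per SE,
 * pipe_xor_bits = min(8, 2 + 0) = 2, bank_xor_bits = min(6, 2) = 2 and
 * rb = 0. */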
5092 if (adev->family == AMDGPU_FAMILY_RV) {
5093 /* Raven2 and later */
5094 bool has_constant_encode = adev->asic_type > CHIP_RAVEN || adev->external_rev_id >= 0x81;
5097 * No _D DCC swizzles yet because we only allow 32bpp, which
5098 * doesn't support _D on DCN
5101 if (has_constant_encode) {
5102 add_modifier(mods, size, capacity, AMD_FMT_MOD |
5103 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
5104 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
5105 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5106 AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
5107 AMD_FMT_MOD_SET(DCC, 1) |
5108 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
5109 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
5110 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1));
5113 add_modifier(mods, size, capacity, AMD_FMT_MOD |
5114 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
5115 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
5116 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5117 AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
5118 AMD_FMT_MOD_SET(DCC, 1) |
5119 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
5120 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
5121 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 0));
5123 if (has_constant_encode) {
5124 add_modifier(mods, size, capacity, AMD_FMT_MOD |
5125 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
5126 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
5127 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5128 AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
5129 AMD_FMT_MOD_SET(DCC, 1) |
5130 AMD_FMT_MOD_SET(DCC_RETILE, 1) |
5131 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
5132 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
5134 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
5135 AMD_FMT_MOD_SET(RB, rb) |
5136 AMD_FMT_MOD_SET(PIPE, pipes));
5139 add_modifier(mods, size, capacity, AMD_FMT_MOD |
5140 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
5141 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
5142 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5143 AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
5144 AMD_FMT_MOD_SET(DCC, 1) |
5145 AMD_FMT_MOD_SET(DCC_RETILE, 1) |
5146 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
5147 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
5148 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 0) |
5149 AMD_FMT_MOD_SET(RB, rb) |
5150 AMD_FMT_MOD_SET(PIPE, pipes));
5154 * Only supported for 64bpp on Raven, will be filtered on format in
5155 * dm_plane_format_mod_supported.
5157 add_modifier(mods, size, capacity, AMD_FMT_MOD |
5158 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D_X) |
5159 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
5160 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5161 AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits));
5163 if (adev->family == AMDGPU_FAMILY_RV) {
5164 add_modifier(mods, size, capacity, AMD_FMT_MOD |
5165 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
5166 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
5167 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5168 AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits));
5172 * Only supported for 64bpp on Raven, will be filtered on format in
5173 * dm_plane_format_mod_supported.
5175 add_modifier(mods, size, capacity, AMD_FMT_MOD |
5176 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
5177 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
5179 if (adev->family == AMDGPU_FAMILY_RV) {
5180 add_modifier(mods, size, capacity, AMD_FMT_MOD |
5181 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
5182 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
5187 add_gfx10_1_modifiers(const struct amdgpu_device *adev,
5188 uint64_t **mods, uint64_t *size, uint64_t *capacity)
5190 int pipe_xor_bits = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
5192 add_modifier(mods, size, capacity, AMD_FMT_MOD |
5193 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5194 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
5195 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5196 AMD_FMT_MOD_SET(DCC, 1) |
5197 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
5198 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
5199 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
5201 add_modifier(mods, size, capacity, AMD_FMT_MOD |
5202 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5203 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
5204 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5205 AMD_FMT_MOD_SET(DCC, 1) |
5206 AMD_FMT_MOD_SET(DCC_RETILE, 1) |
5207 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
5208 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
5209 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
5211 add_modifier(mods, size, capacity, AMD_FMT_MOD |
5212 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5213 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
5214 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits));
5216 add_modifier(mods, size, capacity, AMD_FMT_MOD |
5217 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
5218 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
5219 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits));
5222 /* Only supported for 64bpp, will be filtered in dm_plane_format_mod_supported */
5223 add_modifier(mods, size, capacity, AMD_FMT_MOD |
5224 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
5225 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
5227 add_modifier(mods, size, capacity, AMD_FMT_MOD |
5228 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
5229 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
5233 add_gfx10_3_modifiers(const struct amdgpu_device *adev,
5234 uint64_t **mods, uint64_t *size, uint64_t *capacity)
5236 int pipe_xor_bits = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
5237 int pkrs = ilog2(adev->gfx.config.gb_addr_config_fields.num_pkrs);
5239 add_modifier(mods, size, capacity, AMD_FMT_MOD |
5240 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5241 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
5242 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5243 AMD_FMT_MOD_SET(PACKERS, pkrs) |
5244 AMD_FMT_MOD_SET(DCC, 1) |
5245 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
5246 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
5247 AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
5248 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
5250 add_modifier(mods, size, capacity, AMD_FMT_MOD |
5251 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5252 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
5253 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5254 AMD_FMT_MOD_SET(PACKERS, pkrs) |
5255 AMD_FMT_MOD_SET(DCC, 1) |
5256 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
5257 AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
5258 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_128B));
5260 add_modifier(mods, size, capacity, AMD_FMT_MOD |
5261 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5262 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
5263 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5264 AMD_FMT_MOD_SET(PACKERS, pkrs) |
5265 AMD_FMT_MOD_SET(DCC, 1) |
5266 AMD_FMT_MOD_SET(DCC_RETILE, 1) |
5267 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
5268 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
5269 AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
5270 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
5272 add_modifier(mods, size, capacity, AMD_FMT_MOD |
5273 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5274 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
5275 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5276 AMD_FMT_MOD_SET(PACKERS, pkrs) |
5277 AMD_FMT_MOD_SET(DCC, 1) |
5278 AMD_FMT_MOD_SET(DCC_RETILE, 1) |
5279 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
5280 AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
5281 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_128B));
5283 add_modifier(mods, size, capacity, AMD_FMT_MOD |
5284 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5285 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
5286 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5287 AMD_FMT_MOD_SET(PACKERS, pkrs));
5289 add_modifier(mods, size, capacity, AMD_FMT_MOD |
5290 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
5291 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
5292 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5293 AMD_FMT_MOD_SET(PACKERS, pkrs));
5295 /* Only supported for 64bpp, will be filtered in dm_plane_format_mod_supported */
5296 add_modifier(mods, size, capacity, AMD_FMT_MOD |
5297 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
5298 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
5300 add_modifier(mods, size, capacity, AMD_FMT_MOD |
5301 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
5302 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
5306 add_gfx11_modifiers(struct amdgpu_device *adev,
5307 uint64_t **mods, uint64_t *size, uint64_t *capacity)
5310 int pipe_xor_bits = 0;
5315 unsigned swizzle_r_x;
5316 uint64_t modifier_r_x;
5317 uint64_t modifier_dcc_best;
5318 uint64_t modifier_dcc_4k;
5320 /* TODO: GFX11 IP HW init hasn't finished and we get zero if we read from
5321 * adev->gfx.config.gb_addr_config_fields.num_{pkrs,pipes} */
5322 gb_addr_config = RREG32_SOC15(GC, 0, regGB_ADDR_CONFIG);
5323 ASSERT(gb_addr_config != 0);
5325 num_pkrs = 1 << REG_GET_FIELD(gb_addr_config, GB_ADDR_CONFIG, NUM_PKRS);
5326 pkrs = ilog2(num_pkrs);
5327 num_pipes = 1 << REG_GET_FIELD(gb_addr_config, GB_ADDR_CONFIG, NUM_PIPES);
5328 pipe_xor_bits = ilog2(num_pipes);
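/* e.g. a NUM_PIPES register field of 2 decodes to 1 << 2 = 4 pipes,
 * giving pipe_xor_bits = ilog2(4) = 2 (illustrative values). */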
5330 for (i = 0; i < 2; i++) {
5331 /* Insert the best one first. */
5332 /* R_X swizzle modes are the best for rendering and DCC requires them. */
5334 swizzle_r_x = !i ? AMD_FMT_MOD_TILE_GFX11_256K_R_X : AMD_FMT_MOD_TILE_GFX9_64K_R_X;
5336 swizzle_r_x = !i ? AMD_FMT_MOD_TILE_GFX9_64K_R_X : AMD_FMT_MOD_TILE_GFX11_256K_R_X;
5338 modifier_r_x = AMD_FMT_MOD |
5339 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX11) |
5340 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5341 AMD_FMT_MOD_SET(TILE, swizzle_r_x) |
5342 AMD_FMT_MOD_SET(PACKERS, pkrs);
5344 /* DCC_CONSTANT_ENCODE is not set because it can't vary with gfx11 (it's implied to be 1). */
5345 modifier_dcc_best = modifier_r_x | AMD_FMT_MOD_SET(DCC, 1) |
5346 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 0) |
5347 AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
5348 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_128B);
5350 /* DCC settings for 4K and greater resolutions. (required by display hw) */
5351 modifier_dcc_4k = modifier_r_x | AMD_FMT_MOD_SET(DCC, 1) |
5352 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
5353 AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
5354 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B);
5356 add_modifier(mods, size, capacity, modifier_dcc_best);
5357 add_modifier(mods, size, capacity, modifier_dcc_4k);
5359 add_modifier(mods, size, capacity, modifier_dcc_best | AMD_FMT_MOD_SET(DCC_RETILE, 1));
5360 add_modifier(mods, size, capacity, modifier_dcc_4k | AMD_FMT_MOD_SET(DCC_RETILE, 1));
5362 add_modifier(mods, size, capacity, modifier_r_x);
5365 add_modifier(mods, size, capacity, AMD_FMT_MOD |
5366 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX11) |
5367 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D));
5371 get_plane_modifiers(struct amdgpu_device *adev, unsigned int plane_type, uint64_t **mods)
5373 uint64_t size = 0, capacity = 128;
5376 /* We have not hooked up any pre-GFX9 modifiers. */
5377 if (adev->family < AMDGPU_FAMILY_AI)
5380 *mods = kmalloc(capacity * sizeof(uint64_t), GFP_KERNEL);
5382 if (plane_type == DRM_PLANE_TYPE_CURSOR) {
5383 add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_LINEAR);
5384 add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_INVALID);
5385 return *mods ? 0 : -ENOMEM;
5388 switch (adev->family) {
5389 case AMDGPU_FAMILY_AI:
5390 case AMDGPU_FAMILY_RV:
5391 add_gfx9_modifiers(adev, mods, &size, &capacity);
5393 case AMDGPU_FAMILY_NV:
5394 case AMDGPU_FAMILY_VGH:
5395 case AMDGPU_FAMILY_YC:
5396 case AMDGPU_FAMILY_GC_10_3_6:
5397 case AMDGPU_FAMILY_GC_10_3_7:
5398 if (adev->ip_versions[GC_HWIP][0] >= IP_VERSION(10, 3, 0))
5399 add_gfx10_3_modifiers(adev, mods, &size, &capacity);
5401 add_gfx10_1_modifiers(adev, mods, &size, &capacity);
5403 case AMDGPU_FAMILY_GC_11_0_0:
5404 case AMDGPU_FAMILY_GC_11_0_2:
5405 add_gfx11_modifiers(adev, mods, &size, &capacity);
5409 add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_LINEAR);
5411 /* INVALID marks the end of the list. */
5412 add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_INVALID);
5421 fill_gfx9_plane_attributes_from_modifiers(struct amdgpu_device *adev,
5422 const struct amdgpu_framebuffer *afb,
5423 const enum surface_pixel_format format,
5424 const enum dc_rotation_angle rotation,
5425 const struct plane_size *plane_size,
5426 union dc_tiling_info *tiling_info,
5427 struct dc_plane_dcc_param *dcc,
5428 struct dc_plane_address *address,
5429 const bool force_disable_dcc)
5431 const uint64_t modifier = afb->base.modifier;
5434 fill_gfx9_tiling_info_from_modifier(adev, tiling_info, modifier);
5435 tiling_info->gfx9.swizzle = modifier_gfx9_swizzle_mode(modifier);
5437 if (modifier_has_dcc(modifier) && !force_disable_dcc) {
5438 uint64_t dcc_address = afb->address + afb->base.offsets[1];
5439 bool independent_64b_blks = AMD_FMT_MOD_GET(DCC_INDEPENDENT_64B, modifier);
5440 bool independent_128b_blks = AMD_FMT_MOD_GET(DCC_INDEPENDENT_128B, modifier);
5443 dcc->meta_pitch = afb->base.pitches[1];
5444 dcc->independent_64b_blks = independent_64b_blks;
5445 if (AMD_FMT_MOD_GET(TILE_VERSION, modifier) >= AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) {
5446 if (independent_64b_blks && independent_128b_blks)
5447 dcc->dcc_ind_blk = hubp_ind_block_64b_no_128bcl;
5448 else if (independent_128b_blks)
5449 dcc->dcc_ind_blk = hubp_ind_block_128b;
5450 else if (independent_64b_blks && !independent_128b_blks)
5451 dcc->dcc_ind_blk = hubp_ind_block_64b;
5453 dcc->dcc_ind_blk = hubp_ind_block_unconstrained;
5455 if (independent_64b_blks)
5456 dcc->dcc_ind_blk = hubp_ind_block_64b;
5458 dcc->dcc_ind_blk = hubp_ind_block_unconstrained;
5461 address->grph.meta_addr.low_part = lower_32_bits(dcc_address);
5462 address->grph.meta_addr.high_part = upper_32_bits(dcc_address);
5465 ret = validate_dcc(adev, format, rotation, tiling_info, dcc, address, plane_size);
5467 drm_dbg_kms(adev_to_drm(adev), "validate_dcc: returned error: %d\n", ret);
5473 fill_plane_buffer_attributes(struct amdgpu_device *adev,
5474 const struct amdgpu_framebuffer *afb,
5475 const enum surface_pixel_format format,
5476 const enum dc_rotation_angle rotation,
5477 const uint64_t tiling_flags,
5478 union dc_tiling_info *tiling_info,
5479 struct plane_size *plane_size,
5480 struct dc_plane_dcc_param *dcc,
5481 struct dc_plane_address *address,
5482 bool tmz_surface,
5483 bool force_disable_dcc)
5485 const struct drm_framebuffer *fb = &afb->base;
5488 memset(tiling_info, 0, sizeof(*tiling_info));
5489 memset(plane_size, 0, sizeof(*plane_size));
5490 memset(dcc, 0, sizeof(*dcc));
5491 memset(address, 0, sizeof(*address));
5493 address->tmz_surface = tmz_surface;
5495 if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) {
5496 uint64_t addr = afb->address + fb->offsets[0];
5498 plane_size->surface_size.x = 0;
5499 plane_size->surface_size.y = 0;
5500 plane_size->surface_size.width = fb->width;
5501 plane_size->surface_size.height = fb->height;
5502 plane_size->surface_pitch =
5503 fb->pitches[0] / fb->format->cpp[0];
5505 address->type = PLN_ADDR_TYPE_GRAPHICS;
5506 address->grph.addr.low_part = lower_32_bits(addr);
5507 address->grph.addr.high_part = upper_32_bits(addr);
5508 } else if (format < SURFACE_PIXEL_FORMAT_INVALID) {
5509 uint64_t luma_addr = afb->address + fb->offsets[0];
5510 uint64_t chroma_addr = afb->address + fb->offsets[1];
5512 plane_size->surface_size.x = 0;
5513 plane_size->surface_size.y = 0;
5514 plane_size->surface_size.width = fb->width;
5515 plane_size->surface_size.height = fb->height;
5516 plane_size->surface_pitch =
5517 fb->pitches[0] / fb->format->cpp[0];
5519 plane_size->chroma_size.x = 0;
5520 plane_size->chroma_size.y = 0;
5521 /* TODO: set these based on surface format */
5522 plane_size->chroma_size.width = fb->width / 2;
5523 plane_size->chroma_size.height = fb->height / 2;
5525 plane_size->chroma_pitch =
5526 fb->pitches[1] / fb->format->cpp[1];
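/* e.g. a 1920x1080 NV12 framebuffer carries a 960x540 chroma plane */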
5528 address->type = PLN_ADDR_TYPE_VIDEO_PROGRESSIVE;
5529 address->video_progressive.luma_addr.low_part =
5530 lower_32_bits(luma_addr);
5531 address->video_progressive.luma_addr.high_part =
5532 upper_32_bits(luma_addr);
5533 address->video_progressive.chroma_addr.low_part =
5534 lower_32_bits(chroma_addr);
5535 address->video_progressive.chroma_addr.high_part =
5536 upper_32_bits(chroma_addr);
5539 if (adev->family >= AMDGPU_FAMILY_AI) {
5540 ret = fill_gfx9_plane_attributes_from_modifiers(adev, afb, format,
5541 rotation, plane_size,
5548 fill_gfx8_tiling_info_from_flags(tiling_info, tiling_flags);
5555 fill_blending_from_plane_state(const struct drm_plane_state *plane_state,
5556 bool *per_pixel_alpha, bool *pre_multiplied_alpha,
5557 bool *global_alpha, int *global_alpha_value)
5559 *per_pixel_alpha = false;
5560 *pre_multiplied_alpha = true;
5561 *global_alpha = false;
5562 *global_alpha_value = 0xff;
5564 if (plane_state->plane->type != DRM_PLANE_TYPE_OVERLAY)
5567 if (plane_state->pixel_blend_mode == DRM_MODE_BLEND_PREMULTI ||
5568 plane_state->pixel_blend_mode == DRM_MODE_BLEND_COVERAGE) {
5569 static const uint32_t alpha_formats[] = {
5570 DRM_FORMAT_ARGB8888,
5571 DRM_FORMAT_RGBA8888,
5572 DRM_FORMAT_ABGR8888,
5574 uint32_t format = plane_state->fb->format->format;
5577 for (i = 0; i < ARRAY_SIZE(alpha_formats); ++i) {
5578 if (format == alpha_formats[i]) {
5579 *per_pixel_alpha = true;
5584 if (*per_pixel_alpha && plane_state->pixel_blend_mode == DRM_MODE_BLEND_COVERAGE)
5585 *pre_multiplied_alpha = false;
5588 if (plane_state->alpha < 0xffff) {
5589 *global_alpha = true;
5590 *global_alpha_value = plane_state->alpha >> 8;
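/* DRM stores plane alpha as 16-bit (0xffff == opaque); DC expects
 * 8-bit, hence the shift. */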
5595 fill_plane_color_attributes(const struct drm_plane_state *plane_state,
5596 const enum surface_pixel_format format,
5597 enum dc_color_space *color_space)
5601 *color_space = COLOR_SPACE_SRGB;
5603 /* DRM color properties only affect non-RGB formats. */
5604 if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN)
5607 full_range = (plane_state->color_range == DRM_COLOR_YCBCR_FULL_RANGE);
5609 switch (plane_state->color_encoding) {
5610 case DRM_COLOR_YCBCR_BT601:
5612 *color_space = COLOR_SPACE_YCBCR601;
5614 *color_space = COLOR_SPACE_YCBCR601_LIMITED;
5617 case DRM_COLOR_YCBCR_BT709:
5619 *color_space = COLOR_SPACE_YCBCR709;
5621 *color_space = COLOR_SPACE_YCBCR709_LIMITED;
5624 case DRM_COLOR_YCBCR_BT2020:
5626 *color_space = COLOR_SPACE_2020_YCBCR;
5639 fill_dc_plane_info_and_addr(struct amdgpu_device *adev,
5640 const struct drm_plane_state *plane_state,
5641 const uint64_t tiling_flags,
5642 struct dc_plane_info *plane_info,
5643 struct dc_plane_address *address,
5644 bool tmz_surface,
5645 bool force_disable_dcc)
5647 const struct drm_framebuffer *fb = plane_state->fb;
5648 const struct amdgpu_framebuffer *afb =
5649 to_amdgpu_framebuffer(plane_state->fb);
5652 memset(plane_info, 0, sizeof(*plane_info));
5654 switch (fb->format->format) {
5656 plane_info->format =
5657 SURFACE_PIXEL_FORMAT_GRPH_PALETA_256_COLORS;
5659 case DRM_FORMAT_RGB565:
5660 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_RGB565;
5662 case DRM_FORMAT_XRGB8888:
5663 case DRM_FORMAT_ARGB8888:
5664 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB8888;
5666 case DRM_FORMAT_XRGB2101010:
5667 case DRM_FORMAT_ARGB2101010:
5668 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010;
5670 case DRM_FORMAT_XBGR2101010:
5671 case DRM_FORMAT_ABGR2101010:
5672 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010;
5674 case DRM_FORMAT_XBGR8888:
5675 case DRM_FORMAT_ABGR8888:
5676 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR8888;
5678 case DRM_FORMAT_NV21:
5679 plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr;
5681 case DRM_FORMAT_NV12:
5682 plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb;
5684 case DRM_FORMAT_P010:
5685 plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb;
5687 case DRM_FORMAT_XRGB16161616F:
5688 case DRM_FORMAT_ARGB16161616F:
5689 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F;
5691 case DRM_FORMAT_XBGR16161616F:
5692 case DRM_FORMAT_ABGR16161616F:
5693 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F;
5695 case DRM_FORMAT_XRGB16161616:
5696 case DRM_FORMAT_ARGB16161616:
5697 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616;
5699 case DRM_FORMAT_XBGR16161616:
5700 case DRM_FORMAT_ABGR16161616:
5701 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616;
5705 "Unsupported screen format %p4cc\n",
5706 &fb->format->format);
5710 switch (plane_state->rotation & DRM_MODE_ROTATE_MASK) {
5711 case DRM_MODE_ROTATE_0:
5712 plane_info->rotation = ROTATION_ANGLE_0;
5714 case DRM_MODE_ROTATE_90:
5715 plane_info->rotation = ROTATION_ANGLE_90;
5717 case DRM_MODE_ROTATE_180:
5718 plane_info->rotation = ROTATION_ANGLE_180;
5720 case DRM_MODE_ROTATE_270:
5721 plane_info->rotation = ROTATION_ANGLE_270;
5724 plane_info->rotation = ROTATION_ANGLE_0;
5728 plane_info->visible = true;
5729 plane_info->stereo_format = PLANE_STEREO_FORMAT_NONE;
5731 plane_info->layer_index = 0;
5733 ret = fill_plane_color_attributes(plane_state, plane_info->format,
5734 &plane_info->color_space);
5738 ret = fill_plane_buffer_attributes(adev, afb, plane_info->format,
5739 plane_info->rotation, tiling_flags,
5740 &plane_info->tiling_info,
5741 &plane_info->plane_size,
5742 &plane_info->dcc, address, tmz_surface,
5747 fill_blending_from_plane_state(
5748 plane_state, &plane_info->per_pixel_alpha, &plane_info->pre_multiplied_alpha,
5749 &plane_info->global_alpha, &plane_info->global_alpha_value);
5754 static int fill_dc_plane_attributes(struct amdgpu_device *adev,
5755 struct dc_plane_state *dc_plane_state,
5756 struct drm_plane_state *plane_state,
5757 struct drm_crtc_state *crtc_state)
5759 struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
5760 struct amdgpu_framebuffer *afb = (struct amdgpu_framebuffer *)plane_state->fb;
5761 struct dc_scaling_info scaling_info;
5762 struct dc_plane_info plane_info;
5764 bool force_disable_dcc = false;
5766 ret = fill_dc_scaling_info(adev, plane_state, &scaling_info);
5770 dc_plane_state->src_rect = scaling_info.src_rect;
5771 dc_plane_state->dst_rect = scaling_info.dst_rect;
5772 dc_plane_state->clip_rect = scaling_info.clip_rect;
5773 dc_plane_state->scaling_quality = scaling_info.scaling_quality;
5775 force_disable_dcc = adev->asic_type == CHIP_RAVEN && adev->in_suspend;
5776 ret = fill_dc_plane_info_and_addr(adev, plane_state,
5779 &dc_plane_state->address,
5785 dc_plane_state->format = plane_info.format;
5786 dc_plane_state->color_space = plane_info.color_space;
5788 dc_plane_state->plane_size = plane_info.plane_size;
5789 dc_plane_state->rotation = plane_info.rotation;
5790 dc_plane_state->horizontal_mirror = plane_info.horizontal_mirror;
5791 dc_plane_state->stereo_format = plane_info.stereo_format;
5792 dc_plane_state->tiling_info = plane_info.tiling_info;
5793 dc_plane_state->visible = plane_info.visible;
5794 dc_plane_state->per_pixel_alpha = plane_info.per_pixel_alpha;
5795 dc_plane_state->pre_multiplied_alpha = plane_info.pre_multiplied_alpha;
5796 dc_plane_state->global_alpha = plane_info.global_alpha;
5797 dc_plane_state->global_alpha_value = plane_info.global_alpha_value;
5798 dc_plane_state->dcc = plane_info.dcc;
5799 dc_plane_state->layer_index = plane_info.layer_index; // always 0 today
5800 dc_plane_state->flip_int_enabled = true;
5803 * Always set input transfer function, since plane state is refreshed
5806 ret = amdgpu_dm_update_plane_color_mgmt(dm_crtc_state, dc_plane_state);
5814 * fill_dc_dirty_rects() - Fill DC dirty regions for PSR selective updates
5816 * @plane: DRM plane containing dirty regions that need to be flushed to the eDP
5818 * @old_plane_state: Old state of @plane
5819 * @new_plane_state: New state of @plane
5820 * @crtc_state: New state of CRTC connected to the @plane
5821 * @flip_addrs: DC flip tracking struct, which also tracks dirty rects
5823 * For PSR SU, DC informs the DMUB uController of dirty rectangle regions
5824 * (referred to as "damage clips" in DRM nomenclature) that require updating on
5825 * the eDP remote buffer. The responsibility of specifying the dirty regions is
5828 * A damage-aware DRM client should fill the FB_DAMAGE_CLIPS property on the
5829 * plane with regions that require flushing to the eDP remote buffer. In
5830 * addition, certain use cases - such as cursor and multi-plane overlay (MPO) -
5831 * implicitly provide damage clips without any client support via the plane
5834 * Today, amdgpu_dm only supports the MPO and cursor use cases.
5836 * TODO: Also enable for FB_DAMAGE_CLIPS
5838 static void fill_dc_dirty_rects(struct drm_plane *plane,
5839 struct drm_plane_state *old_plane_state,
5840 struct drm_plane_state *new_plane_state,
5841 struct drm_crtc_state *crtc_state,
5842 struct dc_flip_addrs *flip_addrs)
5844 struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
5845 struct rect *dirty_rects = flip_addrs->dirty_rects;
5851 flip_addrs->dirty_rect_count = 0;
5854 * Cursor plane has its own dirty rect update interface. See
5855 * dcn10_dmub_update_cursor_data and dmub_cmd_update_cursor_info_data
5857 if (plane->type == DRM_PLANE_TYPE_CURSOR)
5861 * Today, we only consider the MPO use case for PSR SU. If MPO is not
5862 * requested and there is a plane update, do a full-frame update (FFU).
5864 if (!dm_crtc_state->mpo_requested) {
5865 dirty_rects[0].x = 0;
5866 dirty_rects[0].y = 0;
5867 dirty_rects[0].width = dm_crtc_state->base.mode.crtc_hdisplay;
5868 dirty_rects[0].height = dm_crtc_state->base.mode.crtc_vdisplay;
5869 flip_addrs->dirty_rect_count = 1;
5870 DRM_DEBUG_DRIVER("[PLANE:%d] PSR FFU dirty rect size (%d, %d)\n",
5871 new_plane_state->plane->base.id,
5872 dm_crtc_state->base.mode.crtc_hdisplay,
5873 dm_crtc_state->base.mode.crtc_vdisplay);
5878 * MPO is requested. Add entire plane bounding box to dirty rects if
5879 * flipped to or damaged.
5881 * If plane is moved or resized, also add old bounding box to dirty
5884 num_clips = drm_plane_get_damage_clips_count(new_plane_state);
5885 fb_changed = old_plane_state->fb->base.id !=
5886 new_plane_state->fb->base.id;
5887 bb_changed = (old_plane_state->crtc_x != new_plane_state->crtc_x ||
5888 old_plane_state->crtc_y != new_plane_state->crtc_y ||
5889 old_plane_state->crtc_w != new_plane_state->crtc_w ||
5890 old_plane_state->crtc_h != new_plane_state->crtc_h);
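/* A bounding-box change means the plane moved or was resized, so the
 * new location is added here and the old one further below. */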
5892 DRM_DEBUG_DRIVER("[PLANE:%d] PSR bb_changed:%d fb_changed:%d num_clips:%d\n",
5893 new_plane_state->plane->base.id,
5894 bb_changed, fb_changed, num_clips);
5896 if (num_clips || fb_changed || bb_changed) {
5897 dirty_rects[i].x = new_plane_state->crtc_x;
5898 dirty_rects[i].y = new_plane_state->crtc_y;
5899 dirty_rects[i].width = new_plane_state->crtc_w;
5900 dirty_rects[i].height = new_plane_state->crtc_h;
5901 DRM_DEBUG_DRIVER("[PLANE:%d] PSR SU dirty rect at (%d, %d) size (%d, %d)\n",
5902 new_plane_state->plane->base.id,
5903 dirty_rects[i].x, dirty_rects[i].y,
5904 dirty_rects[i].width, dirty_rects[i].height);
5908 /* Add old plane bounding-box if plane is moved or resized */
5910 dirty_rects[i].x = old_plane_state->crtc_x;
5911 dirty_rects[i].y = old_plane_state->crtc_y;
5912 dirty_rects[i].width = old_plane_state->crtc_w;
5913 dirty_rects[i].height = old_plane_state->crtc_h;
5914 DRM_DEBUG_DRIVER("[PLANE:%d] PSR SU dirty rect at (%d, %d) size (%d, %d)\n",
5915 old_plane_state->plane->base.id,
5916 dirty_rects[i].x, dirty_rects[i].y,
5917 dirty_rects[i].width, dirty_rects[i].height);
5921 flip_addrs->dirty_rect_count = i;
5924 static void update_stream_scaling_settings(const struct drm_display_mode *mode,
5925 const struct dm_connector_state *dm_state,
5926 struct dc_stream_state *stream)
5928 enum amdgpu_rmx_type rmx_type;
5930 struct rect src = { 0 }; /* viewport in composition space */
5931 struct rect dst = { 0 }; /* stream addressable area */
5933 /* no mode. nothing to be done */
5937 /* Full screen scaling by default */
5938 src.width = mode->hdisplay;
5939 src.height = mode->vdisplay;
5940 dst.width = stream->timing.h_addressable;
5941 dst.height = stream->timing.v_addressable;
5944 rmx_type = dm_state->scaling;
5945 if (rmx_type == RMX_ASPECT || rmx_type == RMX_OFF) {
5946 if (src.width * dst.height <
5947 src.height * dst.width) {
5948 /* height needs less upscaling/more downscaling */
5949 dst.width = src.width *
5950 dst.height / src.height;
5952 /* width needs less upscaling/more downscaling */
5953 dst.height = src.height *
5954 dst.width / src.width;
5956 } else if (rmx_type == RMX_CENTER) {
5960 dst.x = (stream->timing.h_addressable - dst.width) / 2;
5961 dst.y = (stream->timing.v_addressable - dst.height) / 2;
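/* e.g. centering a 1280x720 source on a 1920x1080 addressable area
 * yields dst.x = 320 and dst.y = 180 (illustrative values). */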
5963 if (dm_state->underscan_enable) {
5964 dst.x += dm_state->underscan_hborder / 2;
5965 dst.y += dm_state->underscan_vborder / 2;
5966 dst.width -= dm_state->underscan_hborder;
5967 dst.height -= dm_state->underscan_vborder;
5974 DRM_DEBUG_KMS("Destination Rectangle x:%d y:%d width:%d height:%d\n",
5975 dst.x, dst.y, dst.width, dst.height);
5979 static enum dc_color_depth
5980 convert_color_depth_from_display_info(const struct drm_connector *connector,
5981 bool is_y420, int requested_bpc)
5988 /* Cap display bpc based on HDMI 2.0 HF-VSDB */
5989 if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_48)
5991 else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_36)
5993 else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_30)
5996 bpc = (uint8_t)connector->display_info.bpc;
5997 /* Assume 8 bpc by default if no bpc is specified. */
5998 bpc = bpc ? bpc : 8;
6001 if (requested_bpc > 0) {
6003 * Cap display bpc based on the user requested value.
6005 * The value for state->max_bpc may not be correctly updated
6006 * depending on when the connector gets added to the state
6007 * or if this was called outside of atomic check, so it
6008 * can't be used directly.
6010 bpc = min_t(u8, bpc, requested_bpc);
6012 /* Round down to the nearest even number. */
6013 bpc = bpc - (bpc & 1);
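/* e.g. a capped value of 7 becomes 6; the supported color depths
 * (6/8/10/12/14/16) all have even bpc. */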
6019 * Temporary workaround: DRM doesn't parse color depth for
6020 * EDID revisions before 1.4
6021 * TODO: Fix edid parsing
6023 return COLOR_DEPTH_888;
6025 return COLOR_DEPTH_666;
6027 return COLOR_DEPTH_888;
6029 return COLOR_DEPTH_101010;
6031 return COLOR_DEPTH_121212;
6033 return COLOR_DEPTH_141414;
6035 return COLOR_DEPTH_161616;
6037 return COLOR_DEPTH_UNDEFINED;
6041 static enum dc_aspect_ratio
6042 get_aspect_ratio(const struct drm_display_mode *mode_in)
6044 /* 1-1 mapping, since both enums follow the HDMI spec. */
6045 return (enum dc_aspect_ratio) mode_in->picture_aspect_ratio;
6048 static enum dc_color_space
6049 get_output_color_space(const struct dc_crtc_timing *dc_crtc_timing)
6051 enum dc_color_space color_space = COLOR_SPACE_SRGB;
6053 switch (dc_crtc_timing->pixel_encoding) {
6054 case PIXEL_ENCODING_YCBCR422:
6055 case PIXEL_ENCODING_YCBCR444:
6056 case PIXEL_ENCODING_YCBCR420:
6059 * 27030 kHz is the separation point between HDTV and SDTV
6060 * according to the HDMI spec: we use YCbCr709 above it and YCbCr601 below it.
6063 if (dc_crtc_timing->pix_clk_100hz > 270300) {
6064 if (dc_crtc_timing->flags.Y_ONLY)
6066 COLOR_SPACE_YCBCR709_LIMITED;
6068 color_space = COLOR_SPACE_YCBCR709;
6070 if (dc_crtc_timing->flags.Y_ONLY)
6072 COLOR_SPACE_YCBCR601_LIMITED;
6074 color_space = COLOR_SPACE_YCBCR601;
6079 case PIXEL_ENCODING_RGB:
6080 color_space = COLOR_SPACE_SRGB;
6091 static bool adjust_colour_depth_from_display_info(
6092 struct dc_crtc_timing *timing_out,
6093 const struct drm_display_info *info)
6095 enum dc_color_depth depth = timing_out->display_color_depth;
6098 normalized_clk = timing_out->pix_clk_100hz / 10;
6099 /* YCbCr 4:2:0 requires additional adjustment of 1/2 */
6100 if (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420)
6101 normalized_clk /= 2;
6102 /* Adjust the pixel clock per the HDMI spec for the given colour depth */
6104 case COLOR_DEPTH_888:
6106 case COLOR_DEPTH_101010:
6107 normalized_clk = (normalized_clk * 30) / 24;
6109 case COLOR_DEPTH_121212:
6110 normalized_clk = (normalized_clk * 36) / 24;
6112 case COLOR_DEPTH_161616:
6113 normalized_clk = (normalized_clk * 48) / 24;
6116 /* The above depths are the only ones valid for HDMI. */
6119 if (normalized_clk <= info->max_tmds_clock) {
6120 timing_out->display_color_depth = depth;
6123 } while (--depth > COLOR_DEPTH_666);
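/*
 * A sketch of the clock normalization in the loop above (standalone, not
 * driver code): HDMI carries 10/12/16 bpc at 30/24, 36/24 and 48/24 of the
 * 8 bpc pixel clock, and YCbCr 4:2:0 halves the clock first. Units are kHz
 * to match info->max_tmds_clock.
 */
static int example_tmds_clock_khz(int pix_clk_100hz, int bits_per_component,
				  int is_ycbcr420)
{
	int clk = pix_clk_100hz / 10;	/* 100Hz units -> kHz */

	if (is_ycbcr420)
		clk /= 2;
	/* e.g. 594000 kHz at 12 bpc -> 891000 kHz, over a 600000 kHz limit */
	return clk * (bits_per_component * 3) / 24;
}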
6127 static void fill_stream_properties_from_drm_display_mode(
6128 struct dc_stream_state *stream,
6129 const struct drm_display_mode *mode_in,
6130 const struct drm_connector *connector,
6131 const struct drm_connector_state *connector_state,
6132 const struct dc_stream_state *old_stream,
6135 struct dc_crtc_timing *timing_out = &stream->timing;
6136 const struct drm_display_info *info = &connector->display_info;
6137 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
6138 struct hdmi_vendor_infoframe hv_frame;
6139 struct hdmi_avi_infoframe avi_frame;
6141 memset(&hv_frame, 0, sizeof(hv_frame));
6142 memset(&avi_frame, 0, sizeof(avi_frame));
6144 timing_out->h_border_left = 0;
6145 timing_out->h_border_right = 0;
6146 timing_out->v_border_top = 0;
6147 timing_out->v_border_bottom = 0;
6148 /* TODO: un-hardcode */
6149 if (drm_mode_is_420_only(info, mode_in)
6150 && stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
6151 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
6152 else if (drm_mode_is_420_also(info, mode_in)
6153 && aconnector->force_yuv420_output)
6154 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
6155 else if ((connector->display_info.color_formats & DRM_COLOR_FORMAT_YCBCR444)
6156 && stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
6157 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR444;
6159 timing_out->pixel_encoding = PIXEL_ENCODING_RGB;
6161 timing_out->timing_3d_format = TIMING_3D_FORMAT_NONE;
6162 timing_out->display_color_depth = convert_color_depth_from_display_info(
6164 (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420),
6166 timing_out->scan_type = SCANNING_TYPE_NODATA;
6167 timing_out->hdmi_vic = 0;
6170 timing_out->vic = old_stream->timing.vic;
6171 timing_out->flags.HSYNC_POSITIVE_POLARITY = old_stream->timing.flags.HSYNC_POSITIVE_POLARITY;
6172 timing_out->flags.VSYNC_POSITIVE_POLARITY = old_stream->timing.flags.VSYNC_POSITIVE_POLARITY;
6174 timing_out->vic = drm_match_cea_mode(mode_in);
6175 if (mode_in->flags & DRM_MODE_FLAG_PHSYNC)
6176 timing_out->flags.HSYNC_POSITIVE_POLARITY = 1;
6177 if (mode_in->flags & DRM_MODE_FLAG_PVSYNC)
6178 timing_out->flags.VSYNC_POSITIVE_POLARITY = 1;
6181 if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
6182 drm_hdmi_avi_infoframe_from_display_mode(&avi_frame, (struct drm_connector *)connector, mode_in);
6183 timing_out->vic = avi_frame.video_code;
6184 drm_hdmi_vendor_infoframe_from_display_mode(&hv_frame, (struct drm_connector *)connector, mode_in);
6185 timing_out->hdmi_vic = hv_frame.vic;
6188 if (is_freesync_video_mode(mode_in, aconnector)) {
6189 timing_out->h_addressable = mode_in->hdisplay;
6190 timing_out->h_total = mode_in->htotal;
6191 timing_out->h_sync_width = mode_in->hsync_end - mode_in->hsync_start;
6192 timing_out->h_front_porch = mode_in->hsync_start - mode_in->hdisplay;
6193 timing_out->v_total = mode_in->vtotal;
6194 timing_out->v_addressable = mode_in->vdisplay;
6195 timing_out->v_front_porch = mode_in->vsync_start - mode_in->vdisplay;
6196 timing_out->v_sync_width = mode_in->vsync_end - mode_in->vsync_start;
6197 timing_out->pix_clk_100hz = mode_in->clock * 10;
6199 timing_out->h_addressable = mode_in->crtc_hdisplay;
6200 timing_out->h_total = mode_in->crtc_htotal;
6201 timing_out->h_sync_width = mode_in->crtc_hsync_end - mode_in->crtc_hsync_start;
6202 timing_out->h_front_porch = mode_in->crtc_hsync_start - mode_in->crtc_hdisplay;
6203 timing_out->v_total = mode_in->crtc_vtotal;
6204 timing_out->v_addressable = mode_in->crtc_vdisplay;
6205 timing_out->v_front_porch = mode_in->crtc_vsync_start - mode_in->crtc_vdisplay;
6206 timing_out->v_sync_width = mode_in->crtc_vsync_end - mode_in->crtc_vsync_start;
6207 timing_out->pix_clk_100hz = mode_in->crtc_clock * 10;
6210 timing_out->aspect_ratio = get_aspect_ratio(mode_in);
6212 stream->output_color_space = get_output_color_space(timing_out);
6214 stream->out_transfer_func->type = TF_TYPE_PREDEFINED;
6215 stream->out_transfer_func->tf = TRANSFER_FUNCTION_SRGB;
6216 if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
6217 if (!adjust_colour_depth_from_display_info(timing_out, info) &&
6218 drm_mode_is_420_also(info, mode_in) &&
6219 timing_out->pixel_encoding != PIXEL_ENCODING_YCBCR420) {
6220 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
6221 adjust_colour_depth_from_display_info(timing_out, info);
6226 static void fill_audio_info(struct audio_info *audio_info,
6227 const struct drm_connector *drm_connector,
6228 const struct dc_sink *dc_sink)
6231 int cea_revision = 0;
6232 const struct dc_edid_caps *edid_caps = &dc_sink->edid_caps;
6234 audio_info->manufacture_id = edid_caps->manufacturer_id;
6235 audio_info->product_id = edid_caps->product_id;
6237 cea_revision = drm_connector->display_info.cea_rev;
6239 strscpy(audio_info->display_name,
6240 edid_caps->display_name,
6241 AUDIO_INFO_DISPLAY_NAME_SIZE_IN_CHARS);
6243 if (cea_revision >= 3) {
6244 audio_info->mode_count = edid_caps->audio_mode_count;
6246 for (i = 0; i < audio_info->mode_count; ++i) {
6247 audio_info->modes[i].format_code =
6248 (enum audio_format_code)
6249 (edid_caps->audio_modes[i].format_code);
6250 audio_info->modes[i].channel_count =
6251 edid_caps->audio_modes[i].channel_count;
6252 audio_info->modes[i].sample_rates.all =
6253 edid_caps->audio_modes[i].sample_rate;
6254 audio_info->modes[i].sample_size =
6255 edid_caps->audio_modes[i].sample_size;
6259 audio_info->flags.all = edid_caps->speaker_flags;
6261 /* TODO: We only check for progressive mode; check for interlace mode too */
6262 if (drm_connector->latency_present[0]) {
6263 audio_info->video_latency = drm_connector->video_latency[0];
6264 audio_info->audio_latency = drm_connector->audio_latency[0];
6267 /* TODO: For DP, video and audio latency should be calculated from DPCD caps */
6272 copy_crtc_timing_for_drm_display_mode(const struct drm_display_mode *src_mode,
6273 struct drm_display_mode *dst_mode)
6275 dst_mode->crtc_hdisplay = src_mode->crtc_hdisplay;
6276 dst_mode->crtc_vdisplay = src_mode->crtc_vdisplay;
6277 dst_mode->crtc_clock = src_mode->crtc_clock;
6278 dst_mode->crtc_hblank_start = src_mode->crtc_hblank_start;
6279 dst_mode->crtc_hblank_end = src_mode->crtc_hblank_end;
6280 dst_mode->crtc_hsync_start = src_mode->crtc_hsync_start;
6281 dst_mode->crtc_hsync_end = src_mode->crtc_hsync_end;
6282 dst_mode->crtc_htotal = src_mode->crtc_htotal;
6283 dst_mode->crtc_hskew = src_mode->crtc_hskew;
6284 dst_mode->crtc_vblank_start = src_mode->crtc_vblank_start;
6285 dst_mode->crtc_vblank_end = src_mode->crtc_vblank_end;
6286 dst_mode->crtc_vsync_start = src_mode->crtc_vsync_start;
6287 dst_mode->crtc_vsync_end = src_mode->crtc_vsync_end;
6288 dst_mode->crtc_vtotal = src_mode->crtc_vtotal;
6292 decide_crtc_timing_for_drm_display_mode(struct drm_display_mode *drm_mode,
6293 const struct drm_display_mode *native_mode,
6296 if (scale_enabled) {
6297 copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
6298 } else if (native_mode->clock == drm_mode->clock &&
6299 native_mode->htotal == drm_mode->htotal &&
6300 native_mode->vtotal == drm_mode->vtotal) {
6301 copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
6303 /* neither scaling nor an amdgpu-inserted mode, so no need to patch */
6307 static struct dc_sink *
6308 create_fake_sink(struct amdgpu_dm_connector *aconnector)
6310 struct dc_sink_init_data sink_init_data = { 0 };
6311 struct dc_sink *sink = NULL;
6312 sink_init_data.link = aconnector->dc_link;
6313 sink_init_data.sink_signal = aconnector->dc_link->connector_signal;
6315 sink = dc_sink_create(&sink_init_data);
6317 DRM_ERROR("Failed to create sink!\n");
6320 sink->sink_signal = SIGNAL_TYPE_VIRTUAL;
6325 static void set_multisync_trigger_params(
6326 struct dc_stream_state *stream)
6328 struct dc_stream_state *master = NULL;
6330 if (stream->triggered_crtc_reset.enabled) {
6331 master = stream->triggered_crtc_reset.event_source;
6332 stream->triggered_crtc_reset.event =
6333 master->timing.flags.VSYNC_POSITIVE_POLARITY ?
6334 CRTC_EVENT_VSYNC_RISING : CRTC_EVENT_VSYNC_FALLING;
6335 stream->triggered_crtc_reset.delay = TRIGGER_DELAY_NEXT_PIXEL;
6339 static void set_master_stream(struct dc_stream_state *stream_set[],
6342 int j, highest_rfr = 0, master_stream = 0;
6344 for (j = 0; j < stream_count; j++) {
6345 if (stream_set[j] && stream_set[j]->triggered_crtc_reset.enabled) {
6346 int refresh_rate = 0;
6348 refresh_rate = (stream_set[j]->timing.pix_clk_100hz*100)/
6349 (stream_set[j]->timing.h_total*stream_set[j]->timing.v_total);
6350 if (refresh_rate > highest_rfr) {
6351 highest_rfr = refresh_rate;
6356 for (j = 0; j < stream_count; j++) {
6358 stream_set[j]->triggered_crtc_reset.event_source = stream_set[master_stream];
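/*
 * The refresh-rate expression above, standalone for reference (a sketch,
 * not driver code): pix_clk_100hz is in 100Hz units, so multiplying by 100
 * gives Hz before dividing by the pixels per frame.
 */
static unsigned int example_refresh_rate_hz(unsigned int pix_clk_100hz,
					    unsigned int h_total,
					    unsigned int v_total)
{
	/* e.g. 1485000 (148.5 MHz), 2200 x 1125 -> 60 Hz */
	return (pix_clk_100hz * 100) / (h_total * v_total);
}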
6362 static void dm_enable_per_frame_crtc_master_sync(struct dc_state *context)
6365 struct dc_stream_state *stream;
6367 if (context->stream_count < 2)
6369 for (i = 0; i < context->stream_count ; i++) {
6370 if (!context->streams[i])
6373 * TODO: add a function to read AMD VSDB bits and set
6374 * crtc_sync_master.multi_sync_enabled flag.
6375 * For now it's set to false.
6379 set_master_stream(context->streams, context->stream_count);
6381 for (i = 0; i < context->stream_count ; i++) {
6382 stream = context->streams[i];
6387 set_multisync_trigger_params(stream);
6391 #if defined(CONFIG_DRM_AMD_DC_DCN)
6392 static void update_dsc_caps(struct amdgpu_dm_connector *aconnector,
6393 struct dc_sink *sink, struct dc_stream_state *stream,
6394 struct dsc_dec_dpcd_caps *dsc_caps)
6396 stream->timing.flags.DSC = 0;
6397 dsc_caps->is_dsc_supported = false;
6399 if (aconnector->dc_link && (sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT ||
6400 sink->sink_signal == SIGNAL_TYPE_EDP)) {
6401 if (sink->link->dpcd_caps.dongle_type == DISPLAY_DONGLE_NONE ||
6402 sink->link->dpcd_caps.dongle_type == DISPLAY_DONGLE_DP_HDMI_CONVERTER)
6403 dc_dsc_parse_dsc_dpcd(aconnector->dc_link->ctx->dc,
6404 aconnector->dc_link->dpcd_caps.dsc_caps.dsc_basic_caps.raw,
6405 aconnector->dc_link->dpcd_caps.dsc_caps.dsc_branch_decoder_caps.raw,
6410 static void apply_dsc_policy_for_edp(struct amdgpu_dm_connector *aconnector,
6411 struct dc_sink *sink, struct dc_stream_state *stream,
6412 struct dsc_dec_dpcd_caps *dsc_caps,
6413 uint32_t max_dsc_target_bpp_limit_override)
6415 const struct dc_link_settings *verified_link_cap = NULL;
6416 uint32_t link_bw_in_kbps;
6417 uint32_t edp_min_bpp_x16, edp_max_bpp_x16;
6418 struct dc *dc = sink->ctx->dc;
6419 struct dc_dsc_bw_range bw_range = {0};
6420 struct dc_dsc_config dsc_cfg = {0};
6422 verified_link_cap = dc_link_get_link_cap(stream->link);
6423 link_bw_in_kbps = dc_link_bandwidth_kbps(stream->link, verified_link_cap);
6424 edp_min_bpp_x16 = 8 * 16;
6425 edp_max_bpp_x16 = 8 * 16;
6427 if (edp_max_bpp_x16 > dsc_caps->edp_max_bits_per_pixel)
6428 edp_max_bpp_x16 = dsc_caps->edp_max_bits_per_pixel;
6430 if (edp_max_bpp_x16 < edp_min_bpp_x16)
6431 edp_min_bpp_x16 = edp_max_bpp_x16;
6433 if (dc_dsc_compute_bandwidth_range(dc->res_pool->dscs[0],
6434 dc->debug.dsc_min_slice_height_override,
6435 edp_min_bpp_x16, edp_max_bpp_x16,
6440 if (bw_range.max_kbps < link_bw_in_kbps) {
6441 if (dc_dsc_compute_config(dc->res_pool->dscs[0],
6443 dc->debug.dsc_min_slice_height_override,
6444 max_dsc_target_bpp_limit_override,
6448 stream->timing.dsc_cfg = dsc_cfg;
6449 stream->timing.flags.DSC = 1;
6450 stream->timing.dsc_cfg.bits_per_pixel = edp_max_bpp_x16;
6456 if (dc_dsc_compute_config(dc->res_pool->dscs[0],
6458 dc->debug.dsc_min_slice_height_override,
6459 max_dsc_target_bpp_limit_override,
6463 stream->timing.dsc_cfg = dsc_cfg;
6464 stream->timing.flags.DSC = 1;
6468 static void apply_dsc_policy_for_stream(struct amdgpu_dm_connector *aconnector,
6469 struct dc_sink *sink, struct dc_stream_state *stream,
6470 struct dsc_dec_dpcd_caps *dsc_caps)
6472 struct drm_connector *drm_connector = &aconnector->base;
6473 uint32_t link_bandwidth_kbps;
6474 uint32_t max_dsc_target_bpp_limit_override = 0;
6475 struct dc *dc = sink->ctx->dc;
6476 uint32_t max_supported_bw_in_kbps, timing_bw_in_kbps;
6477 uint32_t dsc_max_supported_bw_in_kbps;
6479 link_bandwidth_kbps = dc_link_bandwidth_kbps(aconnector->dc_link,
6480 dc_link_get_link_cap(aconnector->dc_link));
6482 if (stream->link && stream->link->local_sink)
6483 max_dsc_target_bpp_limit_override =
6484 stream->link->local_sink->edid_caps.panel_patch.max_dsc_target_bpp_limit;
6486 /* Set DSC policy according to dsc_clock_en */
6487 dc_dsc_policy_set_enable_dsc_when_not_needed(
6488 aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE);
6490 if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_EDP && !dc->debug.disable_dsc_edp &&
6491 dc->caps.edp_dsc_support && aconnector->dsc_settings.dsc_force_enable != DSC_CLK_FORCE_DISABLE) {
6493 apply_dsc_policy_for_edp(aconnector, sink, stream, dsc_caps, max_dsc_target_bpp_limit_override);
6495 } else if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT) {
6496 if (sink->link->dpcd_caps.dongle_type == DISPLAY_DONGLE_NONE) {
6497 if (dc_dsc_compute_config(aconnector->dc_link->ctx->dc->res_pool->dscs[0],
6499 aconnector->dc_link->ctx->dc->debug.dsc_min_slice_height_override,
6500 max_dsc_target_bpp_limit_override,
6501 link_bandwidth_kbps,
6503 &stream->timing.dsc_cfg)) {
6504 stream->timing.flags.DSC = 1;
6505 DRM_DEBUG_DRIVER("%s: [%s] DSC is selected from SST RX\n",
6506 __func__, drm_connector->name);
6508 } else if (sink->link->dpcd_caps.dongle_type == DISPLAY_DONGLE_DP_HDMI_CONVERTER) {
6509 timing_bw_in_kbps = dc_bandwidth_in_kbps_from_timing(&stream->timing);
6510 max_supported_bw_in_kbps = link_bandwidth_kbps;
6511 dsc_max_supported_bw_in_kbps = link_bandwidth_kbps;
6513 if (timing_bw_in_kbps > max_supported_bw_in_kbps &&
6514 max_supported_bw_in_kbps > 0 &&
6515 dsc_max_supported_bw_in_kbps > 0)
6516 if (dc_dsc_compute_config(aconnector->dc_link->ctx->dc->res_pool->dscs[0],
6518 aconnector->dc_link->ctx->dc->debug.dsc_min_slice_height_override,
6519 max_dsc_target_bpp_limit_override,
6520 dsc_max_supported_bw_in_kbps,
6522 &stream->timing.dsc_cfg)) {
6523 stream->timing.flags.DSC = 1;
6524 DRM_DEBUG_DRIVER("%s: [%s] DSC is selected from DP-HDMI PCON\n",
6525 __func__, drm_connector->name);
6530 /* Overwrite the stream flag if DSC is enabled through debugfs */
6531 if (aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE)
6532 stream->timing.flags.DSC = 1;
6534 if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_h)
6535 stream->timing.dsc_cfg.num_slices_h = aconnector->dsc_settings.dsc_num_slices_h;
6537 if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_v)
6538 stream->timing.dsc_cfg.num_slices_v = aconnector->dsc_settings.dsc_num_slices_v;
6540 if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_bits_per_pixel)
6541 stream->timing.dsc_cfg.bits_per_pixel = aconnector->dsc_settings.dsc_bits_per_pixel;
6543 #endif /* CONFIG_DRM_AMD_DC_DCN */
6546 * DOC: FreeSync Video
6548 * When a userspace application wants to play a video, the content follows a
6549 * standard format definition that usually specifies the FPS for that format.
6550 * The list below illustrates some video formats and their expected FPS,
6551 * respectively:
6553 * - TV/NTSC (23.976 FPS)
6554 * - Cinema (24 FPS)
6555 * - TV/PAL (25 FPS)
6556 * - TV/NTSC (29.97 FPS)
6557 * - TV/NTSC (30 FPS)
6558 * - Cinema HFR (48 FPS)
6559 * - TV/PAL (50 FPS)
6560 * - Commonly used (60 FPS)
6561 * - Multiples of 24 (48,72,96,120 FPS)
6563 * The list of standard video formats is not huge and can be added to the
6564 * connector modeset list beforehand. With that, userspace can leverage
6565 * FreeSync to extend the front porch in order to attain the target refresh
6566 * rate. Such a switch will happen seamlessly, without screen blanking or
6567 * reprogramming of the output in any other way. If the userspace requests a
6568 * modesetting change compatible with FreeSync modes that only differ in the
6569 * refresh rate, DC will skip the full update and avoid blink during the
6570 * transition. For example, the video player can change the modesetting from
6571 * 60Hz to 30Hz for playing TV/NTSC content when it goes full screen without
6572 * causing any display blink. This same concept can be applied to a mode
6573 * setting change.
6574 */
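/*
 * A worked sketch of the front-porch stretch described above (illustrative,
 * not driver code): keep the pixel clock and horizontal timing, and grow
 * v_total -- i.e. the vertical front porch -- until the frame rate matches
 * the content.
 */
static unsigned int example_vtotal_for_refresh(unsigned int pix_clk_khz,
					       unsigned int h_total,
					       unsigned int target_hz)
{
	/* e.g. 148500 kHz, h_total 2200: 60 Hz -> 1125, 48 Hz -> 1406 */
	return (pix_clk_khz * 1000u) / (h_total * target_hz);
}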
6575 static struct drm_display_mode *
6576 get_highest_refresh_rate_mode(struct amdgpu_dm_connector *aconnector,
6577 bool use_probed_modes)
6579 struct drm_display_mode *m, *m_pref = NULL;
6580 u16 current_refresh, highest_refresh;
6581 struct list_head *list_head = use_probed_modes ?
6582 &aconnector->base.probed_modes :
6583 &aconnector->base.modes;
6585 if (aconnector->freesync_vid_base.clock != 0)
6586 return &aconnector->freesync_vid_base;
6588 /* Find the preferred mode */
6589 list_for_each_entry (m, list_head, head) {
6590 if (m->type & DRM_MODE_TYPE_PREFERRED) {
6597 /* Probably an EDID with no preferred mode. Fall back to the first entry */
6598 m_pref = list_first_entry_or_null(
6599 &aconnector->base.modes, struct drm_display_mode, head);
6601 DRM_DEBUG_DRIVER("No preferred mode found in EDID\n");
6606 highest_refresh = drm_mode_vrefresh(m_pref);
6609 * Find the mode with the highest refresh rate at the same resolution.
6610 * For some monitors, the preferred mode is not the one with the highest
6611 * supported refresh rate.
6613 list_for_each_entry (m, list_head, head) {
6614 current_refresh = drm_mode_vrefresh(m);
6616 if (m->hdisplay == m_pref->hdisplay &&
6617 m->vdisplay == m_pref->vdisplay &&
6618 highest_refresh < current_refresh) {
6619 highest_refresh = current_refresh;
6624 drm_mode_copy(&aconnector->freesync_vid_base, m_pref);
6628 static bool is_freesync_video_mode(const struct drm_display_mode *mode,
6629 struct amdgpu_dm_connector *aconnector)
6631 struct drm_display_mode *high_mode;
6634 high_mode = get_highest_refresh_rate_mode(aconnector, false);
6635 if (!high_mode || !mode)
6638 timing_diff = high_mode->vtotal - mode->vtotal;
6640 if (high_mode->clock == 0 || high_mode->clock != mode->clock ||
6641 high_mode->hdisplay != mode->hdisplay ||
6642 high_mode->vdisplay != mode->vdisplay ||
6643 high_mode->hsync_start != mode->hsync_start ||
6644 high_mode->hsync_end != mode->hsync_end ||
6645 high_mode->htotal != mode->htotal ||
6646 high_mode->hskew != mode->hskew ||
6647 high_mode->vscan != mode->vscan ||
6648 high_mode->vsync_start - mode->vsync_start != timing_diff ||
6649 high_mode->vsync_end - mode->vsync_end != timing_diff)
6655 static struct dc_stream_state *
6656 create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
6657 const struct drm_display_mode *drm_mode,
6658 const struct dm_connector_state *dm_state,
6659 const struct dc_stream_state *old_stream,
6662 struct drm_display_mode *preferred_mode = NULL;
6663 struct drm_connector *drm_connector;
6664 const struct drm_connector_state *con_state =
6665 dm_state ? &dm_state->base : NULL;
6666 struct dc_stream_state *stream = NULL;
6667 struct drm_display_mode mode = *drm_mode;
6668 struct drm_display_mode saved_mode;
6669 struct drm_display_mode *freesync_mode = NULL;
6670 bool native_mode_found = false;
6671 bool recalculate_timing = false;
6672 bool scale = dm_state ? (dm_state->scaling != RMX_OFF) : false;
6674 int preferred_refresh = 0;
6675 #if defined(CONFIG_DRM_AMD_DC_DCN)
6676 struct dsc_dec_dpcd_caps dsc_caps;
6678 struct dc_sink *sink = NULL;
6680 memset(&saved_mode, 0, sizeof(saved_mode));
6682 if (aconnector == NULL) {
6683 DRM_ERROR("aconnector is NULL!\n");
6687 drm_connector = &aconnector->base;
6689 if (!aconnector->dc_sink) {
6690 sink = create_fake_sink(aconnector);
6694 sink = aconnector->dc_sink;
6695 dc_sink_retain(sink);
6698 stream = dc_create_stream_for_sink(sink);
6700 if (stream == NULL) {
6701 DRM_ERROR("Failed to create stream for sink!\n");
6705 stream->dm_stream_context = aconnector;
6707 stream->timing.flags.LTE_340MCSC_SCRAMBLE =
6708 drm_connector->display_info.hdmi.scdc.scrambling.low_rates;
6710 list_for_each_entry(preferred_mode, &aconnector->base.modes, head) {
6711 /* Search for preferred mode */
6712 if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED) {
6713 native_mode_found = true;
6717 if (!native_mode_found)
6718 preferred_mode = list_first_entry_or_null(
6719 &aconnector->base.modes,
6720 struct drm_display_mode,
6723 mode_refresh = drm_mode_vrefresh(&mode);
6725 if (preferred_mode == NULL) {
6727 * This may not be an error: the use case is when we have no
6728 * usermode calls to reset and set the mode upon hotplug. In this
6729 * case, we call set mode ourselves to restore the previous mode,
6730 * and the mode list may not be filled in in time.
6732 DRM_DEBUG_DRIVER("No preferred mode found\n");
6734 recalculate_timing = is_freesync_video_mode(&mode, aconnector);
6735 if (recalculate_timing) {
6736 freesync_mode = get_highest_refresh_rate_mode(aconnector, false);
6737 drm_mode_copy(&saved_mode, &mode);
6738 drm_mode_copy(&mode, freesync_mode);
6740 decide_crtc_timing_for_drm_display_mode(
6741 &mode, preferred_mode, scale);
6743 preferred_refresh = drm_mode_vrefresh(preferred_mode);
6747 if (recalculate_timing)
6748 drm_mode_set_crtcinfo(&saved_mode, 0);
6750 drm_mode_set_crtcinfo(&mode, 0);
6753 * If scaling is enabled and the refresh rate didn't change,
6754 * we copy the vic and polarities of the old timings.
6756 if (!scale || mode_refresh != preferred_refresh)
6757 fill_stream_properties_from_drm_display_mode(
6758 stream, &mode, &aconnector->base, con_state, NULL,
6761 fill_stream_properties_from_drm_display_mode(
6762 stream, &mode, &aconnector->base, con_state, old_stream,
6765 #if defined(CONFIG_DRM_AMD_DC_DCN)
6766 /* SST DSC determination policy */
6767 update_dsc_caps(aconnector, sink, stream, &dsc_caps);
6768 if (aconnector->dsc_settings.dsc_force_enable != DSC_CLK_FORCE_DISABLE && dsc_caps.is_dsc_supported)
6769 apply_dsc_policy_for_stream(aconnector, sink, stream, &dsc_caps);
6772 update_stream_scaling_settings(&mode, dm_state, stream);
6775 &stream->audio_info,
6779 update_stream_signal(stream, sink);
6781 if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
6782 mod_build_hf_vsif_infopacket(stream, &stream->vsp_infopacket);
6784 if (stream->link->psr_settings.psr_feature_enabled) {
6786 // Decide whether the stream supports the VSC SDP colorimetry
6787 // capability before building the VSC info packet.
6789 stream->use_vsc_sdp_for_colorimetry = false;
6790 if (aconnector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
6791 stream->use_vsc_sdp_for_colorimetry =
6792 aconnector->dc_sink->is_vsc_sdp_colorimetry_supported;
6794 if (stream->link->dpcd_caps.dprx_feature.bits.VSC_SDP_COLORIMETRY_SUPPORTED)
6795 stream->use_vsc_sdp_for_colorimetry = true;
6797 mod_build_vsc_infopacket(stream, &stream->vsc_infopacket, stream->output_color_space);
6798 aconnector->psr_skip_count = AMDGPU_DM_PSR_ENTRY_DELAY;
6802 dc_sink_release(sink);
6807 static void amdgpu_dm_crtc_destroy(struct drm_crtc *crtc)
6809 drm_crtc_cleanup(crtc);
6813 static void dm_crtc_destroy_state(struct drm_crtc *crtc,
6814 struct drm_crtc_state *state)
6816 struct dm_crtc_state *cur = to_dm_crtc_state(state);
6818 /* TODO: Destroy dc_stream objects once the stream object is flattened */
6820 dc_stream_release(cur->stream);
6823 __drm_atomic_helper_crtc_destroy_state(state);
6829 static void dm_crtc_reset_state(struct drm_crtc *crtc)
6831 struct dm_crtc_state *state;
6834 dm_crtc_destroy_state(crtc, crtc->state);
6836 state = kzalloc(sizeof(*state), GFP_KERNEL);
6837 if (WARN_ON(!state))
6840 __drm_atomic_helper_crtc_reset(crtc, &state->base);
6843 static struct drm_crtc_state *
6844 dm_crtc_duplicate_state(struct drm_crtc *crtc)
6846 struct dm_crtc_state *state, *cur;
6848 cur = to_dm_crtc_state(crtc->state);
6850 if (WARN_ON(!crtc->state))
6853 state = kzalloc(sizeof(*state), GFP_KERNEL);
6857 __drm_atomic_helper_crtc_duplicate_state(crtc, &state->base);
6860 state->stream = cur->stream;
6861 dc_stream_retain(state->stream);
6864 state->active_planes = cur->active_planes;
6865 state->vrr_infopacket = cur->vrr_infopacket;
6866 state->abm_level = cur->abm_level;
6867 state->vrr_supported = cur->vrr_supported;
6868 state->freesync_config = cur->freesync_config;
6869 state->cm_has_degamma = cur->cm_has_degamma;
6870 state->cm_is_degamma_srgb = cur->cm_is_degamma_srgb;
6871 state->mpo_requested = cur->mpo_requested;
6872 /* TODO: Duplicate dc_stream once the stream object is flattened */
6874 return &state->base;
6877 #ifdef CONFIG_DRM_AMD_SECURE_DISPLAY
6878 static int amdgpu_dm_crtc_late_register(struct drm_crtc *crtc)
6880 crtc_debugfs_init(crtc);
6886 static inline int dm_set_vupdate_irq(struct drm_crtc *crtc, bool enable)
6888 enum dc_irq_source irq_source;
6889 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
6890 struct amdgpu_device *adev = drm_to_adev(crtc->dev);
6893 irq_source = IRQ_TYPE_VUPDATE + acrtc->otg_inst;
6895 rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
6897 DRM_DEBUG_VBL("crtc %d - vupdate irq %sabling: r=%d\n",
6898 acrtc->crtc_id, enable ? "en" : "dis", rc);
6902 static inline int dm_set_vblank(struct drm_crtc *crtc, bool enable)
6904 enum dc_irq_source irq_source;
6905 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
6906 struct amdgpu_device *adev = drm_to_adev(crtc->dev);
6907 struct dm_crtc_state *acrtc_state = to_dm_crtc_state(crtc->state);
6908 struct amdgpu_display_manager *dm = &adev->dm;
6909 struct vblank_control_work *work;
6913 /* vblank irq on -> Only need vupdate irq in vrr mode */
6914 if (amdgpu_dm_vrr_active(acrtc_state))
6915 rc = dm_set_vupdate_irq(crtc, true);
6917 /* vblank irq off -> vupdate irq off */
6918 rc = dm_set_vupdate_irq(crtc, false);
6924 irq_source = IRQ_TYPE_VBLANK + acrtc->otg_inst;
6926 if (!dc_interrupt_set(adev->dm.dc, irq_source, enable))
6929 if (amdgpu_in_reset(adev))
6932 if (dm->vblank_control_workqueue) {
6933 work = kzalloc(sizeof(*work), GFP_ATOMIC);
6937 INIT_WORK(&work->work, vblank_control_worker);
6939 work->acrtc = acrtc;
6940 work->enable = enable;
6942 if (acrtc_state->stream) {
6943 dc_stream_retain(acrtc_state->stream);
6944 work->stream = acrtc_state->stream;
6947 queue_work(dm->vblank_control_workqueue, &work->work);
6953 static int dm_enable_vblank(struct drm_crtc *crtc)
6955 return dm_set_vblank(crtc, true);
6958 static void dm_disable_vblank(struct drm_crtc *crtc)
6960 dm_set_vblank(crtc, false);
6963 /* Only the options currently available for the driver are implemented */
6964 static const struct drm_crtc_funcs amdgpu_dm_crtc_funcs = {
6965 .reset = dm_crtc_reset_state,
6966 .destroy = amdgpu_dm_crtc_destroy,
6967 .set_config = drm_atomic_helper_set_config,
6968 .page_flip = drm_atomic_helper_page_flip,
6969 .atomic_duplicate_state = dm_crtc_duplicate_state,
6970 .atomic_destroy_state = dm_crtc_destroy_state,
6971 .set_crc_source = amdgpu_dm_crtc_set_crc_source,
6972 .verify_crc_source = amdgpu_dm_crtc_verify_crc_source,
6973 .get_crc_sources = amdgpu_dm_crtc_get_crc_sources,
6974 .get_vblank_counter = amdgpu_get_vblank_counter_kms,
6975 .enable_vblank = dm_enable_vblank,
6976 .disable_vblank = dm_disable_vblank,
6977 .get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
6978 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
6979 .late_register = amdgpu_dm_crtc_late_register,
6983 static enum drm_connector_status
6984 amdgpu_dm_connector_detect(struct drm_connector *connector, bool force)
6987 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
6991 * 1. This interface is NOT called in context of HPD irq.
6992 * 2. This interface *is called* in the context of a user-mode ioctl,
6993 * which makes it a bad place for *any* MST-related activity.
6996 if (aconnector->base.force == DRM_FORCE_UNSPECIFIED &&
6997 !aconnector->fake_enable)
6998 connected = (aconnector->dc_sink != NULL);
7000 connected = (aconnector->base.force == DRM_FORCE_ON);
7002 update_subconnector_property(aconnector);
7004 return (connected ? connector_status_connected :
7005 connector_status_disconnected);
7008 int amdgpu_dm_connector_atomic_set_property(struct drm_connector *connector,
7009 struct drm_connector_state *connector_state,
7010 struct drm_property *property,
7013 struct drm_device *dev = connector->dev;
7014 struct amdgpu_device *adev = drm_to_adev(dev);
7015 struct dm_connector_state *dm_old_state =
7016 to_dm_connector_state(connector->state);
7017 struct dm_connector_state *dm_new_state =
7018 to_dm_connector_state(connector_state);
7022 if (property == dev->mode_config.scaling_mode_property) {
7023 enum amdgpu_rmx_type rmx_type;
7026 case DRM_MODE_SCALE_CENTER:
7027 rmx_type = RMX_CENTER;
7029 case DRM_MODE_SCALE_ASPECT:
7030 rmx_type = RMX_ASPECT;
7032 case DRM_MODE_SCALE_FULLSCREEN:
7033 rmx_type = RMX_FULL;
7035 case DRM_MODE_SCALE_NONE:
7041 if (dm_old_state->scaling == rmx_type)
7044 dm_new_state->scaling = rmx_type;
7046 } else if (property == adev->mode_info.underscan_hborder_property) {
7047 dm_new_state->underscan_hborder = val;
7049 } else if (property == adev->mode_info.underscan_vborder_property) {
7050 dm_new_state->underscan_vborder = val;
7052 } else if (property == adev->mode_info.underscan_property) {
7053 dm_new_state->underscan_enable = val;
7055 } else if (property == adev->mode_info.abm_level_property) {
7056 dm_new_state->abm_level = val;
7063 int amdgpu_dm_connector_atomic_get_property(struct drm_connector *connector,
7064 const struct drm_connector_state *state,
7065 struct drm_property *property,
7068 struct drm_device *dev = connector->dev;
7069 struct amdgpu_device *adev = drm_to_adev(dev);
7070 struct dm_connector_state *dm_state =
7071 to_dm_connector_state(state);
7074 if (property == dev->mode_config.scaling_mode_property) {
7075 switch (dm_state->scaling) {
7077 *val = DRM_MODE_SCALE_CENTER;
7080 *val = DRM_MODE_SCALE_ASPECT;
7083 *val = DRM_MODE_SCALE_FULLSCREEN;
7087 *val = DRM_MODE_SCALE_NONE;
7091 } else if (property == adev->mode_info.underscan_hborder_property) {
7092 *val = dm_state->underscan_hborder;
7094 } else if (property == adev->mode_info.underscan_vborder_property) {
7095 *val = dm_state->underscan_vborder;
7097 } else if (property == adev->mode_info.underscan_property) {
7098 *val = dm_state->underscan_enable;
7100 } else if (property == adev->mode_info.abm_level_property) {
7101 *val = dm_state->abm_level;
7108 static void amdgpu_dm_connector_unregister(struct drm_connector *connector)
7110 struct amdgpu_dm_connector *amdgpu_dm_connector = to_amdgpu_dm_connector(connector);
7112 drm_dp_aux_unregister(&amdgpu_dm_connector->dm_dp_aux.aux);
7115 static void amdgpu_dm_connector_destroy(struct drm_connector *connector)
7117 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
7118 const struct dc_link *link = aconnector->dc_link;
7119 struct amdgpu_device *adev = drm_to_adev(connector->dev);
7120 struct amdgpu_display_manager *dm = &adev->dm;
7124 * Call only if mst_mgr was initialized before, since it's not done
7125 * for all connector types.
7127 if (aconnector->mst_mgr.dev)
7128 drm_dp_mst_topology_mgr_destroy(&aconnector->mst_mgr);
7130 for (i = 0; i < dm->num_of_edps; i++) {
7131 if ((link == dm->backlight_link[i]) && dm->backlight_dev[i]) {
7132 backlight_device_unregister(dm->backlight_dev[i]);
7133 dm->backlight_dev[i] = NULL;
7137 if (aconnector->dc_em_sink)
7138 dc_sink_release(aconnector->dc_em_sink);
7139 aconnector->dc_em_sink = NULL;
7140 if (aconnector->dc_sink)
7141 dc_sink_release(aconnector->dc_sink);
7142 aconnector->dc_sink = NULL;
7144 drm_dp_cec_unregister_connector(&aconnector->dm_dp_aux.aux);
7145 drm_connector_unregister(connector);
7146 drm_connector_cleanup(connector);
7147 if (aconnector->i2c) {
7148 i2c_del_adapter(&aconnector->i2c->base);
7149 kfree(aconnector->i2c);
7151 kfree(aconnector->dm_dp_aux.aux.name);
7156 void amdgpu_dm_connector_funcs_reset(struct drm_connector *connector)
7158 struct dm_connector_state *state =
7159 to_dm_connector_state(connector->state);
7161 if (connector->state)
7162 __drm_atomic_helper_connector_destroy_state(connector->state);
7166 state = kzalloc(sizeof(*state), GFP_KERNEL);
7169 state->scaling = RMX_OFF;
7170 state->underscan_enable = false;
7171 state->underscan_hborder = 0;
7172 state->underscan_vborder = 0;
7173 state->base.max_requested_bpc = 8;
7174 state->vcpi_slots = 0;
7176 if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
7177 state->abm_level = amdgpu_dm_abm_level;
7179 __drm_atomic_helper_connector_reset(connector, &state->base);
7183 struct drm_connector_state *
7184 amdgpu_dm_connector_atomic_duplicate_state(struct drm_connector *connector)
7186 struct dm_connector_state *state =
7187 to_dm_connector_state(connector->state);
7189 struct dm_connector_state *new_state =
7190 kmemdup(state, sizeof(*state), GFP_KERNEL);
7195 __drm_atomic_helper_connector_duplicate_state(connector, &new_state->base);
7197 new_state->freesync_capable = state->freesync_capable;
7198 new_state->abm_level = state->abm_level;
7199 new_state->scaling = state->scaling;
7200 new_state->underscan_enable = state->underscan_enable;
7201 new_state->underscan_hborder = state->underscan_hborder;
7202 new_state->underscan_vborder = state->underscan_vborder;
7203 new_state->vcpi_slots = state->vcpi_slots;
7204 new_state->pbn = state->pbn;
7205 return &new_state->base;
7209 amdgpu_dm_connector_late_register(struct drm_connector *connector)
7211 struct amdgpu_dm_connector *amdgpu_dm_connector =
7212 to_amdgpu_dm_connector(connector);
7215 if ((connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort) ||
7216 (connector->connector_type == DRM_MODE_CONNECTOR_eDP)) {
7217 amdgpu_dm_connector->dm_dp_aux.aux.dev = connector->kdev;
7218 r = drm_dp_aux_register(&amdgpu_dm_connector->dm_dp_aux.aux);
7223 #if defined(CONFIG_DEBUG_FS)
7224 connector_debugfs_init(amdgpu_dm_connector);
7230 static const struct drm_connector_funcs amdgpu_dm_connector_funcs = {
7231 .reset = amdgpu_dm_connector_funcs_reset,
7232 .detect = amdgpu_dm_connector_detect,
7233 .fill_modes = drm_helper_probe_single_connector_modes,
7234 .destroy = amdgpu_dm_connector_destroy,
7235 .atomic_duplicate_state = amdgpu_dm_connector_atomic_duplicate_state,
7236 .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
7237 .atomic_set_property = amdgpu_dm_connector_atomic_set_property,
7238 .atomic_get_property = amdgpu_dm_connector_atomic_get_property,
7239 .late_register = amdgpu_dm_connector_late_register,
7240 .early_unregister = amdgpu_dm_connector_unregister
7243 static int get_modes(struct drm_connector *connector)
7245 return amdgpu_dm_connector_get_modes(connector);
7248 static void create_eml_sink(struct amdgpu_dm_connector *aconnector)
7250 struct dc_sink_init_data init_params = {
7251 .link = aconnector->dc_link,
7252 .sink_signal = SIGNAL_TYPE_VIRTUAL
7256 if (!aconnector->base.edid_blob_ptr) {
7257 DRM_ERROR("No EDID firmware found on connector: %s ,forcing to OFF!\n",
7258 aconnector->base.name);
7260 aconnector->base.force = DRM_FORCE_OFF;
7261 aconnector->base.override_edid = false;
7265 edid = (struct edid *) aconnector->base.edid_blob_ptr->data;
7267 aconnector->edid = edid;
7269 aconnector->dc_em_sink = dc_link_add_remote_sink(
7270 aconnector->dc_link,
7272 (edid->extensions + 1) * EDID_LENGTH,
7275 if (aconnector->base.force == DRM_FORCE_ON) {
7276 aconnector->dc_sink = aconnector->dc_link->local_sink ?
7277 aconnector->dc_link->local_sink :
7278 aconnector->dc_em_sink;
7279 dc_sink_retain(aconnector->dc_sink);
7283 static void handle_edid_mgmt(struct amdgpu_dm_connector *aconnector)
7285 struct dc_link *link = (struct dc_link *)aconnector->dc_link;
7288 * In the case of a headless boot with force-on for a DP managed connector,
7289 * those settings have to be != 0 to get an initial modeset.
7291 if (link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT) {
7292 link->verified_link_cap.lane_count = LANE_COUNT_FOUR;
7293 link->verified_link_cap.link_rate = LINK_RATE_HIGH2;
7297 aconnector->base.override_edid = true;
7298 create_eml_sink(aconnector);
7301 struct dc_stream_state *
7302 create_validate_stream_for_sink(struct amdgpu_dm_connector *aconnector,
7303 const struct drm_display_mode *drm_mode,
7304 const struct dm_connector_state *dm_state,
7305 const struct dc_stream_state *old_stream)
7307 struct drm_connector *connector = &aconnector->base;
7308 struct amdgpu_device *adev = drm_to_adev(connector->dev);
7309 struct dc_stream_state *stream;
7310 const struct drm_connector_state *drm_state = dm_state ? &dm_state->base : NULL;
7311 int requested_bpc = drm_state ? drm_state->max_requested_bpc : 8;
7312 enum dc_status dc_result = DC_OK;
7315 stream = create_stream_for_sink(aconnector, drm_mode,
7316 dm_state, old_stream,
7318 if (stream == NULL) {
7319 DRM_ERROR("Failed to create stream for sink!\n");
7323 dc_result = dc_validate_stream(adev->dm.dc, stream);
7324 if (dc_result == DC_OK && stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST)
7325 dc_result = dm_dp_mst_is_port_support_mode(aconnector, stream);
7327 if (dc_result != DC_OK) {
7328 DRM_DEBUG_KMS("Mode %dx%d (clk %d) failed DC validation with error %d (%s)\n",
7333 dc_status_to_str(dc_result));
7335 dc_stream_release(stream);
7337 requested_bpc -= 2; /* lower bpc to retry validation */
7340 } while (stream == NULL && requested_bpc >= 6);
7342 if (dc_result == DC_FAIL_ENC_VALIDATE && !aconnector->force_yuv420_output) {
7343 DRM_DEBUG_KMS("Retry forcing YCbCr420 encoding\n");
7345 aconnector->force_yuv420_output = true;
7346 stream = create_validate_stream_for_sink(aconnector, drm_mode,
7347 dm_state, old_stream);
7348 aconnector->force_yuv420_output = false;
7354 enum drm_mode_status amdgpu_dm_connector_mode_valid(struct drm_connector *connector,
7355 struct drm_display_mode *mode)
7357 int result = MODE_ERROR;
7358 struct dc_sink *dc_sink;
7359 /* TODO: Unhardcode stream count */
7360 struct dc_stream_state *stream;
7361 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
7363 if ((mode->flags & DRM_MODE_FLAG_INTERLACE) ||
7364 (mode->flags & DRM_MODE_FLAG_DBLSCAN))
7368 * Only run this the first time mode_valid is called, to initialize
7371 if (aconnector->base.force != DRM_FORCE_UNSPECIFIED &&
7372 !aconnector->dc_em_sink)
7373 handle_edid_mgmt(aconnector);
7375 dc_sink = to_amdgpu_dm_connector(connector)->dc_sink;
7377 if (dc_sink == NULL && aconnector->base.force != DRM_FORCE_ON_DIGITAL &&
7378 aconnector->base.force != DRM_FORCE_ON) {
7379 DRM_ERROR("dc_sink is NULL!\n");
7383 stream = create_validate_stream_for_sink(aconnector, mode, NULL, NULL);
7385 dc_stream_release(stream);
7390 /* TODO: error handling */
7394 static int fill_hdr_info_packet(const struct drm_connector_state *state,
7395 struct dc_info_packet *out)
7397 struct hdmi_drm_infoframe frame;
7398 unsigned char buf[30]; /* 26 + 4 */
7402 memset(out, 0, sizeof(*out));
7404 if (!state->hdr_output_metadata)
7407 ret = drm_hdmi_infoframe_set_hdr_metadata(&frame, state);
7411 len = hdmi_drm_infoframe_pack_only(&frame, buf, sizeof(buf));
7415 /* Static metadata is a fixed 26 bytes + 4 byte header. */
7419 /* Prepare the infopacket for DC. */
7420 switch (state->connector->connector_type) {
7421 case DRM_MODE_CONNECTOR_HDMIA:
7422 out->hb0 = 0x87; /* type */
7423 out->hb1 = 0x01; /* version */
7424 out->hb2 = 0x1A; /* length */
7425 out->sb[0] = buf[3]; /* checksum */
7429 case DRM_MODE_CONNECTOR_DisplayPort:
7430 case DRM_MODE_CONNECTOR_eDP:
7431 out->hb0 = 0x00; /* sdp id, zero */
7432 out->hb1 = 0x87; /* type */
7433 out->hb2 = 0x1D; /* payload len - 1 */
7434 out->hb3 = (0x13 << 2); /* sdp version */
7435 out->sb[0] = 0x01; /* version */
7436 out->sb[1] = 0x1A; /* length */
7444 memcpy(&out->sb[i], &buf[4], 26);
7447 print_hex_dump(KERN_DEBUG, "HDR SB:", DUMP_PREFIX_NONE, 16, 1, out->sb,
7448 sizeof(out->sb), false);
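/*
 * For reference, a standalone sketch of the CTA-861 checksum convention
 * behind buf[3] above (not driver code): the checksum byte is chosen so
 * that the infoframe header plus payload sums to zero modulo 256.
 */
static unsigned char example_infoframe_checksum(const unsigned char *buf,
						int len)
{
	unsigned char sum = 0;
	int i;

	for (i = 0; i < len; i++)
		if (i != 3)	/* skip the checksum slot itself */
			sum += buf[i];
	return (unsigned char)(0x100 - sum);
}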
7454 amdgpu_dm_connector_atomic_check(struct drm_connector *conn,
7455 struct drm_atomic_state *state)
7457 struct drm_connector_state *new_con_state =
7458 drm_atomic_get_new_connector_state(state, conn);
7459 struct drm_connector_state *old_con_state =
7460 drm_atomic_get_old_connector_state(state, conn);
7461 struct drm_crtc *crtc = new_con_state->crtc;
7462 struct drm_crtc_state *new_crtc_state;
7465 trace_amdgpu_dm_connector_atomic_check(new_con_state);
7470 if (!drm_connector_atomic_hdr_metadata_equal(old_con_state, new_con_state)) {
7471 struct dc_info_packet hdr_infopacket;
7473 ret = fill_hdr_info_packet(new_con_state, &hdr_infopacket);
7477 new_crtc_state = drm_atomic_get_crtc_state(state, crtc);
7478 if (IS_ERR(new_crtc_state))
7479 return PTR_ERR(new_crtc_state);
7482 * DC considers the stream backends changed if the
7483 * static metadata changes. Forcing the modeset also
7484 * gives a simple way for userspace to switch from
7485 * 8bpc to 10bpc when setting the metadata to enter
7488 * Changing the static metadata after it's been
7489 * set is permissible, however. So only force a
7490 * modeset if we're entering or exiting HDR.
7492 new_crtc_state->mode_changed =
7493 !old_con_state->hdr_output_metadata ||
7494 !new_con_state->hdr_output_metadata;
7500 static const struct drm_connector_helper_funcs
7501 amdgpu_dm_connector_helper_funcs = {
7503 * If hotplugging a second, bigger display in FB console mode, bigger-resolution
7504 * modes will be filtered by drm_mode_validate_size(), and those modes
7505 * are missing after the user starts lightdm. So we need to renew the modes
7506 * list in the get_modes callback, not just return the modes count.
7508 .get_modes = get_modes,
7509 .mode_valid = amdgpu_dm_connector_mode_valid,
7510 .atomic_check = amdgpu_dm_connector_atomic_check,
7513 static void dm_crtc_helper_disable(struct drm_crtc *crtc)
7517 static int count_crtc_active_planes(struct drm_crtc_state *new_crtc_state)
7519 struct drm_atomic_state *state = new_crtc_state->state;
7520 struct drm_plane *plane;
7523 drm_for_each_plane_mask(plane, state->dev, new_crtc_state->plane_mask) {
7524 struct drm_plane_state *new_plane_state;
7526 /* Cursor planes are "fake". */
7527 if (plane->type == DRM_PLANE_TYPE_CURSOR)
7530 new_plane_state = drm_atomic_get_new_plane_state(state, plane);
7532 if (!new_plane_state) {
7534 * The plane is enabled on the CRTC and hasn't changed
7535 * state. This means that it previously passed
7536 * validation and is therefore enabled.
7542 /* We need a framebuffer to be considered enabled. */
7543 num_active += (new_plane_state->fb != NULL);
7549 static void dm_update_crtc_active_planes(struct drm_crtc *crtc,
7550 struct drm_crtc_state *new_crtc_state)
7552 struct dm_crtc_state *dm_new_crtc_state =
7553 to_dm_crtc_state(new_crtc_state);
7555 dm_new_crtc_state->active_planes = 0;
7557 if (!dm_new_crtc_state->stream)
7560 dm_new_crtc_state->active_planes =
7561 count_crtc_active_planes(new_crtc_state);
7564 static int dm_crtc_helper_atomic_check(struct drm_crtc *crtc,
7565 struct drm_atomic_state *state)
7567 struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state,
7569 struct amdgpu_device *adev = drm_to_adev(crtc->dev);
7570 struct dc *dc = adev->dm.dc;
7571 struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
7574 trace_amdgpu_dm_crtc_atomic_check(crtc_state);
7576 dm_update_crtc_active_planes(crtc, crtc_state);
7578 if (WARN_ON(unlikely(!dm_crtc_state->stream &&
7579 modeset_required(crtc_state, NULL, dm_crtc_state->stream)))) {
7584 * We require the primary plane to be enabled whenever the CRTC is, otherwise
7585 * drm_mode_cursor_universal may end up trying to enable the cursor plane while all other
7586 * planes are disabled, which is not supported by the hardware. And there is legacy
7587 * userspace which stops using the HW cursor altogether in response to the resulting EINVAL.
7589 if (crtc_state->enable &&
7590 !(crtc_state->plane_mask & drm_plane_mask(crtc->primary))) {
7591 DRM_DEBUG_ATOMIC("Can't enable a CRTC without enabling the primary plane\n");
7595 /* In some use cases, like reset, no stream is attached */
7596 if (!dm_crtc_state->stream)
7599 if (dc_validate_stream(dc, dm_crtc_state->stream) == DC_OK)
7602 DRM_DEBUG_ATOMIC("Failed DC stream validation\n");
7606 static bool dm_crtc_helper_mode_fixup(struct drm_crtc *crtc,
7607 const struct drm_display_mode *mode,
7608 struct drm_display_mode *adjusted_mode)
7613 static const struct drm_crtc_helper_funcs amdgpu_dm_crtc_helper_funcs = {
7614 .disable = dm_crtc_helper_disable,
7615 .atomic_check = dm_crtc_helper_atomic_check,
7616 .mode_fixup = dm_crtc_helper_mode_fixup,
7617 .get_scanout_position = amdgpu_crtc_get_scanout_position,
7620 static void dm_encoder_helper_disable(struct drm_encoder *encoder)
7625 int convert_dc_color_depth_into_bpc(enum dc_color_depth display_color_depth)
7627 switch (display_color_depth) {
7628 case COLOR_DEPTH_666:
7629 return 6;
7630 case COLOR_DEPTH_888:
7631 return 8;
7632 case COLOR_DEPTH_101010:
7633 return 10;
7634 case COLOR_DEPTH_121212:
7635 return 12;
7636 case COLOR_DEPTH_141414:
7637 return 14;
7638 case COLOR_DEPTH_161616:
7639 return 16;
7640 default:
7641 break;
7642 }
7643 return 0;
7644 }
7646 static int dm_encoder_helper_atomic_check(struct drm_encoder *encoder,
7647 struct drm_crtc_state *crtc_state,
7648 struct drm_connector_state *conn_state)
7650 struct drm_atomic_state *state = crtc_state->state;
7651 struct drm_connector *connector = conn_state->connector;
7652 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
7653 struct dm_connector_state *dm_new_connector_state = to_dm_connector_state(conn_state);
7654 const struct drm_display_mode *adjusted_mode = &crtc_state->adjusted_mode;
7655 struct drm_dp_mst_topology_mgr *mst_mgr;
7656 struct drm_dp_mst_port *mst_port;
7657 enum dc_color_depth color_depth;
7659 bool is_y420 = false;
7661 if (!aconnector->port || !aconnector->dc_sink)
7664 mst_port = aconnector->port;
7665 mst_mgr = &aconnector->mst_port->mst_mgr;
7667 if (!crtc_state->connectors_changed && !crtc_state->mode_changed)
7670 if (!state->duplicated) {
7671 int max_bpc = conn_state->max_requested_bpc;
7672 is_y420 = drm_mode_is_420_also(&connector->display_info, adjusted_mode) &&
7673 aconnector->force_yuv420_output;
7674 color_depth = convert_color_depth_from_display_info(connector,
7677 bpp = convert_dc_color_depth_into_bpc(color_depth) * 3;
7678 clock = adjusted_mode->clock;
7679 dm_new_connector_state->pbn = drm_dp_calc_pbn_mode(clock, bpp, false);
7681 dm_new_connector_state->vcpi_slots = drm_dp_atomic_find_vcpi_slots(state,
7684 dm_new_connector_state->pbn,
7685 dm_mst_get_pbn_divider(aconnector->dc_link));
7686 if (dm_new_connector_state->vcpi_slots < 0) {
7687 DRM_DEBUG_ATOMIC("failed finding vcpi slots: %d\n", (int)dm_new_connector_state->vcpi_slots);
7688 return dm_new_connector_state->vcpi_slots;
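/*
 * A rough sketch of the PBN figure computed above -- the idea, not the
 * exact drm_dp_calc_pbn_mode() arithmetic: PBN counts bandwidth in units
 * of 54/64 MBps with a 0.6% margin over the raw clock * bpp / 8 byte rate.
 */
static unsigned long long example_approx_pbn(unsigned long long clock_khz,
					     unsigned long long bpp)
{
	unsigned long long num = clock_khz * bpp * 64ULL * 1006ULL;
	unsigned long long den = 8ULL * 54ULL * 1000ULL * 1000ULL;

	/* round up; e.g. 148500 kHz at 24 bpp -> 532 PBN */
	return (num + den - 1) / den;
}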
7693 const struct drm_encoder_helper_funcs amdgpu_dm_encoder_helper_funcs = {
7694 .disable = dm_encoder_helper_disable,
7695 .atomic_check = dm_encoder_helper_atomic_check
7698 #if defined(CONFIG_DRM_AMD_DC_DCN)
7699 static int dm_update_mst_vcpi_slots_for_dsc(struct drm_atomic_state *state,
7700 struct dc_state *dc_state,
7701 struct dsc_mst_fairness_vars *vars)
7703 struct dc_stream_state *stream = NULL;
7704 struct drm_connector *connector;
7705 struct drm_connector_state *new_con_state;
7706 struct amdgpu_dm_connector *aconnector;
7707 struct dm_connector_state *dm_conn_state;
7709 int vcpi, pbn_div, pbn, slot_num = 0;
7711 for_each_new_connector_in_state(state, connector, new_con_state, i) {
7713 aconnector = to_amdgpu_dm_connector(connector);
7715 if (!aconnector->port)
7718 if (!new_con_state || !new_con_state->crtc)
7721 dm_conn_state = to_dm_connector_state(new_con_state);
7723 for (j = 0; j < dc_state->stream_count; j++) {
7724 stream = dc_state->streams[j];
7728 if ((struct amdgpu_dm_connector*)stream->dm_stream_context == aconnector)
7737 pbn_div = dm_mst_get_pbn_divider(stream->link);
7738 /* pbn is calculated by compute_mst_dsc_configs_for_state */
7739 for (j = 0; j < dc_state->stream_count; j++) {
7740 if (vars[j].aconnector == aconnector) {
7746 if (j == dc_state->stream_count)
7749 slot_num = DIV_ROUND_UP(pbn, pbn_div);
7751 if (stream->timing.flags.DSC != 1) {
7752 dm_conn_state->pbn = pbn;
7753 dm_conn_state->vcpi_slots = slot_num;
7755 drm_dp_mst_atomic_enable_dsc(state,
7763 vcpi = drm_dp_mst_atomic_enable_dsc(state,
7770 dm_conn_state->pbn = pbn;
7771 dm_conn_state->vcpi_slots = vcpi;
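/*
 * The slot math used above, standalone (a sketch; the 40 PBN-per-slot
 * figure assumes a 4-lane HBR2 link and is illustrative): a 64-slot MTP
 * carries pbn_div PBN per slot, so a stream needs ceil(pbn / pbn_div)
 * slots.
 */
static int example_vcpi_slots(int pbn, int pbn_div)
{
	/* e.g. 532 PBN with pbn_div 40 -> 14 slots */
	return (pbn + pbn_div - 1) / pbn_div;
}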
7777 static void dm_drm_plane_reset(struct drm_plane *plane)
7779 struct dm_plane_state *amdgpu_state = NULL;
7782 plane->funcs->atomic_destroy_state(plane, plane->state);
7784 amdgpu_state = kzalloc(sizeof(*amdgpu_state), GFP_KERNEL);
7785 WARN_ON(amdgpu_state == NULL);
7788 __drm_atomic_helper_plane_reset(plane, &amdgpu_state->base);
7791 static struct drm_plane_state *
7792 dm_drm_plane_duplicate_state(struct drm_plane *plane)
7794 struct dm_plane_state *dm_plane_state, *old_dm_plane_state;
7796 old_dm_plane_state = to_dm_plane_state(plane->state);
7797 dm_plane_state = kzalloc(sizeof(*dm_plane_state), GFP_KERNEL);
7798 if (!dm_plane_state)
7801 __drm_atomic_helper_plane_duplicate_state(plane, &dm_plane_state->base);
7803 if (old_dm_plane_state->dc_state) {
7804 dm_plane_state->dc_state = old_dm_plane_state->dc_state;
7805 dc_plane_state_retain(dm_plane_state->dc_state);
7808 return &dm_plane_state->base;
7811 static void dm_drm_plane_destroy_state(struct drm_plane *plane,
7812 struct drm_plane_state *state)
7814 struct dm_plane_state *dm_plane_state = to_dm_plane_state(state);
7816 if (dm_plane_state->dc_state)
7817 dc_plane_state_release(dm_plane_state->dc_state);
7819 drm_atomic_helper_plane_destroy_state(plane, state);
7822 static const struct drm_plane_funcs dm_plane_funcs = {
7823 .update_plane = drm_atomic_helper_update_plane,
7824 .disable_plane = drm_atomic_helper_disable_plane,
7825 .destroy = drm_primary_helper_destroy,
7826 .reset = dm_drm_plane_reset,
7827 .atomic_duplicate_state = dm_drm_plane_duplicate_state,
7828 .atomic_destroy_state = dm_drm_plane_destroy_state,
7829 .format_mod_supported = dm_plane_format_mod_supported,
7832 static int dm_plane_helper_prepare_fb(struct drm_plane *plane,
7833 struct drm_plane_state *new_state)
7835 struct amdgpu_framebuffer *afb;
7836 struct drm_gem_object *obj;
7837 struct amdgpu_device *adev;
7838 struct amdgpu_bo *rbo;
7839 struct dm_plane_state *dm_plane_state_new, *dm_plane_state_old;
7843 if (!new_state->fb) {
7844 DRM_DEBUG_KMS("No FB bound\n");
7848 afb = to_amdgpu_framebuffer(new_state->fb);
7849 obj = new_state->fb->obj[0];
7850 rbo = gem_to_amdgpu_bo(obj);
7851 adev = amdgpu_ttm_adev(rbo->tbo.bdev);
7853 r = amdgpu_bo_reserve(rbo, true);
7855 dev_err(adev->dev, "fail to reserve bo (%d)\n", r);
7859 r = dma_resv_reserve_fences(rbo->tbo.base.resv, 1);
7861 dev_err(adev->dev, "reserving fence slot failed (%d)\n", r);
7865 if (plane->type != DRM_PLANE_TYPE_CURSOR)
7866 domain = amdgpu_display_supported_domains(adev, rbo->flags);
7868 domain = AMDGPU_GEM_DOMAIN_VRAM;
7870 r = amdgpu_bo_pin(rbo, domain);
7871 if (unlikely(r != 0)) {
7872 if (r != -ERESTARTSYS)
7873 DRM_ERROR("Failed to pin framebuffer with error %d\n", r);
7877 r = amdgpu_ttm_alloc_gart(&rbo->tbo);
7878 if (unlikely(r != 0)) {
7879 DRM_ERROR("%p bind failed\n", rbo);
7883 amdgpu_bo_unreserve(rbo);
7885 afb->address = amdgpu_bo_gpu_offset(rbo);
7890 * We don't do surface updates on planes that have been newly created,
7891 * but we also don't have the afb->address during atomic check.
7893 * Fill in buffer attributes depending on the address here, but only on
7894 * newly created planes since they're not being used by DC yet and this
7895 * won't modify global state.
7897 dm_plane_state_old = to_dm_plane_state(plane->state);
7898 dm_plane_state_new = to_dm_plane_state(new_state);
7900 if (dm_plane_state_new->dc_state &&
7901 dm_plane_state_old->dc_state != dm_plane_state_new->dc_state) {
7902 struct dc_plane_state *plane_state =
7903 dm_plane_state_new->dc_state;
7904 bool force_disable_dcc = !plane_state->dcc.enable;
7906 fill_plane_buffer_attributes(
7907 adev, afb, plane_state->format, plane_state->rotation,
7909 &plane_state->tiling_info, &plane_state->plane_size,
7910 &plane_state->dcc, &plane_state->address,
7911 afb->tmz_surface, force_disable_dcc);
7917 amdgpu_bo_unpin(rbo);
7920 amdgpu_bo_unreserve(rbo);
7924 static void dm_plane_helper_cleanup_fb(struct drm_plane *plane,
7925 struct drm_plane_state *old_state)
7927 struct amdgpu_bo *rbo;
7933 rbo = gem_to_amdgpu_bo(old_state->fb->obj[0]);
7934 r = amdgpu_bo_reserve(rbo, false);
7936 DRM_ERROR("failed to reserve rbo before unpin\n");
7940 amdgpu_bo_unpin(rbo);
7941 amdgpu_bo_unreserve(rbo);
7942 amdgpu_bo_unref(&rbo);
7945 static int dm_plane_helper_check_state(struct drm_plane_state *state,
7946 struct drm_crtc_state *new_crtc_state)
7948 struct drm_framebuffer *fb = state->fb;
7949 int min_downscale, max_upscale;
7951 int max_scale = INT_MAX;
7953 /* Plane enabled? Validate viewport and get scaling factors from plane caps. */
7954 if (fb && state->crtc) {
7955 /* Validate viewport to cover the case when only the position changes */
7956 if (state->plane->type != DRM_PLANE_TYPE_CURSOR) {
7957 int viewport_width = state->crtc_w;
7958 int viewport_height = state->crtc_h;
7960 if (state->crtc_x < 0)
7961 viewport_width += state->crtc_x;
7962 else if (state->crtc_x + state->crtc_w > new_crtc_state->mode.crtc_hdisplay)
7963 viewport_width = new_crtc_state->mode.crtc_hdisplay - state->crtc_x;
7965 if (state->crtc_y < 0)
7966 viewport_height += state->crtc_y;
7967 else if (state->crtc_y + state->crtc_h > new_crtc_state->mode.crtc_vdisplay)
7968 viewport_height = new_crtc_state->mode.crtc_vdisplay - state->crtc_y;
7970 if (viewport_width < 0 || viewport_height < 0) {
7971 DRM_DEBUG_ATOMIC("Plane completely outside of screen\n");
7973 } else if (viewport_width < MIN_VIEWPORT_SIZE*2) { /* x2 for width is because of pipe-split. */
7974 DRM_DEBUG_ATOMIC("Viewport width %d smaller than %d\n", viewport_width, MIN_VIEWPORT_SIZE*2);
7976 } else if (viewport_height < MIN_VIEWPORT_SIZE) {
7977 DRM_DEBUG_ATOMIC("Viewport height %d smaller than %d\n", viewport_height, MIN_VIEWPORT_SIZE);
7983 /* Get min/max allowed scaling factors from plane caps. */
7984 get_min_max_dc_plane_scaling(state->crtc->dev, fb,
7985 &min_downscale, &max_upscale);
7987 * Convert to drm convention: 16.16 fixed point, instead of dc's
7988 * 1.0 == 1000. Also drm scaling is src/dst instead of dc's
7989 * dst/src, so min_scale = 1.0 / max_upscale, etc.
7991 min_scale = (1000 << 16) / max_upscale;
7992 max_scale = (1000 << 16) / min_downscale;
7995 return drm_atomic_helper_check_plane_state(
7996 state, new_crtc_state, min_scale, max_scale, true, true);
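/*
 * Numeric sketch of the fixed-point conversion above (illustrative, not
 * driver code): DC expresses scaling caps as 1000 == 1.0, DRM as 16.16
 * fixed point with the ratio inverted (src/dst instead of dst/src).
 */
static int example_dc_to_drm_scale(int dc_factor)
{
	/* dc max_upscale 16000 (16.0x) -> drm min_scale 4096 == 1/16 in 16.16 */
	return (1000 << 16) / dc_factor;
}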
7999 static int dm_plane_atomic_check(struct drm_plane *plane,
8000 struct drm_atomic_state *state)
8002 struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state,
8004 struct amdgpu_device *adev = drm_to_adev(plane->dev);
8005 struct dc *dc = adev->dm.dc;
8006 struct dm_plane_state *dm_plane_state;
8007 struct dc_scaling_info scaling_info;
8008 struct drm_crtc_state *new_crtc_state;
8011 trace_amdgpu_dm_plane_atomic_check(new_plane_state);
8013 dm_plane_state = to_dm_plane_state(new_plane_state);
8015 if (!dm_plane_state->dc_state)
8019 drm_atomic_get_new_crtc_state(state,
8020 new_plane_state->crtc);
8021 if (!new_crtc_state)
8024 ret = dm_plane_helper_check_state(new_plane_state, new_crtc_state);
8028 ret = fill_dc_scaling_info(adev, new_plane_state, &scaling_info);
8032 if (dc_validate_plane(dc, dm_plane_state->dc_state) == DC_OK)
8038 static int dm_plane_atomic_async_check(struct drm_plane *plane,
8039 struct drm_atomic_state *state)
8041 /* Only support async updates on cursor planes. */
8042 if (plane->type != DRM_PLANE_TYPE_CURSOR)
8048 static void dm_plane_atomic_async_update(struct drm_plane *plane,
8049 struct drm_atomic_state *state)
8051 struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state,
8053 struct drm_plane_state *old_state =
8054 drm_atomic_get_old_plane_state(state, plane);
8056 trace_amdgpu_dm_atomic_update_cursor(new_state);
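/*
 * Async cursor updates bypass the full atomic commit: swap the new fb
 * and coordinates directly into the committed plane state, then
 * program the cursor hardware immediately.
 */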
8058 swap(plane->state->fb, new_state->fb);
8060 plane->state->src_x = new_state->src_x;
8061 plane->state->src_y = new_state->src_y;
8062 plane->state->src_w = new_state->src_w;
8063 plane->state->src_h = new_state->src_h;
8064 plane->state->crtc_x = new_state->crtc_x;
8065 plane->state->crtc_y = new_state->crtc_y;
8066 plane->state->crtc_w = new_state->crtc_w;
8067 plane->state->crtc_h = new_state->crtc_h;
8069 handle_cursor_update(plane, old_state);
8072 static const struct drm_plane_helper_funcs dm_plane_helper_funcs = {
8073 .prepare_fb = dm_plane_helper_prepare_fb,
8074 .cleanup_fb = dm_plane_helper_cleanup_fb,
8075 .atomic_check = dm_plane_atomic_check,
8076 .atomic_async_check = dm_plane_atomic_async_check,
8077 .atomic_async_update = dm_plane_atomic_async_update
8081 * TODO: these are currently initialized to rgb formats only.
8082 * For future use cases we should either initialize them dynamically based on
8083 * plane capabilities, or initialize this array to all formats, so the internal
8084 * drm check will succeed, and let DC implement the proper check.
8086 static const uint32_t rgb_formats[] = {
8087 DRM_FORMAT_XRGB8888,
8088 DRM_FORMAT_ARGB8888,
8089 DRM_FORMAT_RGBA8888,
8090 DRM_FORMAT_XRGB2101010,
8091 DRM_FORMAT_XBGR2101010,
8092 DRM_FORMAT_ARGB2101010,
8093 DRM_FORMAT_ABGR2101010,
8094 DRM_FORMAT_XRGB16161616,
8095 DRM_FORMAT_XBGR16161616,
8096 DRM_FORMAT_ARGB16161616,
8097 DRM_FORMAT_ABGR16161616,
8098 DRM_FORMAT_XBGR8888,
8099 DRM_FORMAT_ABGR8888,
8103 static const uint32_t overlay_formats[] = {
8104 DRM_FORMAT_XRGB8888,
8105 DRM_FORMAT_ARGB8888,
8106 DRM_FORMAT_RGBA8888,
8107 DRM_FORMAT_XBGR8888,
8108 DRM_FORMAT_ABGR8888,
8112 static const u32 cursor_formats[] = {
8116 static int get_plane_formats(const struct drm_plane *plane,
8117 const struct dc_plane_cap *plane_cap,
8118 uint32_t *formats, int max_formats)
8120 int i, num_formats = 0;
8123 * TODO: Query support for each group of formats directly from
8124 * DC plane caps. This will require adding more formats to the caps list.
8128 switch (plane->type) {
8129 case DRM_PLANE_TYPE_PRIMARY:
8130 for (i = 0; i < ARRAY_SIZE(rgb_formats); ++i) {
8131 if (num_formats >= max_formats)
8134 formats[num_formats++] = rgb_formats[i];
8137 if (plane_cap && plane_cap->pixel_format_support.nv12)
8138 formats[num_formats++] = DRM_FORMAT_NV12;
8139 if (plane_cap && plane_cap->pixel_format_support.p010)
8140 formats[num_formats++] = DRM_FORMAT_P010;
8141 if (plane_cap && plane_cap->pixel_format_support.fp16) {
8142 formats[num_formats++] = DRM_FORMAT_XRGB16161616F;
8143 formats[num_formats++] = DRM_FORMAT_ARGB16161616F;
8144 formats[num_formats++] = DRM_FORMAT_XBGR16161616F;
8145 formats[num_formats++] = DRM_FORMAT_ABGR16161616F;
8149 case DRM_PLANE_TYPE_OVERLAY:
8150 for (i = 0; i < ARRAY_SIZE(overlay_formats); ++i) {
8151 if (num_formats >= max_formats)
8154 formats[num_formats++] = overlay_formats[i];
8158 case DRM_PLANE_TYPE_CURSOR:
8159 for (i = 0; i < ARRAY_SIZE(cursor_formats); ++i) {
8160 if (num_formats >= max_formats)
8163 formats[num_formats++] = cursor_formats[i];
8171 static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
8172 struct drm_plane *plane,
8173 unsigned long possible_crtcs,
8174 const struct dc_plane_cap *plane_cap)
8176 uint32_t formats[32];
8179 unsigned int supported_rotations;
8180 uint64_t *modifiers = NULL;
8182 num_formats = get_plane_formats(plane, plane_cap, formats,
8183 ARRAY_SIZE(formats));
8185 res = get_plane_modifiers(dm->adev, plane->type, &modifiers);
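/*
 * If no modifier list could be built for this plane type, advertise
 * the device as not supporting framebuffer modifiers so userspace
 * falls back to modifier-less framebuffers.
 */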
8189 if (modifiers == NULL)
8190 adev_to_drm(dm->adev)->mode_config.fb_modifiers_not_supported = true;
8192 res = drm_universal_plane_init(adev_to_drm(dm->adev), plane, possible_crtcs,
8193 &dm_plane_funcs, formats, num_formats,
8194 modifiers, plane->type, NULL);
8199 if (plane->type == DRM_PLANE_TYPE_OVERLAY &&
8200 plane_cap && plane_cap->per_pixel_alpha) {
8201 unsigned int blend_caps = BIT(DRM_MODE_BLEND_PIXEL_NONE) |
8202 BIT(DRM_MODE_BLEND_PREMULTI) |
8203 BIT(DRM_MODE_BLEND_COVERAGE);
8205 drm_plane_create_alpha_property(plane);
8206 drm_plane_create_blend_mode_property(plane, blend_caps);
8209 if (plane->type == DRM_PLANE_TYPE_PRIMARY &&
8211 (plane_cap->pixel_format_support.nv12 ||
8212 plane_cap->pixel_format_support.p010)) {
8213 /* This only affects YUV formats. */
8214 drm_plane_create_color_properties(
8216 BIT(DRM_COLOR_YCBCR_BT601) |
8217 BIT(DRM_COLOR_YCBCR_BT709) |
8218 BIT(DRM_COLOR_YCBCR_BT2020),
8219 BIT(DRM_COLOR_YCBCR_LIMITED_RANGE) |
8220 BIT(DRM_COLOR_YCBCR_FULL_RANGE),
8221 DRM_COLOR_YCBCR_BT709, DRM_COLOR_YCBCR_LIMITED_RANGE);
8224 supported_rotations =
8225 DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_90 |
8226 DRM_MODE_ROTATE_180 | DRM_MODE_ROTATE_270;
8228 if (dm->adev->asic_type >= CHIP_BONAIRE &&
8229 plane->type != DRM_PLANE_TYPE_CURSOR)
8230 drm_plane_create_rotation_property(plane, DRM_MODE_ROTATE_0,
8231 supported_rotations);
8233 drm_plane_helper_add(plane, &dm_plane_helper_funcs);
8235 /* Create (reset) the plane state */
8236 if (plane->funcs->reset)
8237 plane->funcs->reset(plane);
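/*
 * Create one CRTC with a dedicated cursor plane, then wire up its
 * helper funcs, color management and the cursor size limits that DC
 * reports in dc->caps.
 */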
8242 static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
8243 struct drm_plane *plane,
8244 uint32_t crtc_index)
8246 struct amdgpu_crtc *acrtc = NULL;
8247 struct drm_plane *cursor_plane;
8251 cursor_plane = kzalloc(sizeof(*cursor_plane), GFP_KERNEL);
8255 cursor_plane->type = DRM_PLANE_TYPE_CURSOR;
8256 res = amdgpu_dm_plane_init(dm, cursor_plane, 0, NULL);
8258 acrtc = kzalloc(sizeof(struct amdgpu_crtc), GFP_KERNEL);
8262 res = drm_crtc_init_with_planes(
8267 &amdgpu_dm_crtc_funcs, NULL);
8272 drm_crtc_helper_add(&acrtc->base, &amdgpu_dm_crtc_helper_funcs);
8274 /* Create (reset) the crtc state */
8275 if (acrtc->base.funcs->reset)
8276 acrtc->base.funcs->reset(&acrtc->base);
8278 acrtc->max_cursor_width = dm->adev->dm.dc->caps.max_cursor_size;
8279 acrtc->max_cursor_height = dm->adev->dm.dc->caps.max_cursor_size;
8281 acrtc->crtc_id = crtc_index;
8282 acrtc->base.enabled = false;
8283 acrtc->otg_inst = -1;
8285 dm->adev->mode_info.crtcs[crtc_index] = acrtc;
8286 drm_crtc_enable_color_mgmt(&acrtc->base, MAX_COLOR_LUT_ENTRIES,
8287 true, MAX_COLOR_LUT_ENTRIES);
8288 drm_mode_crtc_set_gamma_size(&acrtc->base, MAX_COLOR_LEGACY_LUT_ENTRIES);
8294 kfree(cursor_plane);
8299 static int to_drm_connector_type(enum signal_type st)
8302 case SIGNAL_TYPE_HDMI_TYPE_A:
8303 return DRM_MODE_CONNECTOR_HDMIA;
8304 case SIGNAL_TYPE_EDP:
8305 return DRM_MODE_CONNECTOR_eDP;
8306 case SIGNAL_TYPE_LVDS:
8307 return DRM_MODE_CONNECTOR_LVDS;
8308 case SIGNAL_TYPE_RGB:
8309 return DRM_MODE_CONNECTOR_VGA;
8310 case SIGNAL_TYPE_DISPLAY_PORT:
8311 case SIGNAL_TYPE_DISPLAY_PORT_MST:
8312 return DRM_MODE_CONNECTOR_DisplayPort;
8313 case SIGNAL_TYPE_DVI_DUAL_LINK:
8314 case SIGNAL_TYPE_DVI_SINGLE_LINK:
8315 return DRM_MODE_CONNECTOR_DVID;
8316 case SIGNAL_TYPE_VIRTUAL:
8317 return DRM_MODE_CONNECTOR_VIRTUAL;
8320 return DRM_MODE_CONNECTOR_Unknown;
8324 static struct drm_encoder *amdgpu_dm_connector_to_encoder(struct drm_connector *connector)
8326 struct drm_encoder *encoder;
8328 /* There is only one encoder per connector */
8329 drm_connector_for_each_possible_encoder(connector, encoder)
8335 static void amdgpu_dm_get_native_mode(struct drm_connector *connector)
8337 struct drm_encoder *encoder;
8338 struct amdgpu_encoder *amdgpu_encoder;
8340 encoder = amdgpu_dm_connector_to_encoder(connector);
8342 if (encoder == NULL)
8345 amdgpu_encoder = to_amdgpu_encoder(encoder);
8347 amdgpu_encoder->native_mode.clock = 0;
8349 if (!list_empty(&connector->probed_modes)) {
8350 struct drm_display_mode *preferred_mode = NULL;
8352 list_for_each_entry(preferred_mode,
8353 &connector->probed_modes,
8355 if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED)
8356 amdgpu_encoder->native_mode = *preferred_mode;
8364 static struct drm_display_mode *
8365 amdgpu_dm_create_common_mode(struct drm_encoder *encoder,
8367 int hdisplay, int vdisplay)
8369 struct drm_device *dev = encoder->dev;
8370 struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
8371 struct drm_display_mode *mode = NULL;
8372 struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
8374 mode = drm_mode_duplicate(dev, native_mode);
8379 mode->hdisplay = hdisplay;
8380 mode->vdisplay = vdisplay;
8381 mode->type &= ~DRM_MODE_TYPE_PREFERRED;
8382 strscpy(mode->name, name, DRM_DISPLAY_MODE_LEN);
8388 static void amdgpu_dm_connector_add_common_modes(struct drm_encoder *encoder,
8389 struct drm_connector *connector)
8391 struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
8392 struct drm_display_mode *mode = NULL;
8393 struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
8394 struct amdgpu_dm_connector *amdgpu_dm_connector =
8395 to_amdgpu_dm_connector(connector);
8399 char name[DRM_DISPLAY_MODE_LEN];
8402 } common_modes[] = {
8403 { "640x480", 640, 480},
8404 { "800x600", 800, 600},
8405 { "1024x768", 1024, 768},
8406 { "1280x720", 1280, 720},
8407 { "1280x800", 1280, 800},
8408 {"1280x1024", 1280, 1024},
8409 { "1440x900", 1440, 900},
8410 {"1680x1050", 1680, 1050},
8411 {"1600x1200", 1600, 1200},
8412 {"1920x1080", 1920, 1080},
8413 {"1920x1200", 1920, 1200}
8416 n = ARRAY_SIZE(common_modes);
8418 for (i = 0; i < n; i++) {
8419 struct drm_display_mode *curmode = NULL;
8420 bool mode_existed = false;
8422 if (common_modes[i].w > native_mode->hdisplay ||
8423 common_modes[i].h > native_mode->vdisplay ||
8424 (common_modes[i].w == native_mode->hdisplay &&
8425 common_modes[i].h == native_mode->vdisplay))
8428 list_for_each_entry(curmode, &connector->probed_modes, head) {
8429 if (common_modes[i].w == curmode->hdisplay &&
8430 common_modes[i].h == curmode->vdisplay) {
8431 mode_existed = true;
8439 mode = amdgpu_dm_create_common_mode(encoder,
8440 common_modes[i].name, common_modes[i].w,
8445 drm_mode_probed_add(connector, mode);
8446 amdgpu_dm_connector->num_modes++;
8450 static void amdgpu_set_panel_orientation(struct drm_connector *connector)
8452 struct drm_encoder *encoder;
8453 struct amdgpu_encoder *amdgpu_encoder;
8454 const struct drm_display_mode *native_mode;
8456 if (connector->connector_type != DRM_MODE_CONNECTOR_eDP &&
8457 connector->connector_type != DRM_MODE_CONNECTOR_LVDS)
8460 encoder = amdgpu_dm_connector_to_encoder(connector);
8464 amdgpu_encoder = to_amdgpu_encoder(encoder);
8466 native_mode = &amdgpu_encoder->native_mode;
8467 if (native_mode->hdisplay == 0 || native_mode->vdisplay == 0)
8470 drm_connector_set_panel_orientation_with_quirk(connector,
8471 DRM_MODE_PANEL_ORIENTATION_UNKNOWN,
8472 native_mode->hdisplay,
8473 native_mode->vdisplay);
8476 static void amdgpu_dm_connector_ddc_get_modes(struct drm_connector *connector,
8479 struct amdgpu_dm_connector *amdgpu_dm_connector =
8480 to_amdgpu_dm_connector(connector);
8483 /* empty probed_modes */
8484 INIT_LIST_HEAD(&connector->probed_modes);
8485 amdgpu_dm_connector->num_modes =
8486 drm_add_edid_modes(connector, edid);
8488 /* Sort the probed modes before calling
8489 * amdgpu_dm_get_native_mode(), since an EDID can have
8490 * more than one preferred mode. Modes that come
8491 * later in the probed mode list could be of a higher,
8492 * preferred resolution: for example, 3840x2160 in the
8493 * base EDID preferred timing, with a 4096x2160
8494 * preferred resolution in a DID extension block later.
8496 drm_mode_sort(&connector->probed_modes);
8497 amdgpu_dm_get_native_mode(connector);
8499 /* Freesync capabilities are reset by calling
8500 * drm_add_edid_modes() and need to be restored here.
8503 amdgpu_dm_update_freesync_caps(connector, edid);
8505 amdgpu_set_panel_orientation(connector);
8507 amdgpu_dm_connector->num_modes = 0;
8511 static bool is_duplicate_mode(struct amdgpu_dm_connector *aconnector,
8512 struct drm_display_mode *mode)
8514 struct drm_display_mode *m;
8516 list_for_each_entry(m, &aconnector->base.probed_modes, head) {
8517 if (drm_mode_equal(m, mode))
8524 static uint add_fs_modes(struct amdgpu_dm_connector *aconnector)
8526 const struct drm_display_mode *m;
8527 struct drm_display_mode *new_mode;
8529 uint32_t new_modes_count = 0;
8531 /* Standard FPS values
8540 * 60 - Commonly used
8541 * 48,72,96,120 - Multiples of 24
8543 static const uint32_t common_rates[] = {
8544 23976, 24000, 25000, 29970, 30000,
8545 48000, 50000, 60000, 72000, 96000, 120000
8549 * Find the mode with the highest refresh rate at the same resolution
8550 * as the preferred mode. Some monitors report a preferred mode
8551 * with a lower refresh rate than the highest one supported.
8554 m = get_highest_refresh_rate_mode(aconnector, true);
8558 for (i = 0; i < ARRAY_SIZE(common_rates); i++) {
8559 uint64_t target_vtotal, target_vtotal_diff;
8562 if (drm_mode_vrefresh(m) * 1000 < common_rates[i])
8565 if (common_rates[i] < aconnector->min_vfreq * 1000 ||
8566 common_rates[i] > aconnector->max_vfreq * 1000)
8569 num = (unsigned long long)m->clock * 1000 * 1000;
8570 den = common_rates[i] * (unsigned long long)m->htotal;
8571 target_vtotal = div_u64(num, den);
8572 target_vtotal_diff = target_vtotal - m->vtotal;
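/*
 * Illustrative numbers: for a 1920x1080@60 base mode with a
 * 148500 kHz clock, htotal 2200 and vtotal 1125, a 48 Hz target
 * gives target_vtotal = 148500 * 1000 * 1000 / (48000 * 2200) = 1406,
 * i.e. the front porch is stretched by 281 lines at the same
 * pixel clock.
 */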
8574 /* Check for illegal modes */
8575 if (m->vsync_start + target_vtotal_diff < m->vdisplay ||
8576 m->vsync_end + target_vtotal_diff < m->vsync_start ||
8577 m->vtotal + target_vtotal_diff < m->vsync_end)
8580 new_mode = drm_mode_duplicate(aconnector->base.dev, m);
8584 new_mode->vtotal += (u16)target_vtotal_diff;
8585 new_mode->vsync_start += (u16)target_vtotal_diff;
8586 new_mode->vsync_end += (u16)target_vtotal_diff;
8587 new_mode->type &= ~DRM_MODE_TYPE_PREFERRED;
8588 new_mode->type |= DRM_MODE_TYPE_DRIVER;
8590 if (!is_duplicate_mode(aconnector, new_mode)) {
8591 drm_mode_probed_add(&aconnector->base, new_mode);
8592 new_modes_count += 1;
8594 drm_mode_destroy(aconnector->base.dev, new_mode);
8597 return new_modes_count;
8600 static void amdgpu_dm_connector_add_freesync_modes(struct drm_connector *connector,
8603 struct amdgpu_dm_connector *amdgpu_dm_connector =
8604 to_amdgpu_dm_connector(connector);
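/*
 * Only synthesize fixed-rate FreeSync modes when the panel reports a
 * VRR range wider than 10 Hz; narrower ranges give the stretched
 * timings in add_fs_modes() little room to work with.
 */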
8609 if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10)
8610 amdgpu_dm_connector->num_modes +=
8611 add_fs_modes(amdgpu_dm_connector);
8614 static int amdgpu_dm_connector_get_modes(struct drm_connector *connector)
8616 struct amdgpu_dm_connector *amdgpu_dm_connector =
8617 to_amdgpu_dm_connector(connector);
8618 struct drm_encoder *encoder;
8619 struct edid *edid = amdgpu_dm_connector->edid;
8621 encoder = amdgpu_dm_connector_to_encoder(connector);
8623 if (!drm_edid_is_valid(edid)) {
8624 amdgpu_dm_connector->num_modes =
8625 drm_add_modes_noedid(connector, 640, 480);
8627 amdgpu_dm_connector_ddc_get_modes(connector, edid);
8628 amdgpu_dm_connector_add_common_modes(encoder, connector);
8629 amdgpu_dm_connector_add_freesync_modes(connector, edid);
8631 amdgpu_dm_fbc_init(connector);
8633 return amdgpu_dm_connector->num_modes;
8636 void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
8637 struct amdgpu_dm_connector *aconnector,
8639 struct dc_link *link,
8642 struct amdgpu_device *adev = drm_to_adev(dm->ddev);
8645 * Some of the properties below require access to state, like bpc.
8646 * Allocate some default initial connector state with our reset helper.
8648 if (aconnector->base.funcs->reset)
8649 aconnector->base.funcs->reset(&aconnector->base);
8651 aconnector->connector_id = link_index;
8652 aconnector->dc_link = link;
8653 aconnector->base.interlace_allowed = false;
8654 aconnector->base.doublescan_allowed = false;
8655 aconnector->base.stereo_allowed = false;
8656 aconnector->base.dpms = DRM_MODE_DPMS_OFF;
8657 aconnector->hpd.hpd = AMDGPU_HPD_NONE; /* not used */
8658 aconnector->audio_inst = -1;
8659 mutex_init(&aconnector->hpd_lock);
8662 * Configure HPD hot-plug support. connector->polled defaults to 0,
8663 * which means HPD hot plug is not supported.
8665 switch (connector_type) {
8666 case DRM_MODE_CONNECTOR_HDMIA:
8667 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
8668 aconnector->base.ycbcr_420_allowed =
8669 link->link_enc->features.hdmi_ycbcr420_supported;
8671 case DRM_MODE_CONNECTOR_DisplayPort:
8672 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
8673 link->link_enc = link_enc_cfg_get_link_enc(link);
8674 ASSERT(link->link_enc);
8676 aconnector->base.ycbcr_420_allowed =
8677 link->link_enc->features.dp_ycbcr420_supported;
8679 case DRM_MODE_CONNECTOR_DVID:
8680 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
8686 drm_object_attach_property(&aconnector->base.base,
8687 dm->ddev->mode_config.scaling_mode_property,
8688 DRM_MODE_SCALE_NONE);
8690 drm_object_attach_property(&aconnector->base.base,
8691 adev->mode_info.underscan_property,
8693 drm_object_attach_property(&aconnector->base.base,
8694 adev->mode_info.underscan_hborder_property,
8696 drm_object_attach_property(&aconnector->base.base,
8697 adev->mode_info.underscan_vborder_property,
8700 if (!aconnector->mst_port)
8701 drm_connector_attach_max_bpc_property(&aconnector->base, 8, 16);
8703 /* This defaults to the max in the range, but we want 8bpc for non-edp. */
8704 aconnector->base.state->max_bpc = (connector_type == DRM_MODE_CONNECTOR_eDP) ? 16 : 8;
8705 aconnector->base.state->max_requested_bpc = aconnector->base.state->max_bpc;
8707 if (connector_type == DRM_MODE_CONNECTOR_eDP &&
8708 (dc_is_dmcu_initialized(adev->dm.dc) || adev->dm.dc->ctx->dmub_srv)) {
8709 drm_object_attach_property(&aconnector->base.base,
8710 adev->mode_info.abm_level_property, 0);
8713 if (connector_type == DRM_MODE_CONNECTOR_HDMIA ||
8714 connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
8715 connector_type == DRM_MODE_CONNECTOR_eDP) {
8716 drm_connector_attach_hdr_output_metadata_property(&aconnector->base);
8718 if (!aconnector->mst_port)
8719 drm_connector_attach_vrr_capable_property(&aconnector->base);
8721 #ifdef CONFIG_DRM_AMD_DC_HDCP
8722 if (adev->dm.hdcp_workqueue)
8723 drm_connector_attach_content_protection_property(&aconnector->base, true);
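/*
 * I2C transfer over the link's DDC line: each struct i2c_msg is
 * translated into a DC i2c_payload and the whole set is submitted to
 * DC as a single i2c_command.
 */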
8728 static int amdgpu_dm_i2c_xfer(struct i2c_adapter *i2c_adap,
8729 struct i2c_msg *msgs, int num)
8731 struct amdgpu_i2c_adapter *i2c = i2c_get_adapdata(i2c_adap);
8732 struct ddc_service *ddc_service = i2c->ddc_service;
8733 struct i2c_command cmd;
8737 cmd.payloads = kcalloc(num, sizeof(struct i2c_payload), GFP_KERNEL);
8742 cmd.number_of_payloads = num;
8743 cmd.engine = I2C_COMMAND_ENGINE_DEFAULT;
8746 for (i = 0; i < num; i++) {
8747 cmd.payloads[i].write = !(msgs[i].flags & I2C_M_RD);
8748 cmd.payloads[i].address = msgs[i].addr;
8749 cmd.payloads[i].length = msgs[i].len;
8750 cmd.payloads[i].data = msgs[i].buf;
8754 ddc_service->ctx->dc,
8755 ddc_service->link->link_index,
8759 kfree(cmd.payloads);
8763 static u32 amdgpu_dm_i2c_func(struct i2c_adapter *adap)
8765 return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
8768 static const struct i2c_algorithm amdgpu_dm_i2c_algo = {
8769 .master_xfer = amdgpu_dm_i2c_xfer,
8770 .functionality = amdgpu_dm_i2c_func,
8773 static struct amdgpu_i2c_adapter *
8774 create_i2c(struct ddc_service *ddc_service,
8778 struct amdgpu_device *adev = ddc_service->ctx->driver_context;
8779 struct amdgpu_i2c_adapter *i2c;
8781 i2c = kzalloc(sizeof(struct amdgpu_i2c_adapter), GFP_KERNEL);
8784 i2c->base.owner = THIS_MODULE;
8785 i2c->base.class = I2C_CLASS_DDC;
8786 i2c->base.dev.parent = &adev->pdev->dev;
8787 i2c->base.algo = &amdgpu_dm_i2c_algo;
8788 snprintf(i2c->base.name, sizeof(i2c->base.name), "AMDGPU DM i2c hw bus %d", link_index);
8789 i2c_set_adapdata(&i2c->base, i2c);
8790 i2c->ddc_service = ddc_service;
8797 * Note: this function assumes that dc_link_detect() was called for the
8798 * dc_link which will be represented by this aconnector.
8800 static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
8801 struct amdgpu_dm_connector *aconnector,
8802 uint32_t link_index,
8803 struct amdgpu_encoder *aencoder)
8807 struct dc *dc = dm->dc;
8808 struct dc_link *link = dc_get_link_at_index(dc, link_index);
8809 struct amdgpu_i2c_adapter *i2c;
8811 link->priv = aconnector;
8813 DRM_DEBUG_DRIVER("%s()\n", __func__);
8815 i2c = create_i2c(link->ddc, link->link_index, &res);
8817 DRM_ERROR("Failed to create i2c adapter data\n");
8821 aconnector->i2c = i2c;
8822 res = i2c_add_adapter(&i2c->base);
8825 DRM_ERROR("Failed to register hw i2c %d\n", link->link_index);
8829 connector_type = to_drm_connector_type(link->connector_signal);
8831 res = drm_connector_init_with_ddc(
8834 &amdgpu_dm_connector_funcs,
8839 DRM_ERROR("connector_init failed\n");
8840 aconnector->connector_id = -1;
8844 drm_connector_helper_add(
8846 &amdgpu_dm_connector_helper_funcs);
8848 amdgpu_dm_connector_init_helper(
8855 drm_connector_attach_encoder(
8856 &aconnector->base, &aencoder->base);
8858 if (connector_type == DRM_MODE_CONNECTOR_DisplayPort
8859 || connector_type == DRM_MODE_CONNECTOR_eDP)
8860 amdgpu_dm_initialize_dp_connector(dm, aconnector, link->link_index);
8865 aconnector->i2c = NULL;
8870 int amdgpu_dm_get_encoder_crtc_mask(struct amdgpu_device *adev)
8872 switch (adev->mode_info.num_crtc) {
8889 static int amdgpu_dm_encoder_init(struct drm_device *dev,
8890 struct amdgpu_encoder *aencoder,
8891 uint32_t link_index)
8893 struct amdgpu_device *adev = drm_to_adev(dev);
8895 int res = drm_encoder_init(dev,
8897 &amdgpu_dm_encoder_funcs,
8898 DRM_MODE_ENCODER_TMDS,
8901 aencoder->base.possible_crtcs = amdgpu_dm_get_encoder_crtc_mask(adev);
8904 aencoder->encoder_id = link_index;
8906 aencoder->encoder_id = -1;
8908 drm_encoder_helper_add(&aencoder->base, &amdgpu_dm_encoder_helper_funcs);
8913 static void manage_dm_interrupts(struct amdgpu_device *adev,
8914 struct amdgpu_crtc *acrtc,
8918 * We have no guarantee that the frontend index maps to the same
8919 * backend index - some even map to more than one.
8921 * TODO: Use a different interrupt or check DC itself for the mapping.
8924 amdgpu_display_crtc_idx_to_irq_type(
8929 drm_crtc_vblank_on(&acrtc->base);
8932 &adev->pageflip_irq,
8934 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
8941 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
8949 &adev->pageflip_irq,
8951 drm_crtc_vblank_off(&acrtc->base);
8955 static void dm_update_pflip_irq_state(struct amdgpu_device *adev,
8956 struct amdgpu_crtc *acrtc)
8959 amdgpu_display_crtc_idx_to_irq_type(adev, acrtc->crtc_id);
8962 * This reads the current state for the IRQ and forcibly reapplies
8963 * the setting to hardware.
8965 amdgpu_irq_update(adev, &adev->pageflip_irq, irq_type);
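/*
 * Returns true when underscan/scaling settings differ between the old
 * and new connector state in a way that requires the stream's src/dst
 * rectangles to be reprogrammed.
 */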
8969 is_scaling_state_different(const struct dm_connector_state *dm_state,
8970 const struct dm_connector_state *old_dm_state)
8972 if (dm_state->scaling != old_dm_state->scaling)
8974 if (!dm_state->underscan_enable && old_dm_state->underscan_enable) {
8975 if (old_dm_state->underscan_hborder != 0 && old_dm_state->underscan_vborder != 0)
8977 } else if (dm_state->underscan_enable && !old_dm_state->underscan_enable) {
8978 if (dm_state->underscan_hborder != 0 && dm_state->underscan_vborder != 0)
8980 } else if (dm_state->underscan_hborder != old_dm_state->underscan_hborder ||
8981 dm_state->underscan_vborder != old_dm_state->underscan_vborder)
8986 #ifdef CONFIG_DRM_AMD_DC_HDCP
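/*
 * Compare the old and new content-protection state and decide whether
 * HDCP needs to be (re)enabled for this connector, normalizing
 * transient cases such as S3 resume and stream re-enable along the way.
 */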
8987 static bool is_content_protection_different(struct drm_connector_state *state,
8988 const struct drm_connector_state *old_state,
8989 const struct drm_connector *connector, struct hdcp_workqueue *hdcp_w)
8991 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
8992 struct dm_connector_state *dm_con_state = to_dm_connector_state(connector->state);
8994 /* Handle: Type0/1 change */
8995 if (old_state->hdcp_content_type != state->hdcp_content_type &&
8996 state->content_protection != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
8997 state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
9001 /* CP is being re-enabled, ignore this
9003 * Handles: ENABLED -> DESIRED
9005 if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED &&
9006 state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) {
9007 state->content_protection = DRM_MODE_CONTENT_PROTECTION_ENABLED;
9011 /* S3 resume case, since old state will always be 0 (UNDESIRED) and the restored state will be ENABLED
9013 * Handles: UNDESIRED -> ENABLED
9015 if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_UNDESIRED &&
9016 state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
9017 state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
9019 /* Stream removed and re-enabled
9021 * Can sometimes overlap with the HPD case,
9022 * thus set update_hdcp to false to avoid
9023 * setting HDCP multiple times.
9025 * Handles: DESIRED -> DESIRED (Special case)
9027 if (!(old_state->crtc && old_state->crtc->enabled) &&
9028 state->crtc && state->crtc->enabled &&
9029 connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) {
9030 dm_con_state->update_hdcp = false;
9034 /* Hot-plug, headless S3, DPMS
9036 * Only start HDCP if the display is connected/enabled.
9037 * update_hdcp flag will be set to false until the next
9040 * Handles: DESIRED -> DESIRED (Special case)
9042 if (dm_con_state->update_hdcp && state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED &&
9043 connector->dpms == DRM_MODE_DPMS_ON && aconnector->dc_sink != NULL) {
9044 dm_con_state->update_hdcp = false;
9049 * Handles: UNDESIRED -> UNDESIRED
9050 * DESIRED -> DESIRED
9051 * ENABLED -> ENABLED
9053 if (old_state->content_protection == state->content_protection)
9057 * Handles: UNDESIRED -> DESIRED
9058 * DESIRED -> UNDESIRED
9059 * ENABLED -> UNDESIRED
9061 if (state->content_protection != DRM_MODE_CONTENT_PROTECTION_ENABLED)
9065 * Handles: DESIRED -> ENABLED
9071 static void remove_stream(struct amdgpu_device *adev,
9072 struct amdgpu_crtc *acrtc,
9073 struct dc_stream_state *stream)
9075 /* this is the update mode case */
9077 acrtc->otg_inst = -1;
9078 acrtc->enabled = false;
9081 static int get_cursor_position(struct drm_plane *plane, struct drm_crtc *crtc,
9082 struct dc_cursor_position *position)
9084 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
9086 int xorigin = 0, yorigin = 0;
9088 if (!crtc || !plane->state->fb)
9091 if ((plane->state->crtc_w > amdgpu_crtc->max_cursor_width) ||
9092 (plane->state->crtc_h > amdgpu_crtc->max_cursor_height)) {
9093 DRM_ERROR("%s: bad cursor width or height %d x %d\n",
9095 plane->state->crtc_w,
9096 plane->state->crtc_h);
9100 x = plane->state->crtc_x;
9101 y = plane->state->crtc_y;
9103 if (x <= -amdgpu_crtc->max_cursor_width ||
9104 y <= -amdgpu_crtc->max_cursor_height)
9108 xorigin = min(-x, amdgpu_crtc->max_cursor_width - 1);
9112 yorigin = min(-y, amdgpu_crtc->max_cursor_height - 1);
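/*
 * A cursor hanging off the top/left edge is handled by clamping the
 * position to zero and folding the overhang into the hotspot instead.
 */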
9115 position->enable = true;
9116 position->translate_by_source = true;
9119 position->x_hotspot = xorigin;
9120 position->y_hotspot = yorigin;
9125 static void handle_cursor_update(struct drm_plane *plane,
9126 struct drm_plane_state *old_plane_state)
9128 struct amdgpu_device *adev = drm_to_adev(plane->dev);
9129 struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(plane->state->fb);
9130 struct drm_crtc *crtc = afb ? plane->state->crtc : old_plane_state->crtc;
9131 struct dm_crtc_state *crtc_state = crtc ? to_dm_crtc_state(crtc->state) : NULL;
9132 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
9133 uint64_t address = afb ? afb->address : 0;
9134 struct dc_cursor_position position = {0};
9135 struct dc_cursor_attributes attributes;
9138 if (!plane->state->fb && !old_plane_state->fb)
9141 DC_LOG_CURSOR("%s: crtc_id=%d with size %d to %d\n",
9143 amdgpu_crtc->crtc_id,
9144 plane->state->crtc_w,
9145 plane->state->crtc_h);
9147 ret = get_cursor_position(plane, crtc, &position);
9151 if (!position.enable) {
9152 /* turn off cursor */
9153 if (crtc_state && crtc_state->stream) {
9154 mutex_lock(&adev->dm.dc_lock);
9155 dc_stream_set_cursor_position(crtc_state->stream,
9157 mutex_unlock(&adev->dm.dc_lock);
9162 amdgpu_crtc->cursor_width = plane->state->crtc_w;
9163 amdgpu_crtc->cursor_height = plane->state->crtc_h;
9165 memset(&attributes, 0, sizeof(attributes));
9166 attributes.address.high_part = upper_32_bits(address);
9167 attributes.address.low_part = lower_32_bits(address);
9168 attributes.width = plane->state->crtc_w;
9169 attributes.height = plane->state->crtc_h;
9170 attributes.color_format = CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA;
9171 attributes.rotation_angle = 0;
9172 attributes.attribute_flags.value = 0;
9174 attributes.pitch = afb->base.pitches[0] / afb->base.format->cpp[0];
9176 if (crtc_state->stream) {
9177 mutex_lock(&adev->dm.dc_lock);
9178 if (!dc_stream_set_cursor_attributes(crtc_state->stream,
9180 DRM_ERROR("DC failed to set cursor attributes\n");
9182 if (!dc_stream_set_cursor_position(crtc_state->stream,
9184 DRM_ERROR("DC failed to set cursor position\n");
9185 mutex_unlock(&adev->dm.dc_lock);
9189 static void prepare_flip_isr(struct amdgpu_crtc *acrtc)
9192 assert_spin_locked(&acrtc->base.dev->event_lock);
9193 WARN_ON(acrtc->event);
9195 acrtc->event = acrtc->base.state->event;
9197 /* Set the flip status */
9198 acrtc->pflip_status = AMDGPU_FLIP_SUBMITTED;
9200 /* Mark this event as consumed */
9201 acrtc->base.state->event = NULL;
9203 DC_LOG_PFLIP("crtc:%d, pflip_stat:AMDGPU_FLIP_SUBMITTED\n",
9207 static void update_freesync_state_on_stream(
9208 struct amdgpu_display_manager *dm,
9209 struct dm_crtc_state *new_crtc_state,
9210 struct dc_stream_state *new_stream,
9211 struct dc_plane_state *surface,
9212 u32 flip_timestamp_in_us)
9214 struct mod_vrr_params vrr_params;
9215 struct dc_info_packet vrr_infopacket = {0};
9216 struct amdgpu_device *adev = dm->adev;
9217 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_crtc_state->base.crtc);
9218 unsigned long flags;
9219 bool pack_sdp_v1_3 = false;
9225 * TODO: Determine why min/max totals and vrefresh can be 0 here.
9226 * For now it's sufficient to just guard against these conditions.
9229 if (!new_stream->timing.h_total || !new_stream->timing.v_total)
9232 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
9233 vrr_params = acrtc->dm_irq_params.vrr_params;
9236 mod_freesync_handle_preflip(
9237 dm->freesync_module,
9240 flip_timestamp_in_us,
9243 if (adev->family < AMDGPU_FAMILY_AI &&
9244 amdgpu_dm_vrr_active(new_crtc_state)) {
9245 mod_freesync_handle_v_update(dm->freesync_module,
9246 new_stream, &vrr_params);
9248 /* Need to call this before the frame ends. */
9249 dc_stream_adjust_vmin_vmax(dm->dc,
9250 new_crtc_state->stream,
9251 &vrr_params.adjust);
9255 mod_freesync_build_vrr_infopacket(
9256 dm->freesync_module,
9260 TRANSFER_FUNC_UNKNOWN,
9264 new_crtc_state->freesync_timing_changed |=
9265 (memcmp(&acrtc->dm_irq_params.vrr_params.adjust,
9267 sizeof(vrr_params.adjust)) != 0);
9269 new_crtc_state->freesync_vrr_info_changed |=
9270 (memcmp(&new_crtc_state->vrr_infopacket,
9272 sizeof(vrr_infopacket)) != 0);
9274 acrtc->dm_irq_params.vrr_params = vrr_params;
9275 new_crtc_state->vrr_infopacket = vrr_infopacket;
9277 new_stream->adjust = acrtc->dm_irq_params.vrr_params.adjust;
9278 new_stream->vrr_infopacket = vrr_infopacket;
9280 if (new_crtc_state->freesync_vrr_info_changed)
9281 DRM_DEBUG_KMS("VRR packet update: crtc=%u enabled=%d state=%d",
9282 new_crtc_state->base.crtc->base.id,
9283 (int)new_crtc_state->base.vrr_enabled,
9284 (int)vrr_params.state);
9286 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
9289 static void update_stream_irq_parameters(
9290 struct amdgpu_display_manager *dm,
9291 struct dm_crtc_state *new_crtc_state)
9293 struct dc_stream_state *new_stream = new_crtc_state->stream;
9294 struct mod_vrr_params vrr_params;
9295 struct mod_freesync_config config = new_crtc_state->freesync_config;
9296 struct amdgpu_device *adev = dm->adev;
9297 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_crtc_state->base.crtc);
9298 unsigned long flags;
9304 * TODO: Determine why min/max totals and vrefresh can be 0 here.
9305 * For now it's sufficient to just guard against these conditions.
9307 if (!new_stream->timing.h_total || !new_stream->timing.v_total)
9310 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
9311 vrr_params = acrtc->dm_irq_params.vrr_params;
9313 if (new_crtc_state->vrr_supported &&
9314 config.min_refresh_in_uhz &&
9315 config.max_refresh_in_uhz) {
9317 * if freesync compatible mode was set, config.state will be set
9320 if (config.state == VRR_STATE_ACTIVE_FIXED && config.fixed_refresh_in_uhz &&
9321 (!drm_atomic_crtc_needs_modeset(&new_crtc_state->base) ||
9322 new_crtc_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED)) {
9323 vrr_params.max_refresh_in_uhz = config.max_refresh_in_uhz;
9324 vrr_params.min_refresh_in_uhz = config.min_refresh_in_uhz;
9325 vrr_params.fixed_refresh_in_uhz = config.fixed_refresh_in_uhz;
9326 vrr_params.state = VRR_STATE_ACTIVE_FIXED;
9328 config.state = new_crtc_state->base.vrr_enabled ?
9329 VRR_STATE_ACTIVE_VARIABLE :
9333 config.state = VRR_STATE_UNSUPPORTED;
9336 mod_freesync_build_vrr_params(dm->freesync_module,
9338 &config, &vrr_params);
9340 new_crtc_state->freesync_timing_changed |=
9341 (memcmp(&acrtc->dm_irq_params.vrr_params.adjust,
9342 &vrr_params.adjust, sizeof(vrr_params.adjust)) != 0);
9344 new_crtc_state->freesync_config = config;
9345 /* Copy state for access from DM IRQ handler */
9346 acrtc->dm_irq_params.freesync_config = config;
9347 acrtc->dm_irq_params.active_planes = new_crtc_state->active_planes;
9348 acrtc->dm_irq_params.vrr_params = vrr_params;
9349 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
9352 static void amdgpu_dm_handle_vrr_transition(struct dm_crtc_state *old_state,
9353 struct dm_crtc_state *new_state)
9355 bool old_vrr_active = amdgpu_dm_vrr_active(old_state);
9356 bool new_vrr_active = amdgpu_dm_vrr_active(new_state);
9358 if (!old_vrr_active && new_vrr_active) {
9359 /* Transition VRR inactive -> active:
9360 * While VRR is active, we must not disable vblank irq, as a
9361 * reenable after disable would compute bogus vblank/pflip
9362 * timestamps if the reenable happened inside the display front-porch.
9364 * We also need the vupdate irq for the actual core vblank handling at the end of vblank.
9367 dm_set_vupdate_irq(new_state->base.crtc, true);
9368 drm_crtc_vblank_get(new_state->base.crtc);
9369 DRM_DEBUG_DRIVER("%s: crtc=%u VRR off->on: Get vblank ref\n",
9370 __func__, new_state->base.crtc->base.id);
9371 } else if (old_vrr_active && !new_vrr_active) {
9372 /* Transition VRR active -> inactive:
9373 * Allow vblank irq disable again for fixed refresh rate.
9375 dm_set_vupdate_irq(new_state->base.crtc, false);
9376 drm_crtc_vblank_put(new_state->base.crtc);
9377 DRM_DEBUG_DRIVER("%s: crtc=%u VRR on->off: Drop vblank ref\n",
9378 __func__, new_state->base.crtc->base.id);
9382 static void amdgpu_dm_commit_cursors(struct drm_atomic_state *state)
9384 struct drm_plane *plane;
9385 struct drm_plane_state *old_plane_state;
9389 * TODO: Make this per-stream so we don't issue redundant updates for
9390 * commits with multiple streams.
9392 for_each_old_plane_in_state(state, plane, old_plane_state, i)
9393 if (plane->type == DRM_PLANE_TYPE_CURSOR)
9394 handle_cursor_update(plane, old_plane_state);
9397 static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
9398 struct dc_state *dc_state,
9399 struct drm_device *dev,
9400 struct amdgpu_display_manager *dm,
9401 struct drm_crtc *pcrtc,
9402 bool wait_for_vblank)
9405 uint64_t timestamp_ns;
9406 struct drm_plane *plane;
9407 struct drm_plane_state *old_plane_state, *new_plane_state;
9408 struct amdgpu_crtc *acrtc_attach = to_amdgpu_crtc(pcrtc);
9409 struct drm_crtc_state *new_pcrtc_state =
9410 drm_atomic_get_new_crtc_state(state, pcrtc);
9411 struct dm_crtc_state *acrtc_state = to_dm_crtc_state(new_pcrtc_state);
9412 struct dm_crtc_state *dm_old_crtc_state =
9413 to_dm_crtc_state(drm_atomic_get_old_crtc_state(state, pcrtc));
9414 int planes_count = 0, vpos, hpos;
9416 unsigned long flags;
9417 struct amdgpu_bo *abo;
9418 uint32_t target_vblank, last_flip_vblank;
9419 bool vrr_active = amdgpu_dm_vrr_active(acrtc_state);
9420 bool cursor_update = false;
9421 bool pflip_present = false;
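/*
 * The per-plane update arrays below are too large for the kernel
 * stack, so all updates for this CRTC are batched in one
 * heap-allocated bundle.
 */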
9423 struct dc_surface_update surface_updates[MAX_SURFACES];
9424 struct dc_plane_info plane_infos[MAX_SURFACES];
9425 struct dc_scaling_info scaling_infos[MAX_SURFACES];
9426 struct dc_flip_addrs flip_addrs[MAX_SURFACES];
9427 struct dc_stream_update stream_update;
9430 bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
9433 dm_error("Failed to allocate update bundle\n");
9438 * Disable the cursor first if we're disabling all the planes.
9439 * It'll remain on the screen after the planes are re-enabled if we don't.
9442 if (acrtc_state->active_planes == 0)
9443 amdgpu_dm_commit_cursors(state);
9445 /* update planes when needed */
9446 for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
9447 struct drm_crtc *crtc = new_plane_state->crtc;
9448 struct drm_crtc_state *new_crtc_state;
9449 struct drm_framebuffer *fb = new_plane_state->fb;
9450 struct amdgpu_framebuffer *afb = (struct amdgpu_framebuffer *)fb;
9451 bool plane_needs_flip;
9452 struct dc_plane_state *dc_plane;
9453 struct dm_plane_state *dm_new_plane_state = to_dm_plane_state(new_plane_state);
9455 /* Cursor plane is handled after stream updates */
9456 if (plane->type == DRM_PLANE_TYPE_CURSOR) {
9457 if ((fb && crtc == pcrtc) ||
9458 (old_plane_state->fb && old_plane_state->crtc == pcrtc))
9459 cursor_update = true;
9464 if (!fb || !crtc || pcrtc != crtc)
9467 new_crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
9468 if (!new_crtc_state->active)
9471 dc_plane = dm_new_plane_state->dc_state;
9473 bundle->surface_updates[planes_count].surface = dc_plane;
9474 if (new_pcrtc_state->color_mgmt_changed) {
9475 bundle->surface_updates[planes_count].gamma = dc_plane->gamma_correction;
9476 bundle->surface_updates[planes_count].in_transfer_func = dc_plane->in_transfer_func;
9477 bundle->surface_updates[planes_count].gamut_remap_matrix = &dc_plane->gamut_remap_matrix;
9480 fill_dc_scaling_info(dm->adev, new_plane_state,
9481 &bundle->scaling_infos[planes_count]);
9483 bundle->surface_updates[planes_count].scaling_info =
9484 &bundle->scaling_infos[planes_count];
9486 plane_needs_flip = old_plane_state->fb && new_plane_state->fb;
9488 pflip_present = pflip_present || plane_needs_flip;
9490 if (!plane_needs_flip) {
9495 abo = gem_to_amdgpu_bo(fb->obj[0]);
9498 * Wait for all fences on this FB. Do limited wait to avoid
9499 * deadlock during GPU reset when this fence will not signal
9500 * but we hold reservation lock for the BO.
9502 r = dma_resv_wait_timeout(abo->tbo.base.resv,
9503 DMA_RESV_USAGE_WRITE, false,
9504 msecs_to_jiffies(5000));
9505 if (unlikely(r <= 0))
9506 DRM_ERROR("Waiting for fences timed out!");
9508 fill_dc_plane_info_and_addr(
9509 dm->adev, new_plane_state,
9511 &bundle->plane_infos[planes_count],
9512 &bundle->flip_addrs[planes_count].address,
9513 afb->tmz_surface, false);
9515 drm_dbg_state(state->dev, "plane: id=%d dcc_en=%d\n",
9516 new_plane_state->plane->index,
9517 bundle->plane_infos[planes_count].dcc.enable);
9519 bundle->surface_updates[planes_count].plane_info =
9520 &bundle->plane_infos[planes_count];
9522 fill_dc_dirty_rects(plane, old_plane_state, new_plane_state,
9524 &bundle->flip_addrs[planes_count]);
9527 * Only allow immediate flips for fast updates that don't
9528 * change FB pitch, DCC state, rotation or mirroring.
9530 bundle->flip_addrs[planes_count].flip_immediate =
9531 crtc->state->async_flip &&
9532 acrtc_state->update_type == UPDATE_TYPE_FAST;
9534 timestamp_ns = ktime_get_ns();
9535 bundle->flip_addrs[planes_count].flip_timestamp_in_us = div_u64(timestamp_ns, 1000);
9536 bundle->surface_updates[planes_count].flip_addr = &bundle->flip_addrs[planes_count];
9537 bundle->surface_updates[planes_count].surface = dc_plane;
9539 if (!bundle->surface_updates[planes_count].surface) {
9540 DRM_ERROR("No surface for CRTC: id=%d\n",
9541 acrtc_attach->crtc_id);
9545 if (plane == pcrtc->primary)
9546 update_freesync_state_on_stream(
9549 acrtc_state->stream,
9551 bundle->flip_addrs[planes_count].flip_timestamp_in_us);
9553 drm_dbg_state(state->dev, "%s Flipping to hi: 0x%x, low: 0x%x\n",
9555 bundle->flip_addrs[planes_count].address.grph.addr.high_part,
9556 bundle->flip_addrs[planes_count].address.grph.addr.low_part);
9562 if (pflip_present) {
9564 /* Use old throttling in non-vrr fixed refresh rate mode
9565 * to keep flip scheduling based on target vblank counts
9566 * working in a backwards compatible way, e.g., for
9567 * clients using the GLX_OML_sync_control extension or
9568 * DRI3/Present extension with defined target_msc.
9570 last_flip_vblank = amdgpu_get_vblank_counter_kms(pcrtc);
9573 /* For variable refresh rate mode only:
9574 * Get vblank of last completed flip to avoid > 1 vrr
9575 * flips per video frame by use of throttling, but allow
9576 * flip programming anywhere in the possibly large
9577 * variable vrr vblank interval for fine-grained flip
9578 * timing control and more opportunity to avoid stutter
9579 * on late submission of flips.
9581 spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
9582 last_flip_vblank = acrtc_attach->dm_irq_params.last_flip_vblank;
9583 spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
9586 target_vblank = last_flip_vblank + wait_for_vblank;
9589 * Wait until we're out of the vertical blank period before the one
9590 * targeted by the flip
9592 while ((acrtc_attach->enabled &&
9593 (amdgpu_display_get_crtc_scanoutpos(dm->ddev, acrtc_attach->crtc_id,
9594 0, &vpos, &hpos, NULL,
9595 NULL, &pcrtc->hwmode)
9596 & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK)) ==
9597 (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK) &&
9598 (int)(target_vblank -
9599 amdgpu_get_vblank_counter_kms(pcrtc)) > 0)) {
9600 usleep_range(1000, 1100);
9604 * Prepare the flip event for the pageflip interrupt to handle.
9606 * This only works in the case where we've already turned on the
9607 * appropriate hardware blocks (eg. HUBP) so in the transition case
9608 * from 0 -> n planes we have to skip a hardware generated event
9609 * and rely on sending it from software.
9611 if (acrtc_attach->base.state->event &&
9612 acrtc_state->active_planes > 0) {
9613 drm_crtc_vblank_get(pcrtc);
9615 spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
9617 WARN_ON(acrtc_attach->pflip_status != AMDGPU_FLIP_NONE);
9618 prepare_flip_isr(acrtc_attach);
9620 spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
9623 if (acrtc_state->stream) {
9624 if (acrtc_state->freesync_vrr_info_changed)
9625 bundle->stream_update.vrr_infopacket =
9626 &acrtc_state->stream->vrr_infopacket;
9628 } else if (cursor_update && acrtc_state->active_planes > 0 &&
9629 acrtc_attach->base.state->event) {
9630 drm_crtc_vblank_get(pcrtc);
9632 spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
9634 acrtc_attach->event = acrtc_attach->base.state->event;
9635 acrtc_attach->base.state->event = NULL;
9637 spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
9640 /* Update the planes if changed or disable if we don't have any. */
9641 if ((planes_count || acrtc_state->active_planes == 0) &&
9642 acrtc_state->stream) {
9644 * If PSR or idle optimizations are enabled then flush out
9645 * any pending work before hardware programming.
9647 if (dm->vblank_control_workqueue)
9648 flush_workqueue(dm->vblank_control_workqueue);
9650 bundle->stream_update.stream = acrtc_state->stream;
9651 if (new_pcrtc_state->mode_changed) {
9652 bundle->stream_update.src = acrtc_state->stream->src;
9653 bundle->stream_update.dst = acrtc_state->stream->dst;
9656 if (new_pcrtc_state->color_mgmt_changed) {
9658 * TODO: This isn't fully correct since we've actually
9659 * already modified the stream in place.
9661 bundle->stream_update.gamut_remap =
9662 &acrtc_state->stream->gamut_remap_matrix;
9663 bundle->stream_update.output_csc_transform =
9664 &acrtc_state->stream->csc_color_matrix;
9665 bundle->stream_update.out_transfer_func =
9666 acrtc_state->stream->out_transfer_func;
9669 acrtc_state->stream->abm_level = acrtc_state->abm_level;
9670 if (acrtc_state->abm_level != dm_old_crtc_state->abm_level)
9671 bundle->stream_update.abm_level = &acrtc_state->abm_level;
9674 * If FreeSync state on the stream has changed then we need to
9675 * re-adjust the min/max bounds now that DC doesn't handle this
9676 * as part of commit.
9678 if (is_dc_timing_adjust_needed(dm_old_crtc_state, acrtc_state)) {
9679 spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
9680 dc_stream_adjust_vmin_vmax(
9681 dm->dc, acrtc_state->stream,
9682 &acrtc_attach->dm_irq_params.vrr_params.adjust);
9683 spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
9685 mutex_lock(&dm->dc_lock);
9686 if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
9687 acrtc_state->stream->link->psr_settings.psr_allow_active)
9688 amdgpu_dm_psr_disable(acrtc_state->stream);
9690 dc_commit_updates_for_stream(dm->dc,
9691 bundle->surface_updates,
9693 acrtc_state->stream,
9694 &bundle->stream_update,
9698 * Enable or disable the interrupts on the backend.
9700 * Most pipes are put into power gating when unused.
9702 * When power gating is enabled on a pipe, the interrupt
9703 * enablement state is lost by the time power gating is disabled again.
9705 * So we need to update the IRQ control state in hardware
9706 * whenever the pipe turns on (since it could be previously
9707 * power gated) or off (since some pipes can't be power gated on some ASICs).
9710 if (dm_old_crtc_state->active_planes != acrtc_state->active_planes)
9711 dm_update_pflip_irq_state(drm_to_adev(dev),
9714 if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
9715 acrtc_state->stream->link->psr_settings.psr_version != DC_PSR_VERSION_UNSUPPORTED &&
9716 !acrtc_state->stream->link->psr_settings.psr_feature_enabled)
9717 amdgpu_dm_link_setup_psr(acrtc_state->stream);
9719 /* Decrement skip count when PSR is enabled and we're doing fast updates. */
9720 if (acrtc_state->update_type == UPDATE_TYPE_FAST &&
9721 acrtc_state->stream->link->psr_settings.psr_feature_enabled) {
9722 struct amdgpu_dm_connector *aconn =
9723 (struct amdgpu_dm_connector *)acrtc_state->stream->dm_stream_context;
9725 if (aconn->psr_skip_count > 0)
9726 aconn->psr_skip_count--;
9728 /* Allow PSR when skip count is 0. */
9729 acrtc_attach->dm_irq_params.allow_psr_entry = !aconn->psr_skip_count;
9732 * If sink supports PSR SU, there is no need to rely on
9733 * a vblank event disable request to enable PSR. PSR SU
9734 * can be enabled immediately once the OS demonstrates an
9735 * adequate number of fast atomic commits to notify KMD
9736 * of update events. See `vblank_control_worker()`.
9738 if (acrtc_state->stream->link->psr_settings.psr_version >= DC_PSR_VERSION_SU_1 &&
9739 acrtc_attach->dm_irq_params.allow_psr_entry &&
9740 !acrtc_state->stream->link->psr_settings.psr_allow_active)
9741 amdgpu_dm_psr_enable(acrtc_state->stream);
9743 acrtc_attach->dm_irq_params.allow_psr_entry = false;
9746 mutex_unlock(&dm->dc_lock);
9750 * Update cursor state *after* programming all the planes.
9751 * This avoids redundant programming in the case where we're going
9752 * to be disabling a single plane - those pipes are being disabled.
9754 if (acrtc_state->active_planes)
9755 amdgpu_dm_commit_cursors(state);
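/*
 * Notify the audio component about audio endpoints that were removed
 * or added by this commit so ELD state stays in sync with the
 * displays actually driving audio.
 */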
9761 static void amdgpu_dm_commit_audio(struct drm_device *dev,
9762 struct drm_atomic_state *state)
9764 struct amdgpu_device *adev = drm_to_adev(dev);
9765 struct amdgpu_dm_connector *aconnector;
9766 struct drm_connector *connector;
9767 struct drm_connector_state *old_con_state, *new_con_state;
9768 struct drm_crtc_state *new_crtc_state;
9769 struct dm_crtc_state *new_dm_crtc_state;
9770 const struct dc_stream_status *status;
9773 /* Notify device removals. */
9774 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
9775 if (old_con_state->crtc != new_con_state->crtc) {
9776 /* CRTC changes require notification. */
9780 if (!new_con_state->crtc)
9783 new_crtc_state = drm_atomic_get_new_crtc_state(
9784 state, new_con_state->crtc);
9786 if (!new_crtc_state)
9789 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
9793 aconnector = to_amdgpu_dm_connector(connector);
9795 mutex_lock(&adev->dm.audio_lock);
9796 inst = aconnector->audio_inst;
9797 aconnector->audio_inst = -1;
9798 mutex_unlock(&adev->dm.audio_lock);
9800 amdgpu_dm_audio_eld_notify(adev, inst);
9803 /* Notify audio device additions. */
9804 for_each_new_connector_in_state(state, connector, new_con_state, i) {
9805 if (!new_con_state->crtc)
9808 new_crtc_state = drm_atomic_get_new_crtc_state(
9809 state, new_con_state->crtc);
9811 if (!new_crtc_state)
9814 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
9817 new_dm_crtc_state = to_dm_crtc_state(new_crtc_state);
9818 if (!new_dm_crtc_state->stream)
9821 status = dc_stream_get_status(new_dm_crtc_state->stream);
9825 aconnector = to_amdgpu_dm_connector(connector);
9827 mutex_lock(&adev->dm.audio_lock);
9828 inst = status->audio_inst;
9829 aconnector->audio_inst = inst;
9830 mutex_unlock(&adev->dm.audio_lock);
9832 amdgpu_dm_audio_eld_notify(adev, inst);
9837 * amdgpu_dm_crtc_copy_transient_flags - copy mirrored flags from DRM to DC
9838 * @crtc_state: the DRM CRTC state
9839 * @stream_state: the DC stream state.
9841 * Copy the mirrored transient state flags from DRM to DC. It is used to bring
9842 * a dc_stream_state's flags in sync with a drm_crtc_state's flags.
9844 static void amdgpu_dm_crtc_copy_transient_flags(struct drm_crtc_state *crtc_state,
9845 struct dc_stream_state *stream_state)
9847 stream_state->mode_changed = drm_atomic_crtc_needs_modeset(crtc_state);
9851 * amdgpu_dm_atomic_commit_tail() - AMDgpu DM's commit tail implementation.
9852 * @state: The atomic state to commit
9854 * This will tell DC to commit the constructed DC state from atomic_check,
9855 * programming the hardware. Any failure here implies a hardware failure, since
9856 * atomic check should have filtered anything non-kosher.
9858 static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
9860 struct drm_device *dev = state->dev;
9861 struct amdgpu_device *adev = drm_to_adev(dev);
9862 struct amdgpu_display_manager *dm = &adev->dm;
9863 struct dm_atomic_state *dm_state;
9864 struct dc_state *dc_state = NULL, *dc_state_temp = NULL;
9866 struct drm_crtc *crtc;
9867 struct drm_crtc_state *old_crtc_state, *new_crtc_state;
9868 unsigned long flags;
9869 bool wait_for_vblank = true;
9870 struct drm_connector *connector;
9871 struct drm_connector_state *old_con_state, *new_con_state;
9872 struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
9873 int crtc_disable_count = 0;
9874 bool mode_set_reset_required = false;
9876 trace_amdgpu_dm_atomic_commit_tail_begin(state);
9878 drm_atomic_helper_update_legacy_modeset_state(dev, state);
9880 dm_state = dm_atomic_get_new_state(state);
9881 if (dm_state && dm_state->context) {
9882 dc_state = dm_state->context;
9884 /* No state changes, retain current state. */
9885 dc_state_temp = dc_create_state(dm->dc);
9886 ASSERT(dc_state_temp);
9887 dc_state = dc_state_temp;
9888 dc_resource_state_copy_construct_current(dm->dc, dc_state);
9891 for_each_oldnew_crtc_in_state (state, crtc, old_crtc_state,
9892 new_crtc_state, i) {
9893 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
9895 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9897 if (old_crtc_state->active &&
9898 (!new_crtc_state->active ||
9899 drm_atomic_crtc_needs_modeset(new_crtc_state))) {
9900 manage_dm_interrupts(adev, acrtc, false);
9901 dc_stream_release(dm_old_crtc_state->stream);
9905 drm_atomic_helper_calc_timestamping_constants(state);
9907 /* update changed items */
9908 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
9909 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
9911 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9912 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9914 drm_dbg_state(state->dev,
9915 "amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
9916 "planes_changed:%d, mode_changed:%d,active_changed:%d,"
9917 "connectors_changed:%d\n",
9919 new_crtc_state->enable,
9920 new_crtc_state->active,
9921 new_crtc_state->planes_changed,
9922 new_crtc_state->mode_changed,
9923 new_crtc_state->active_changed,
9924 new_crtc_state->connectors_changed);
9926 /* Disable cursor if disabling crtc */
9927 if (old_crtc_state->active && !new_crtc_state->active) {
9928 struct dc_cursor_position position;
9930 memset(&position, 0, sizeof(position));
9931 mutex_lock(&dm->dc_lock);
9932 dc_stream_set_cursor_position(dm_old_crtc_state->stream, &position);
9933 mutex_unlock(&dm->dc_lock);
9936 /* Copy all transient state flags into dc state */
9937 if (dm_new_crtc_state->stream) {
9938 amdgpu_dm_crtc_copy_transient_flags(&dm_new_crtc_state->base,
9939 dm_new_crtc_state->stream);
9942 /* handles headless hotplug case, updating new_state and
9943 * aconnector as needed
9946 if (modeset_required(new_crtc_state, dm_new_crtc_state->stream, dm_old_crtc_state->stream)) {
9948 DRM_DEBUG_ATOMIC("Atomic commit: SET crtc id %d: [%p]\n", acrtc->crtc_id, acrtc);
9950 if (!dm_new_crtc_state->stream) {
9952 * This could happen because of issues with the
9953 * delivery of userspace notifications: userspace
9954 * tries to set a mode on a display which is in
9955 * fact disconnected (dc_sink is NULL on the
9956 * aconnector), and we expect a mode reset to
9957 * come soon.
9959 * This can also happen when an unplug occurs
9960 * while the resume sequence is still completing.
9962 * In either case, we want to pretend we still
9963 * have a sink to keep the pipe running so that
9964 * hw state stays consistent with the sw state.
9966 DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
9967 __func__, acrtc->base.base.id);
9971 if (dm_old_crtc_state->stream)
9972 remove_stream(adev, acrtc, dm_old_crtc_state->stream);
9974 pm_runtime_get_noresume(dev->dev);
9976 acrtc->enabled = true;
9977 acrtc->hw_mode = new_crtc_state->mode;
9978 crtc->hwmode = new_crtc_state->mode;
9979 mode_set_reset_required = true;
9980 } else if (modereset_required(new_crtc_state)) {
9981 DRM_DEBUG_ATOMIC("Atomic commit: RESET. crtc id %d:[%p]\n", acrtc->crtc_id, acrtc);
9982 /* i.e. reset mode */
9983 if (dm_old_crtc_state->stream)
9984 remove_stream(adev, acrtc, dm_old_crtc_state->stream);
9986 mode_set_reset_required = true;
9988 } /* for_each_crtc_in_state() */
9991 /* If there is a mode set or reset, disable eDP PSR */
9992 if (mode_set_reset_required) {
9993 if (dm->vblank_control_workqueue)
9994 flush_workqueue(dm->vblank_control_workqueue);
9996 amdgpu_dm_psr_disable_all(dm);
9999 dm_enable_per_frame_crtc_master_sync(dc_state);
10000 mutex_lock(&dm->dc_lock);
10001 WARN_ON(!dc_commit_state(dm->dc, dc_state));
10003 /* Allow idle optimization when vblank count is 0 for display off */
10004 if (dm->active_vblank_irq_count == 0)
10005 dc_allow_idle_optimizations(dm->dc, true);
10006 mutex_unlock(&dm->dc_lock);
10009 for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
10010 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
10012 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
10014 if (dm_new_crtc_state->stream != NULL) {
10015 const struct dc_stream_status *status =
10016 dc_stream_get_status(dm_new_crtc_state->stream);
10019 status = dc_stream_get_status_from_state(dc_state,
10020 dm_new_crtc_state->stream);
10022 DC_ERR("got no status for stream %p on acrtc%p\n", dm_new_crtc_state->stream, acrtc);
10024 acrtc->otg_inst = status->primary_otg_inst;
10027 #ifdef CONFIG_DRM_AMD_DC_HDCP
10028 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
10029 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
10030 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
10031 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
10033 new_crtc_state = NULL;
10036 new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
10038 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
10040 if (dm_new_crtc_state && dm_new_crtc_state->stream == NULL &&
10041 connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED) {
10042 hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
10043 new_con_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
10044 dm_new_con_state->update_hdcp = true;
10048 if (is_content_protection_different(new_con_state, old_con_state, connector, adev->dm.hdcp_workqueue))
10049 hdcp_update_display(
10050 adev->dm.hdcp_workqueue, aconnector->dc_link->link_index, aconnector,
10051 new_con_state->hdcp_content_type,
10052 new_con_state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED);
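/*
 * HDCP state machine note: when a stream is torn down while content
 * protection was ENABLED, the property is demoted to DESIRED above so
 * that userspace re-requests protection and HDCP is renegotiated once
 * the display is driven again.
 */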
10056 /* Handle connector state changes */
10057 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
10058 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
10059 struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
10060 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
10061 struct dc_surface_update dummy_updates[MAX_SURFACES];
10062 struct dc_stream_update stream_update;
10063 struct dc_info_packet hdr_packet;
10064 struct dc_stream_status *status = NULL;
10065 bool abm_changed, hdr_changed, scaling_changed;
10067 memset(&dummy_updates, 0, sizeof(dummy_updates));
10068 memset(&stream_update, 0, sizeof(stream_update));
10071 new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
10072 old_crtc_state = drm_atomic_get_old_crtc_state(state, &acrtc->base);
10075 /* Skip any modesets/resets */
10076 if (!acrtc || drm_atomic_crtc_needs_modeset(new_crtc_state))
10079 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
10080 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
10082 scaling_changed = is_scaling_state_different(dm_new_con_state,
10085 abm_changed = dm_new_crtc_state->abm_level !=
10086 dm_old_crtc_state->abm_level;
10089 !drm_connector_atomic_hdr_metadata_equal(old_con_state, new_con_state);
10091 if (!scaling_changed && !abm_changed && !hdr_changed)
10094 stream_update.stream = dm_new_crtc_state->stream;
10095 if (scaling_changed) {
10096 update_stream_scaling_settings(&dm_new_con_state->base.crtc->mode,
10097 dm_new_con_state, dm_new_crtc_state->stream);
10099 stream_update.src = dm_new_crtc_state->stream->src;
10100 stream_update.dst = dm_new_crtc_state->stream->dst;
10104 dm_new_crtc_state->stream->abm_level = dm_new_crtc_state->abm_level;
10106 stream_update.abm_level = &dm_new_crtc_state->abm_level;
10110 fill_hdr_info_packet(new_con_state, &hdr_packet);
10111 stream_update.hdr_static_metadata = &hdr_packet;
10114 status = dc_stream_get_status(dm_new_crtc_state->stream);
10116 if (WARN_ON(!status))
10119 WARN_ON(!status->plane_count);
10121 /*
10122 * TODO: DC refuses to perform stream updates without a dc_surface_update.
10123 * Here we create an empty update on each plane.
10124 * To fix this, DC should permit updating only stream properties.
10125 */
10126 for (j = 0; j < status->plane_count; j++)
10127 dummy_updates[j].surface = status->plane_states[0];
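/*
 * Every dummy update points at plane_states[0]; the surface pointer
 * merely anchors the update to the stream's pipe, since no surface
 * attributes are actually changed here.
 */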
10130 mutex_lock(&dm->dc_lock);
10131 dc_commit_updates_for_stream(dm->dc,
10133 status->plane_count,
10134 dm_new_crtc_state->stream,
10137 mutex_unlock(&dm->dc_lock);
10140 /* Count number of newly disabled CRTCs for dropping PM refs later. */
10141 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
10142 new_crtc_state, i) {
10143 if (old_crtc_state->active && !new_crtc_state->active)
10144 crtc_disable_count++;
10146 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
10147 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
10149 /* For freesync config update on crtc state and params for irq */
10150 update_stream_irq_parameters(dm, dm_new_crtc_state);
10152 /* Handle vrr on->off / off->on transitions */
10153 amdgpu_dm_handle_vrr_transition(dm_old_crtc_state,
10154 dm_new_crtc_state);
10157 /*
10158 * Enable interrupts for CRTCs that are newly enabled or went through
10159 * a modeset. This was intentionally deferred until after the front end
10160 * state was modified, to wait until the OTG is on so that the IRQ
10161 * handlers don't access stale or invalid state.
10162 */
10163 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
10164 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
10165 #ifdef CONFIG_DEBUG_FS
10166 bool configure_crc = false;
10167 enum amdgpu_dm_pipe_crc_source cur_crc_src;
10168 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
10169 struct crc_rd_work *crc_rd_wrk = dm->crc_rd_wrk;
10171 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
10172 cur_crc_src = acrtc->dm_irq_params.crc_src;
10173 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
10175 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
10177 if (new_crtc_state->active &&
10178 (!old_crtc_state->active ||
10179 drm_atomic_crtc_needs_modeset(new_crtc_state))) {
10180 dc_stream_retain(dm_new_crtc_state->stream);
10181 acrtc->dm_irq_params.stream = dm_new_crtc_state->stream;
10182 manage_dm_interrupts(adev, acrtc, true);
10184 #ifdef CONFIG_DEBUG_FS
10185 /*
10186 * The frontend may have changed, so reapply the CRC capture
10187 * settings for the stream.
10188 */
10189 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
10191 if (amdgpu_dm_is_valid_crc_source(cur_crc_src)) {
10192 configure_crc = true;
10193 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
10194 if (amdgpu_dm_crc_window_is_activated(crtc)) {
10195 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
10196 acrtc->dm_irq_params.crc_window.update_win = true;
10197 acrtc->dm_irq_params.crc_window.skip_frame_cnt = 2;
10198 spin_lock_irq(&crc_rd_wrk->crc_rd_work_lock);
10199 crc_rd_wrk->crtc = crtc;
10200 spin_unlock_irq(&crc_rd_wrk->crc_rd_work_lock);
10201 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
10207 if (amdgpu_dm_crtc_configure_crc_source(
10208 crtc, dm_new_crtc_state, cur_crc_src))
10209 DRM_DEBUG_DRIVER("Failed to configure crc source");
10214 for_each_new_crtc_in_state(state, crtc, new_crtc_state, j)
10215 if (new_crtc_state->async_flip)
10216 wait_for_vblank = false;
10218 /* update planes when needed, per CRTC */
10219 for_each_new_crtc_in_state(state, crtc, new_crtc_state, j) {
10220 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
10222 if (dm_new_crtc_state->stream)
10223 amdgpu_dm_commit_planes(state, dc_state, dev,
10224 dm, crtc, wait_for_vblank);
10227 /* Update audio instances for each connector. */
10228 amdgpu_dm_commit_audio(dev, state);
10230 /* restore the backlight level */
10231 for (i = 0; i < dm->num_of_edps; i++) {
10232 if (dm->backlight_dev[i] &&
10233 (dm->actual_brightness[i] != dm->brightness[i]))
10234 amdgpu_dm_backlight_set_level(dm, i, dm->brightness[i]);
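/*
 * dm->brightness[] holds the level last requested by userspace, while
 * dm->actual_brightness[] tracks what the last commit actually
 * programmed; if a commit left the two out of sync, reapply the
 * requested level here.
 */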
10237 /*
10238 * Send a vblank event for all events not handled in the flip path, and
10239 * mark the consumed events for drm_atomic_helper_commit_hw_done().
10240 */
10241 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
10242 for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
10244 if (new_crtc_state->event)
10245 drm_send_event_locked(dev, &new_crtc_state->event->base);
10247 new_crtc_state->event = NULL;
10249 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
10251 /* Signal HW programming completion */
10252 drm_atomic_helper_commit_hw_done(state);
10254 if (wait_for_vblank)
10255 drm_atomic_helper_wait_for_flip_done(dev, state);
10257 drm_atomic_helper_cleanup_planes(dev, state);
10259 /* return the stolen vga memory back to VRAM */
10260 if (!adev->mman.keep_stolen_vga_memory)
10261 amdgpu_bo_free_kernel(&adev->mman.stolen_vga_memory, NULL, NULL);
10262 amdgpu_bo_free_kernel(&adev->mman.stolen_extended_memory, NULL, NULL);
10264 /*
10265 * Finally, drop a runtime PM reference for each newly disabled CRTC,
10266 * so we can put the GPU into runtime suspend if we're not driving any
10267 * displays anymore.
10268 */
10269 for (i = 0; i < crtc_disable_count; i++)
10270 pm_runtime_put_autosuspend(dev->dev);
10271 pm_runtime_mark_last_busy(dev->dev);
10274 dc_release_state(dc_state_temp);
10278 static int dm_force_atomic_commit(struct drm_connector *connector)
10281 struct drm_device *ddev = connector->dev;
10282 struct drm_atomic_state *state = drm_atomic_state_alloc(ddev);
10283 struct amdgpu_crtc *disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
10284 struct drm_plane *plane = disconnected_acrtc->base.primary;
10285 struct drm_connector_state *conn_state;
10286 struct drm_crtc_state *crtc_state;
10287 struct drm_plane_state *plane_state;
10292 state->acquire_ctx = ddev->mode_config.acquire_ctx;
10294 /* Construct an atomic state to restore the previous display settings */
10296 /*
10297 * Attach connectors to drm_atomic_state
10298 */
10299 conn_state = drm_atomic_get_connector_state(state, connector);
10301 ret = PTR_ERR_OR_ZERO(conn_state);
10305 /* Attach crtc to drm_atomic_state */
10306 crtc_state = drm_atomic_get_crtc_state(state, &disconnected_acrtc->base);
10308 ret = PTR_ERR_OR_ZERO(crtc_state);
10312 /* force a restore */
10313 crtc_state->mode_changed = true;
10315 /* Attach plane to drm_atomic_state */
10316 plane_state = drm_atomic_get_plane_state(state, plane);
10318 ret = PTR_ERR_OR_ZERO(plane_state);
10322 /* Call commit internally with the state we just constructed */
10323 ret = drm_atomic_commit(state);
10326 drm_atomic_state_put(state);
10328 DRM_ERROR("Restoring old state failed with %i\n", ret);
10333 /*
10334 * This function handles all cases when a set mode does not come upon hotplug.
10335 * This includes when a display is unplugged and then plugged back into the
10336 * same port, and when running without usermode desktop manager support.
10337 */
10338 void dm_restore_drm_connector_state(struct drm_device *dev,
10339 struct drm_connector *connector)
10341 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
10342 struct amdgpu_crtc *disconnected_acrtc;
10343 struct dm_crtc_state *acrtc_state;
10345 if (!aconnector->dc_sink || !connector->state || !connector->encoder)
10348 disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
10349 if (!disconnected_acrtc)
10352 acrtc_state = to_dm_crtc_state(disconnected_acrtc->base.state);
10353 if (!acrtc_state->stream)
10356 /*
10357 * If the previous sink is not released and is different from the current
10358 * one, we deduce that we are in a state where we cannot rely on a usermode
10359 * call to turn on the display, so we do it here.
10360 */
10361 if (acrtc_state->stream->sink != aconnector->dc_sink)
10362 dm_force_atomic_commit(&aconnector->base);
10366 * Grabs all modesetting locks to serialize against any blocking commits,
10367 * and waits for completion of all non-blocking commits.
10369 static int do_aquire_global_lock(struct drm_device *dev,
10370 struct drm_atomic_state *state)
10372 struct drm_crtc *crtc;
10373 struct drm_crtc_commit *commit;
10377 * Adding all modeset locks to the acquire_ctx ensures that when the
10378 * framework releases it, the extra locks we are taking here will also
10379 * get released.
10380 */
10381 ret = drm_modeset_lock_all_ctx(dev, state->acquire_ctx);
10385 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
10386 spin_lock(&crtc->commit_lock);
10387 commit = list_first_entry_or_null(&crtc->commit_list,
10388 struct drm_crtc_commit, commit_entry);
10390 drm_crtc_commit_get(commit);
10391 spin_unlock(&crtc->commit_lock);
10396 /*
10397 * Make sure all pending HW programming has completed and
10398 * all page flips are done.
10399 */
10400 ret = wait_for_completion_interruptible_timeout(&commit->hw_done, 10*HZ);
10403 ret = wait_for_completion_interruptible_timeout(
10404 &commit->flip_done, 10*HZ);
10407 DRM_ERROR("[CRTC:%d:%s] hw_done or flip_done timed out\n",
10408 crtc->base.id, crtc->name);
10410 drm_crtc_commit_put(commit);
10413 return ret < 0 ? ret : 0;
10416 static void get_freesync_config_for_crtc(
10417 struct dm_crtc_state *new_crtc_state,
10418 struct dm_connector_state *new_con_state)
10420 struct mod_freesync_config config = {0};
10421 struct amdgpu_dm_connector *aconnector =
10422 to_amdgpu_dm_connector(new_con_state->base.connector);
10423 struct drm_display_mode *mode = &new_crtc_state->base.mode;
10424 int vrefresh = drm_mode_vrefresh(mode);
10425 bool fs_vid_mode = false;
10427 new_crtc_state->vrr_supported = new_con_state->freesync_capable &&
10428 vrefresh >= aconnector->min_vfreq &&
10429 vrefresh <= aconnector->max_vfreq;
10431 if (new_crtc_state->vrr_supported) {
10432 new_crtc_state->stream->ignore_msa_timing_param = true;
10433 fs_vid_mode = new_crtc_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED;
10435 config.min_refresh_in_uhz = aconnector->min_vfreq * 1000000;
10436 config.max_refresh_in_uhz = aconnector->max_vfreq * 1000000;
10437 config.vsif_supported = true;
10441 config.state = VRR_STATE_ACTIVE_FIXED;
10442 config.fixed_refresh_in_uhz = new_crtc_state->freesync_config.fixed_refresh_in_uhz;
10444 } else if (new_crtc_state->base.vrr_enabled) {
10445 config.state = VRR_STATE_ACTIVE_VARIABLE;
10447 config.state = VRR_STATE_INACTIVE;
10451 new_crtc_state->freesync_config = config;
10454 static void reset_freesync_config_for_crtc(
10455 struct dm_crtc_state *new_crtc_state)
10457 new_crtc_state->vrr_supported = false;
10459 memset(&new_crtc_state->vrr_infopacket, 0,
10460 sizeof(new_crtc_state->vrr_infopacket));
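/*
 * Note on is_timing_unchanged_for_freesync() below: the mix of == and
 * != in its comparison is deliberate. Two modes count as "unchanged"
 * when everything matches except vtotal and the vsync position, while
 * the vsync pulse width stays the same - i.e. the modes differ only in
 * the vertical front porch, which is how freesync video modes vary the
 * refresh rate.
 */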
10463 static bool
10464 is_timing_unchanged_for_freesync(struct drm_crtc_state *old_crtc_state,
10465 struct drm_crtc_state *new_crtc_state)
10467 const struct drm_display_mode *old_mode, *new_mode;
10469 if (!old_crtc_state || !new_crtc_state)
10472 old_mode = &old_crtc_state->mode;
10473 new_mode = &new_crtc_state->mode;
10475 if (old_mode->clock == new_mode->clock &&
10476 old_mode->hdisplay == new_mode->hdisplay &&
10477 old_mode->vdisplay == new_mode->vdisplay &&
10478 old_mode->htotal == new_mode->htotal &&
10479 old_mode->vtotal != new_mode->vtotal &&
10480 old_mode->hsync_start == new_mode->hsync_start &&
10481 old_mode->vsync_start != new_mode->vsync_start &&
10482 old_mode->hsync_end == new_mode->hsync_end &&
10483 old_mode->vsync_end != new_mode->vsync_end &&
10484 old_mode->hskew == new_mode->hskew &&
10485 old_mode->vscan == new_mode->vscan &&
10486 (old_mode->vsync_end - old_mode->vsync_start) ==
10487 (new_mode->vsync_end - new_mode->vsync_start))
10493 static void set_freesync_fixed_config(struct dm_crtc_state *dm_new_crtc_state) {
10494 uint64_t num, den, res;
10495 struct drm_crtc_state *new_crtc_state = &dm_new_crtc_state->base;
10497 dm_new_crtc_state->freesync_config.state = VRR_STATE_ACTIVE_FIXED;
10499 num = (unsigned long long)new_crtc_state->mode.clock * 1000 * 1000000;
10500 den = (unsigned long long)new_crtc_state->mode.htotal *
10501 (unsigned long long)new_crtc_state->mode.vtotal;
10503 res = div_u64(num, den);
10504 dm_new_crtc_state->freesync_config.fixed_refresh_in_uhz = res;
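/*
 * Worked example for the math above (illustrative numbers): a
 * 1920x1080 mode with mode.clock = 148500 kHz, htotal = 2200 and
 * vtotal = 1125 gives 148500 * 1000 * 1000000 / (2200 * 1125) =
 * 60000000 uHz, i.e. a fixed 60 Hz refresh rate stored in micro-Hz.
 */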
10507 static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
10508 struct drm_atomic_state *state,
10509 struct drm_crtc *crtc,
10510 struct drm_crtc_state *old_crtc_state,
10511 struct drm_crtc_state *new_crtc_state,
10513 bool *lock_and_validation_needed)
10515 struct dm_atomic_state *dm_state = NULL;
10516 struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
10517 struct dc_stream_state *new_stream;
10520 /*
10521 * TODO: Move this code into dm_crtc_atomic_check once we get rid of dc_validation_set.
10522 * Update changed items.
10523 */
10524 struct amdgpu_crtc *acrtc = NULL;
10525 struct amdgpu_dm_connector *aconnector = NULL;
10526 struct drm_connector_state *drm_new_conn_state = NULL, *drm_old_conn_state = NULL;
10527 struct dm_connector_state *dm_new_conn_state = NULL, *dm_old_conn_state = NULL;
10531 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
10532 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
10533 acrtc = to_amdgpu_crtc(crtc);
10534 aconnector = amdgpu_dm_find_first_crtc_matching_connector(state, crtc);
10536 /* TODO This hack should go away */
10537 if (aconnector && enable) {
10538 /* Make sure fake sink is created in plug-in scenario */
10539 drm_new_conn_state = drm_atomic_get_new_connector_state(state,
10540 &aconnector->base);
10541 drm_old_conn_state = drm_atomic_get_old_connector_state(state,
10542 &aconnector->base);
10544 if (IS_ERR(drm_new_conn_state)) {
10545 ret = PTR_ERR_OR_ZERO(drm_new_conn_state);
10549 dm_new_conn_state = to_dm_connector_state(drm_new_conn_state);
10550 dm_old_conn_state = to_dm_connector_state(drm_old_conn_state);
10552 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
10555 new_stream = create_validate_stream_for_sink(aconnector,
10556 &new_crtc_state->mode,
10558 dm_old_crtc_state->stream);
10560 /*
10561 * We can have no stream on ACTION_SET if a display
10562 * was disconnected during S3; in this case it is not an
10563 * error, the OS will be updated after detection and
10564 * will do the right thing on the next atomic commit.
10565 */
10568 DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
10569 __func__, acrtc->base.base.id);
10574 /*
10575 * TODO: Check VSDB bits to decide whether this should
10576 * be enabled or not.
10577 */
10578 new_stream->triggered_crtc_reset.enabled =
10579 dm->force_timing_sync;
10581 dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
10583 ret = fill_hdr_info_packet(drm_new_conn_state,
10584 &new_stream->hdr_static_metadata);
10588 /*
10589 * If we already removed the old stream from the context
10590 * (and set the new stream to NULL) then we can't reuse
10591 * the old stream even if the stream and scaling are unchanged.
10592 * We'll hit the BUG_ON and see a black screen.
10593 *
10594 * TODO: Refactor this function to allow this check to work
10595 * in all conditions.
10596 */
10597 if (dm_new_crtc_state->stream &&
10598 is_timing_unchanged_for_freesync(new_crtc_state, old_crtc_state))
10601 if (dm_new_crtc_state->stream &&
10602 dc_is_stream_unchanged(new_stream, dm_old_crtc_state->stream) &&
10603 dc_is_stream_scaling_unchanged(new_stream, dm_old_crtc_state->stream)) {
10604 new_crtc_state->mode_changed = false;
10605 DRM_DEBUG_DRIVER("Mode change not required, setting mode_changed to %d",
10606 new_crtc_state->mode_changed);
10610 /* mode_changed flag may get updated above, need to check again */
10611 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
10614 drm_dbg_state(state->dev,
10615 "amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
10616 "planes_changed:%d, mode_changed:%d, active_changed:%d, "
10617 "connectors_changed:%d\n",
10618 acrtc->crtc_id,
10619 new_crtc_state->enable,
10620 new_crtc_state->active,
10621 new_crtc_state->planes_changed,
10622 new_crtc_state->mode_changed,
10623 new_crtc_state->active_changed,
10624 new_crtc_state->connectors_changed);
10626 /* Remove stream for any changed/disabled CRTC */
10629 if (!dm_old_crtc_state->stream)
10632 if (dm_new_crtc_state->stream &&
10633 is_timing_unchanged_for_freesync(new_crtc_state,
10635 new_crtc_state->mode_changed = false;
10637 "Mode change not required for front porch change, "
10638 "setting mode_changed to %d",
10639 new_crtc_state->mode_changed);
10641 set_freesync_fixed_config(dm_new_crtc_state);
10644 } else if (aconnector &&
10645 is_freesync_video_mode(&new_crtc_state->mode,
10647 struct drm_display_mode *high_mode;
10649 high_mode = get_highest_refresh_rate_mode(aconnector, false);
10650 if (!drm_mode_equal(&new_crtc_state->mode, high_mode)) {
10651 set_freesync_fixed_config(dm_new_crtc_state);
10655 ret = dm_atomic_get_state(state, &dm_state);
10659 DRM_DEBUG_DRIVER("Disabling DRM crtc: %d\n",
10662 /* i.e. reset mode */
10663 if (dc_remove_stream_from_ctx(
10666 dm_old_crtc_state->stream) != DC_OK) {
10671 dc_stream_release(dm_old_crtc_state->stream);
10672 dm_new_crtc_state->stream = NULL;
10674 reset_freesync_config_for_crtc(dm_new_crtc_state);
10676 *lock_and_validation_needed = true;
10678 } else { /* Add stream for any updated/enabled CRTC */
10679 /*
10680 * Quick fix to prevent a NULL pointer dereference on new_stream when
10681 * added MST connectors are not found in the existing crtc_state in chained mode.
10682 * TODO: need to dig out the root cause of this.
10683 */
10687 if (modereset_required(new_crtc_state))
10690 if (modeset_required(new_crtc_state, new_stream,
10691 dm_old_crtc_state->stream)) {
10693 WARN_ON(dm_new_crtc_state->stream);
10695 ret = dm_atomic_get_state(state, &dm_state);
10699 dm_new_crtc_state->stream = new_stream;
10701 dc_stream_retain(new_stream);
10703 DRM_DEBUG_ATOMIC("Enabling DRM crtc: %d\n",
10706 if (dc_add_stream_to_ctx(
10709 dm_new_crtc_state->stream) != DC_OK) {
10714 *lock_and_validation_needed = true;
10719 /* Release extra reference */
10721 dc_stream_release(new_stream);
10723 /*
10724 * We want to do dc stream updates that do not require a
10725 * full modeset below.
10726 */
10727 if (!(enable && aconnector && new_crtc_state->active))
10730 * Given the above conditions, the dc state cannot be NULL, because:
10731 * 1. We're in the process of enabling CRTCs (the stream has just been
10732 *    added to the dc context, or is already on it),
10733 * 2. It has a valid connector attached, and
10734 * 3. It is currently active and enabled.
10735 * => The dc stream state currently exists.
10737 BUG_ON(dm_new_crtc_state->stream == NULL);
10739 /* Scaling or underscan settings */
10740 if (is_scaling_state_different(dm_old_conn_state, dm_new_conn_state) ||
10741 drm_atomic_crtc_needs_modeset(new_crtc_state))
10742 update_stream_scaling_settings(
10743 &new_crtc_state->mode, dm_new_conn_state, dm_new_crtc_state->stream);
10746 dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
10749 * Color management settings. We also update color properties
10750 * when a modeset is needed, to ensure it gets reprogrammed.
10752 if (dm_new_crtc_state->base.color_mgmt_changed ||
10753 drm_atomic_crtc_needs_modeset(new_crtc_state)) {
10754 ret = amdgpu_dm_update_crtc_color_mgmt(dm_new_crtc_state);
10759 /* Update Freesync settings. */
10760 get_freesync_config_for_crtc(dm_new_crtc_state,
10761 dm_new_conn_state);
10767 dc_stream_release(new_stream);
10771 static bool should_reset_plane(struct drm_atomic_state *state,
10772 struct drm_plane *plane,
10773 struct drm_plane_state *old_plane_state,
10774 struct drm_plane_state *new_plane_state)
10776 struct drm_plane *other;
10777 struct drm_plane_state *old_other_state, *new_other_state;
10778 struct drm_crtc_state *new_crtc_state;
10782 * TODO: Remove this hack once the checks below are sufficient
10783 * to determine when we need to reset all the planes on a CRTC.
10784 */
10786 if (state->allow_modeset)
10789 /* Exit early if we know that we're adding or removing the plane. */
10790 if (old_plane_state->crtc != new_plane_state->crtc)
10793 /* old crtc == new_crtc == NULL, plane not in context. */
10794 if (!new_plane_state->crtc)
10798 drm_atomic_get_new_crtc_state(state, new_plane_state->crtc);
10800 if (!new_crtc_state)
10803 /* CRTC Degamma changes currently require us to recreate planes. */
10804 if (new_crtc_state->color_mgmt_changed)
10807 if (drm_atomic_crtc_needs_modeset(new_crtc_state))
10810 /*
10811 * If there are any new primary or overlay planes being added or
10812 * removed then the z-order can potentially change. To ensure
10813 * correct z-order and pipe acquisition the current DC architecture
10814 * requires us to remove and recreate all existing planes.
10815 *
10816 * TODO: Come up with a more elegant solution for this.
10817 */
10818 for_each_oldnew_plane_in_state(state, other, old_other_state, new_other_state, i) {
10819 struct amdgpu_framebuffer *old_afb, *new_afb;
10820 if (other->type == DRM_PLANE_TYPE_CURSOR)
10823 if (old_other_state->crtc != new_plane_state->crtc &&
10824 new_other_state->crtc != new_plane_state->crtc)
10827 if (old_other_state->crtc != new_other_state->crtc)
10830 /* Src/dst size and scaling updates. */
10831 if (old_other_state->src_w != new_other_state->src_w ||
10832 old_other_state->src_h != new_other_state->src_h ||
10833 old_other_state->crtc_w != new_other_state->crtc_w ||
10834 old_other_state->crtc_h != new_other_state->crtc_h)
10837 /* Rotation / mirroring updates. */
10838 if (old_other_state->rotation != new_other_state->rotation)
10841 /* Blending updates. */
10842 if (old_other_state->pixel_blend_mode !=
10843 new_other_state->pixel_blend_mode)
10846 /* Alpha updates. */
10847 if (old_other_state->alpha != new_other_state->alpha)
10850 /* Colorspace changes. */
10851 if (old_other_state->color_range != new_other_state->color_range ||
10852 old_other_state->color_encoding != new_other_state->color_encoding)
10855 /* Framebuffer checks fall at the end. */
10856 if (!old_other_state->fb || !new_other_state->fb)
10859 /* Pixel format changes can require bandwidth updates. */
10860 if (old_other_state->fb->format != new_other_state->fb->format)
10863 old_afb = (struct amdgpu_framebuffer *)old_other_state->fb;
10864 new_afb = (struct amdgpu_framebuffer *)new_other_state->fb;
10866 /* Tiling and DCC changes also require bandwidth updates. */
10867 if (old_afb->tiling_flags != new_afb->tiling_flags ||
10868 old_afb->base.modifier != new_afb->base.modifier)
10875 static int dm_check_cursor_fb(struct amdgpu_crtc *new_acrtc,
10876 struct drm_plane_state *new_plane_state,
10877 struct drm_framebuffer *fb)
10879 struct amdgpu_device *adev = drm_to_adev(new_acrtc->base.dev);
10880 struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(fb);
10881 unsigned int pitch;
10884 if (fb->width > new_acrtc->max_cursor_width ||
10885 fb->height > new_acrtc->max_cursor_height) {
10886 DRM_DEBUG_ATOMIC("Bad cursor FB size %dx%d\n",
10887 new_plane_state->fb->width,
10888 new_plane_state->fb->height);
10891 if (new_plane_state->src_w != fb->width << 16 ||
10892 new_plane_state->src_h != fb->height << 16) {
10893 DRM_DEBUG_ATOMIC("Cropping not supported for cursor plane\n");
10897 /* Pitch in pixels */
10898 pitch = fb->pitches[0] / fb->format->cpp[0];
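/*
 * pitches[0] is in bytes and cpp[0] is bytes per pixel, so e.g. an
 * ARGB8888 cursor FB with a 1024-byte stride yields a pitch of
 * 1024 / 4 = 256 pixels (numbers illustrative).
 */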
10900 if (fb->width != pitch) {
10901 DRM_DEBUG_ATOMIC("Cursor FB width %d doesn't match pitch %d",
10910 /* FB pitch is supported by cursor plane */
10913 DRM_DEBUG_ATOMIC("Bad cursor FB pitch %d px\n", pitch);
10917 /* Core DRM takes care of checking FB modifiers, so we only need to
10918 * check tiling flags when the FB doesn't have a modifier. */
10919 if (!(fb->flags & DRM_MODE_FB_MODIFIERS)) {
10920 if (adev->family < AMDGPU_FAMILY_AI) {
10921 linear = AMDGPU_TILING_GET(afb->tiling_flags, ARRAY_MODE) != DC_ARRAY_2D_TILED_THIN1 &&
10922 AMDGPU_TILING_GET(afb->tiling_flags, ARRAY_MODE) != DC_ARRAY_1D_TILED_THIN1 &&
10923 AMDGPU_TILING_GET(afb->tiling_flags, MICRO_TILE_MODE) == 0;
10925 linear = AMDGPU_TILING_GET(afb->tiling_flags, SWIZZLE_MODE) == 0;
10928 DRM_DEBUG_ATOMIC("Cursor FB not linear");
10936 static int dm_update_plane_state(struct dc *dc,
10937 struct drm_atomic_state *state,
10938 struct drm_plane *plane,
10939 struct drm_plane_state *old_plane_state,
10940 struct drm_plane_state *new_plane_state,
10942 bool *lock_and_validation_needed)
10945 struct dm_atomic_state *dm_state = NULL;
10946 struct drm_crtc *new_plane_crtc, *old_plane_crtc;
10947 struct drm_crtc_state *old_crtc_state, *new_crtc_state;
10948 struct dm_crtc_state *dm_new_crtc_state, *dm_old_crtc_state;
10949 struct dm_plane_state *dm_new_plane_state, *dm_old_plane_state;
10950 struct amdgpu_crtc *new_acrtc;
10955 new_plane_crtc = new_plane_state->crtc;
10956 old_plane_crtc = old_plane_state->crtc;
10957 dm_new_plane_state = to_dm_plane_state(new_plane_state);
10958 dm_old_plane_state = to_dm_plane_state(old_plane_state);
10960 if (plane->type == DRM_PLANE_TYPE_CURSOR) {
10961 if (!enable || !new_plane_crtc ||
10962 drm_atomic_plane_disabling(plane->state, new_plane_state))
10965 new_acrtc = to_amdgpu_crtc(new_plane_crtc);
10967 if (new_plane_state->src_x != 0 || new_plane_state->src_y != 0) {
10968 DRM_DEBUG_ATOMIC("Cropping not supported for cursor plane\n");
10972 if (new_plane_state->fb) {
10973 ret = dm_check_cursor_fb(new_acrtc, new_plane_state,
10974 new_plane_state->fb);
10982 needs_reset = should_reset_plane(state, plane, old_plane_state,
10985 /* Remove any changed/removed planes */
10990 if (!old_plane_crtc)
10993 old_crtc_state = drm_atomic_get_old_crtc_state(
10994 state, old_plane_crtc);
10995 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
10997 if (!dm_old_crtc_state->stream)
11000 DRM_DEBUG_ATOMIC("Disabling DRM plane: %d on DRM crtc %d\n",
11001 plane->base.id, old_plane_crtc->base.id);
11003 ret = dm_atomic_get_state(state, &dm_state);
11007 if (!dc_remove_plane_from_context(
11009 dm_old_crtc_state->stream,
11010 dm_old_plane_state->dc_state,
11011 dm_state->context)) {
11017 dc_plane_state_release(dm_old_plane_state->dc_state);
11018 dm_new_plane_state->dc_state = NULL;
11020 *lock_and_validation_needed = true;
11022 } else { /* Add new planes */
11023 struct dc_plane_state *dc_new_plane_state;
11025 if (drm_atomic_plane_disabling(plane->state, new_plane_state))
11028 if (!new_plane_crtc)
11031 new_crtc_state = drm_atomic_get_new_crtc_state(state, new_plane_crtc);
11032 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
11034 if (!dm_new_crtc_state->stream)
11040 ret = dm_plane_helper_check_state(new_plane_state, new_crtc_state);
11044 WARN_ON(dm_new_plane_state->dc_state);
11046 dc_new_plane_state = dc_create_plane_state(dc);
11047 if (!dc_new_plane_state)
11050 DRM_DEBUG_ATOMIC("Enabling DRM plane: %d on DRM crtc %d\n",
11051 plane->base.id, new_plane_crtc->base.id);
11053 ret = fill_dc_plane_attributes(
11054 drm_to_adev(new_plane_crtc->dev),
11055 dc_new_plane_state,
11059 dc_plane_state_release(dc_new_plane_state);
11063 ret = dm_atomic_get_state(state, &dm_state);
11065 dc_plane_state_release(dc_new_plane_state);
11069 /*
11070 * Any atomic check errors that occur after this will
11071 * not need a release. The plane state will be attached
11072 * to the stream, and therefore part of the atomic
11073 * state. It'll be released when the atomic state is
11074 * cleaned up.
11075 */
11076 if (!dc_add_plane_to_context(
11078 dm_new_crtc_state->stream,
11079 dc_new_plane_state,
11080 dm_state->context)) {
11082 dc_plane_state_release(dc_new_plane_state);
11086 dm_new_plane_state->dc_state = dc_new_plane_state;
11088 dm_new_crtc_state->mpo_requested |= (plane->type == DRM_PLANE_TYPE_OVERLAY);
11090 /* Tell DC to do a full surface update every time there
11091 * is a plane change. Inefficient, but works for now.
11092 */
11093 dm_new_plane_state->dc_state->update_flags.bits.full_update = 1;
11095 *lock_and_validation_needed = true;
11102 static void dm_get_oriented_plane_size(struct drm_plane_state *plane_state,
11103 int *src_w, int *src_h)
11105 switch (plane_state->rotation & DRM_MODE_ROTATE_MASK) {
11106 case DRM_MODE_ROTATE_90:
11107 case DRM_MODE_ROTATE_270:
11108 *src_w = plane_state->src_h >> 16;
11109 *src_h = plane_state->src_w >> 16;
11111 case DRM_MODE_ROTATE_0:
11112 case DRM_MODE_ROTATE_180:
11114 *src_w = plane_state->src_w >> 16;
11115 *src_h = plane_state->src_h >> 16;
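/*
 * src_w/src_h in drm_plane_state are 16.16 fixed point, hence the
 * >> 16 above; for 90/270 degree rotation width and height are
 * swapped so callers always see the scanout-oriented size.
 */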
11120 static int dm_check_crtc_cursor(struct drm_atomic_state *state,
11121 struct drm_crtc *crtc,
11122 struct drm_crtc_state *new_crtc_state)
11124 struct drm_plane *cursor = crtc->cursor, *underlying;
11125 struct drm_plane_state *new_cursor_state, *new_underlying_state;
11127 int cursor_scale_w, cursor_scale_h, underlying_scale_w, underlying_scale_h;
11128 int cursor_src_w, cursor_src_h;
11129 int underlying_src_w, underlying_src_h;
11131 /* On DCE and DCN there is no dedicated hardware cursor plane. We get a
11132 * cursor per pipe, but it's going to inherit the scaling and
11133 * positioning from the underlying pipe. Check that the cursor plane's
11134 * blending properties match the underlying planes'.
11135 */
11136 new_cursor_state = drm_atomic_get_new_plane_state(state, cursor);
11137 if (!new_cursor_state || !new_cursor_state->fb) {
11141 dm_get_oriented_plane_size(new_cursor_state, &cursor_src_w, &cursor_src_h);
11142 cursor_scale_w = new_cursor_state->crtc_w * 1000 / cursor_src_w;
11143 cursor_scale_h = new_cursor_state->crtc_h * 1000 / cursor_src_h;
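/*
 * Scales are computed in thousandths: a 64x64 cursor displayed at
 * 64x64 gives 64 * 1000 / 64 = 1000 on both axes, i.e. 1:1; the
 * underlying plane must match these values in the loop below.
 */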
11145 for_each_new_plane_in_state_reverse(state, underlying, new_underlying_state, i) {
11146 /* Narrow down to non-cursor planes on the same CRTC as the cursor */
11147 if (new_underlying_state->crtc != crtc || underlying == crtc->cursor)
11150 /* Ignore disabled planes */
11151 if (!new_underlying_state->fb)
11154 dm_get_oriented_plane_size(new_underlying_state,
11155 &underlying_src_w, &underlying_src_h);
11156 underlying_scale_w = new_underlying_state->crtc_w * 1000 / underlying_src_w;
11157 underlying_scale_h = new_underlying_state->crtc_h * 1000 / underlying_src_h;
11159 if (cursor_scale_w != underlying_scale_w ||
11160 cursor_scale_h != underlying_scale_h) {
11161 drm_dbg_atomic(crtc->dev,
11162 "Cursor [PLANE:%d:%s] scaling doesn't match underlying [PLANE:%d:%s]\n",
11163 cursor->base.id, cursor->name, underlying->base.id, underlying->name);
11167 /* If this plane covers the whole CRTC, no need to check planes underneath */
11168 if (new_underlying_state->crtc_x <= 0 &&
11169 new_underlying_state->crtc_y <= 0 &&
11170 new_underlying_state->crtc_x + new_underlying_state->crtc_w >= new_crtc_state->mode.hdisplay &&
11171 new_underlying_state->crtc_y + new_underlying_state->crtc_h >= new_crtc_state->mode.vdisplay)
11178 #if defined(CONFIG_DRM_AMD_DC_DCN)
11179 static int add_affected_mst_dsc_crtcs(struct drm_atomic_state *state, struct drm_crtc *crtc)
11181 struct drm_connector *connector;
11182 struct drm_connector_state *conn_state, *old_conn_state;
11183 struct amdgpu_dm_connector *aconnector = NULL;
11185 for_each_oldnew_connector_in_state(state, connector, old_conn_state, conn_state, i) {
11186 if (!conn_state->crtc)
11187 conn_state = old_conn_state;
11189 if (conn_state->crtc != crtc)
11192 aconnector = to_amdgpu_dm_connector(connector);
11193 if (!aconnector->port || !aconnector->mst_port)
11202 return drm_dp_mst_add_affected_dsc_crtcs(state, &aconnector->mst_port->mst_mgr);
11206 /**
11207 * amdgpu_dm_atomic_check() - Atomic check implementation for amdgpu DM.
11208 * @dev: The DRM device
11209 * @state: The atomic state to commit
11211 * Validate that the given atomic state is programmable by DC into hardware.
11212 * This involves constructing a &struct dc_state reflecting the new hardware
11213 * state we wish to commit, then querying DC to see if it is programmable. It's
11214 * important not to modify the existing DC state. Otherwise, atomic_check
11215 * may unexpectedly commit hardware changes.
11217 * When validating the DC state, it's important that the right locks are
11218 * acquired. For the full update case, which removes/adds/updates streams
11219 * on one CRTC while flipping on another CRTC, acquiring the global lock
11220 * guarantees that any such full update commit will wait for completion of
11221 * any outstanding flip using DRM's synchronization events.
11223 * Note that DM adds the affected connectors for all CRTCs in state, even
11224 * when that might not seem necessary. This is because DC stream creation
11225 * requires the DC sink, which is tied to the DRM connector state. Cleaning
11226 * this up should be possible but non-trivial - a possible TODO item.
11228 * Return: 0 on success, or a negative error code if validation failed.
11229 */
11230 static int amdgpu_dm_atomic_check(struct drm_device *dev,
11231 struct drm_atomic_state *state)
11233 struct amdgpu_device *adev = drm_to_adev(dev);
11234 struct dm_atomic_state *dm_state = NULL;
11235 struct dc *dc = adev->dm.dc;
11236 struct drm_connector *connector;
11237 struct drm_connector_state *old_con_state, *new_con_state;
11238 struct drm_crtc *crtc;
11239 struct drm_crtc_state *old_crtc_state, *new_crtc_state;
11240 struct drm_plane *plane;
11241 struct drm_plane_state *old_plane_state, *new_plane_state;
11242 enum dc_status status;
11244 bool lock_and_validation_needed = false;
11245 struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
11246 #if defined(CONFIG_DRM_AMD_DC_DCN)
11247 struct dsc_mst_fairness_vars vars[MAX_PIPES];
11248 struct drm_dp_mst_topology_state *mst_state;
11249 struct drm_dp_mst_topology_mgr *mgr;
11252 trace_amdgpu_dm_atomic_check_begin(state);
11254 ret = drm_atomic_helper_check_modeset(dev, state);
11256 DRM_DEBUG_DRIVER("drm_atomic_helper_check_modeset() failed\n");
11260 /* Check connector changes */
11261 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
11262 struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
11263 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
11265 /* Skip connectors that are disabled or part of modeset already. */
11266 if (!old_con_state->crtc && !new_con_state->crtc)
11269 if (!new_con_state->crtc)
11272 new_crtc_state = drm_atomic_get_crtc_state(state, new_con_state->crtc);
11273 if (IS_ERR(new_crtc_state)) {
11274 DRM_DEBUG_DRIVER("drm_atomic_get_crtc_state() failed\n");
11275 ret = PTR_ERR(new_crtc_state);
11279 if (dm_old_con_state->abm_level !=
11280 dm_new_con_state->abm_level)
11281 new_crtc_state->connectors_changed = true;
11284 #if defined(CONFIG_DRM_AMD_DC_DCN)
11285 if (dc_resource_is_dsc_encoding_supported(dc)) {
11286 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
11287 if (drm_atomic_crtc_needs_modeset(new_crtc_state)) {
11288 ret = add_affected_mst_dsc_crtcs(state, crtc);
11290 DRM_DEBUG_DRIVER("add_affected_mst_dsc_crtcs() failed\n");
11295 if (!pre_validate_dsc(state, &dm_state, vars)) {
11301 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
11302 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
11304 if (!drm_atomic_crtc_needs_modeset(new_crtc_state) &&
11305 !new_crtc_state->color_mgmt_changed &&
11306 old_crtc_state->vrr_enabled == new_crtc_state->vrr_enabled &&
11307 dm_old_crtc_state->dsc_force_changed == false)
11310 ret = amdgpu_dm_verify_lut_sizes(new_crtc_state);
11312 DRM_DEBUG_DRIVER("amdgpu_dm_verify_lut_sizes() failed\n");
11316 if (!new_crtc_state->enable)
11319 ret = drm_atomic_add_affected_connectors(state, crtc);
11321 DRM_DEBUG_DRIVER("drm_atomic_add_affected_connectors() failed\n");
11325 ret = drm_atomic_add_affected_planes(state, crtc);
11327 DRM_DEBUG_DRIVER("drm_atomic_add_affected_planes() failed\n");
11331 if (dm_old_crtc_state->dsc_force_changed)
11332 new_crtc_state->mode_changed = true;
11336 * Add all primary and overlay planes on the CRTC to the state
11337 * whenever a plane is enabled to maintain correct z-ordering
11338 * and to enable fast surface updates.
11340 drm_for_each_crtc(crtc, dev) {
11341 bool modified = false;
11343 for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
11344 if (plane->type == DRM_PLANE_TYPE_CURSOR)
11347 if (new_plane_state->crtc == crtc ||
11348 old_plane_state->crtc == crtc) {
11357 drm_for_each_plane_mask(plane, state->dev, crtc->state->plane_mask) {
11358 if (plane->type == DRM_PLANE_TYPE_CURSOR)
11362 drm_atomic_get_plane_state(state, plane);
11364 if (IS_ERR(new_plane_state)) {
11365 ret = PTR_ERR(new_plane_state);
11366 DRM_DEBUG_DRIVER("new_plane_state is BAD\n");
11372 /* Remove existing planes if they are modified */
11373 for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
11374 ret = dm_update_plane_state(dc, state, plane,
11378 &lock_and_validation_needed);
11380 DRM_DEBUG_DRIVER("dm_update_plane_state() failed\n");
11385 /* Disable all crtcs which require disable */
11386 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
11387 ret = dm_update_crtc_state(&adev->dm, state, crtc,
11391 &lock_and_validation_needed);
11393 DRM_DEBUG_DRIVER("DISABLE: dm_update_crtc_state() failed\n");
11398 /* Enable all crtcs which require enable */
11399 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
11400 ret = dm_update_crtc_state(&adev->dm, state, crtc,
11404 &lock_and_validation_needed);
11406 DRM_DEBUG_DRIVER("ENABLE: dm_update_crtc_state() failed\n");
11411 /* Add new/modified planes */
11412 for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
11413 ret = dm_update_plane_state(dc, state, plane,
11417 &lock_and_validation_needed);
11419 DRM_DEBUG_DRIVER("dm_update_plane_state() failed\n");
11424 /* Run this here since we want to validate the streams we created */
11425 ret = drm_atomic_helper_check_planes(dev, state);
11427 DRM_DEBUG_DRIVER("drm_atomic_helper_check_planes() failed\n");
11431 for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
11432 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
11433 if (dm_new_crtc_state->mpo_requested)
11434 DRM_DEBUG_DRIVER("MPO enablement requested on crtc:[%p]\n", crtc);
11437 /* Check cursor planes scaling */
11438 for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
11439 ret = dm_check_crtc_cursor(state, crtc, new_crtc_state);
11441 DRM_DEBUG_DRIVER("dm_check_crtc_cursor() failed\n");
11446 if (state->legacy_cursor_update) {
11447 /*
11448 * This is a fast cursor update coming from the plane update
11449 * helper; check if it can be done asynchronously for better
11450 * performance.
11451 */
11452 state->async_update =
11453 !drm_atomic_helper_async_check(dev, state);
11455 /*
11456 * Skip the remaining global validation if this is an async
11457 * update. Cursor updates can be done without affecting
11458 * state or bandwidth calcs and this avoids the performance
11459 * penalty of locking the private state object and
11460 * allocating a new dc_state.
11461 */
11462 if (state->async_update)
11466 /* Check scaling and underscan changes */
11467 /* TODO: Removed scaling changes validation due to inability to commit
11468 * a new stream into the context w/o causing a full reset. Need to
11469 * decide how to handle this.
11470 */
11471 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
11472 struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
11473 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
11474 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
11476 /* Skip any modesets/resets */
11477 if (!acrtc || drm_atomic_crtc_needs_modeset(
11478 drm_atomic_get_new_crtc_state(state, &acrtc->base)))
11481 /* Skip anything that is not a scaling or underscan change */
11482 if (!is_scaling_state_different(dm_new_con_state, dm_old_con_state))
11485 lock_and_validation_needed = true;
11488 #if defined(CONFIG_DRM_AMD_DC_DCN)
11489 /* set the slot info for each mst_state based on the link encoding format */
11490 for_each_new_mst_mgr_in_state(state, mgr, mst_state, i) {
11491 struct amdgpu_dm_connector *aconnector;
11492 struct drm_connector *connector;
11493 struct drm_connector_list_iter iter;
11494 u8 link_coding_cap;
11496 if (!mgr->mst_state)
11499 drm_connector_list_iter_begin(dev, &iter);
11500 drm_for_each_connector_iter(connector, &iter) {
11501 int id = connector->index;
11503 if (id == mst_state->mgr->conn_base_id) {
11504 aconnector = to_amdgpu_dm_connector(connector);
11505 link_coding_cap = dc_link_dp_mst_decide_link_encoding_format(aconnector->dc_link);
11506 drm_dp_mst_update_slots(mst_state, link_coding_cap);
11511 drm_connector_list_iter_end(&iter);
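/*
 * drm_dp_mst_update_slots() picks the number of usable time slots for
 * the topology based on the link encoding: with 8b/10b one slot is
 * reserved for the MTP header, so only 63 of the 64 slots carry
 * payload, while 128b/132b links can use all 64 (slot counts per the
 * DRM MST helper).
 */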
11515 /*
11516 * Streams and planes are reset when there are changes that affect
11517 * bandwidth. Anything that affects bandwidth needs to go through
11518 * DC global validation to ensure that the configuration can be applied
11519 * to hardware.
11520 *
11521 * We have to currently stall out here in atomic_check for outstanding
11522 * commits to finish in this case because our IRQ handlers reference
11523 * DRM state directly - we can end up disabling interrupts too early
11524 * if we don't.
11525 *
11526 * TODO: Remove this stall and drop DM state private objects.
11527 */
11528 if (lock_and_validation_needed) {
11529 ret = dm_atomic_get_state(state, &dm_state);
11531 DRM_DEBUG_DRIVER("dm_atomic_get_state() failed\n");
11535 ret = do_aquire_global_lock(dev, state);
11537 DRM_DEBUG_DRIVER("do_aquire_global_lock() failed\n");
11541 #if defined(CONFIG_DRM_AMD_DC_DCN)
11542 if (!compute_mst_dsc_configs_for_state(state, dm_state->context, vars)) {
11543 DRM_DEBUG_DRIVER("compute_mst_dsc_configs_for_state() failed\n");
11548 ret = dm_update_mst_vcpi_slots_for_dsc(state, dm_state->context, vars);
11550 DRM_DEBUG_DRIVER("dm_update_mst_vcpi_slots_for_dsc() failed\n");
11556 * Perform validation of the MST topology in the state:
11557 * we need to perform the MST atomic check before calling
11558 * dc_validate_global_state(), or there is a chance
11559 * of getting stuck in an infinite loop and hanging eventually.
11561 ret = drm_dp_mst_atomic_check(state);
11563 DRM_DEBUG_DRIVER("drm_dp_mst_atomic_check() failed\n");
11566 status = dc_validate_global_state(dc, dm_state->context, true);
11567 if (status != DC_OK) {
11568 DRM_DEBUG_DRIVER("DC global validation failure: %s (%d)",
11569 dc_status_to_str(status), status);
11574 /*
11575 * The commit is a fast update. Fast updates shouldn't change
11576 * the DC context, affect global validation, and can have their
11577 * commit work done in parallel with other commits not touching
11578 * the same resource. If we have a new DC context as part of
11579 * the DM atomic state from validation we need to free it and
11580 * retain the existing one instead.
11582 * Furthermore, since the DM atomic state only contains the DC
11583 * context and can safely be annulled, we can free the state
11584 * and clear the associated private object now to free
11585 * some memory and avoid a possible use-after-free later.
11586 */
11588 for (i = 0; i < state->num_private_objs; i++) {
11589 struct drm_private_obj *obj = state->private_objs[i].ptr;
11591 if (obj->funcs == adev->dm.atomic_obj.funcs) {
11592 int j = state->num_private_objs - 1;
11594 dm_atomic_destroy_state(obj,
11595 state->private_objs[i].state);
11597 /* If i is not at the end of the array then the
11598 * last element needs to be moved to where i was
11599 * before the array can safely be truncated.
11600 */
11602 state->private_objs[i] =
11603 state->private_objs[j];
11605 state->private_objs[j].ptr = NULL;
11606 state->private_objs[j].state = NULL;
11607 state->private_objs[j].old_state = NULL;
11608 state->private_objs[j].new_state = NULL;
11610 state->num_private_objs = j;
11616 /* Store the overall update type for use later in atomic check. */
11617 for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
11618 struct dm_crtc_state *dm_new_crtc_state =
11619 to_dm_crtc_state(new_crtc_state);
11621 dm_new_crtc_state->update_type = lock_and_validation_needed ?
11626 /* Must be success */
11629 trace_amdgpu_dm_atomic_check_finish(state, ret);
11634 if (ret == -EDEADLK)
11635 DRM_DEBUG_DRIVER("Atomic check stopped to avoid deadlock.\n");
11636 else if (ret == -EINTR || ret == -EAGAIN || ret == -ERESTARTSYS)
11637 DRM_DEBUG_DRIVER("Atomic check stopped due to signal.\n");
11639 DRM_DEBUG_DRIVER("Atomic check failed with err: %d\n", ret);
11641 trace_amdgpu_dm_atomic_check_finish(state, ret);
11646 static bool is_dp_capable_without_timing_msa(struct dc *dc,
11647 struct amdgpu_dm_connector *amdgpu_dm_connector)
11650 bool capable = false;
11652 if (amdgpu_dm_connector->dc_link &&
11653 dm_helpers_dp_read_dpcd(
11655 amdgpu_dm_connector->dc_link,
11656 DP_DOWN_STREAM_PORT_COUNT,
11658 sizeof(dpcd_data))) {
11659 capable = (dpcd_data & DP_MSA_TIMING_PAR_IGNORED) ? true : false;
11665 static bool dm_edid_parser_send_cea(struct amdgpu_display_manager *dm,
11666 unsigned int offset,
11667 unsigned int total_length,
11669 unsigned int length,
11670 struct amdgpu_hdmi_vsdb_info *vsdb)
11673 union dmub_rb_cmd cmd;
11674 struct dmub_cmd_send_edid_cea *input;
11675 struct dmub_cmd_edid_cea_output *output;
11677 if (length > DMUB_EDID_CEA_DATA_CHUNK_BYTES)
11680 memset(&cmd, 0, sizeof(cmd));
11682 input = &cmd.edid_cea.data.input;
11684 cmd.edid_cea.header.type = DMUB_CMD__EDID_CEA;
11685 cmd.edid_cea.header.sub_type = 0;
11686 cmd.edid_cea.header.payload_bytes =
11687 sizeof(cmd.edid_cea) - sizeof(cmd.edid_cea.header);
11688 input->offset = offset;
11689 input->length = length;
11690 input->cea_total_length = total_length;
11691 memcpy(input->payload, data, length);
11693 res = dc_dmub_srv_cmd_with_reply_data(dm->dc->ctx->dmub_srv, &cmd);
11695 DRM_ERROR("EDID CEA parser failed\n");
11699 output = &cmd.edid_cea.data.output;
11701 if (output->type == DMUB_CMD__EDID_CEA_ACK) {
11702 if (!output->ack.success) {
11703 DRM_ERROR("EDID CEA ack failed at offset %d\n",
11704 output->ack.offset);
11706 } else if (output->type == DMUB_CMD__EDID_CEA_AMD_VSDB) {
11707 if (!output->amd_vsdb.vsdb_found)
11710 vsdb->freesync_supported = output->amd_vsdb.freesync_supported;
11711 vsdb->amd_vsdb_version = output->amd_vsdb.amd_vsdb_version;
11712 vsdb->min_refresh_rate_hz = output->amd_vsdb.min_frame_rate;
11713 vsdb->max_refresh_rate_hz = output->amd_vsdb.max_frame_rate;
11715 DRM_WARN("Unknown EDID CEA parser results\n");
11722 static bool parse_edid_cea_dmcu(struct amdgpu_display_manager *dm,
11723 uint8_t *edid_ext, int len,
11724 struct amdgpu_hdmi_vsdb_info *vsdb_info)
11728 /* send extension block to DMCU for parsing */
11729 for (i = 0; i < len; i += 8) {
11733 /* send 8 bytes at a time */
11734 if (!dc_edid_parser_send_cea(dm->dc, i, len, &edid_ext[i], 8))
11738 /* EDID block send completed, expect the result */
11739 int version, min_rate, max_rate;
11741 res = dc_edid_parser_recv_amd_vsdb(dm->dc, &version, &min_rate, &max_rate);
11743 /* amd vsdb found */
11744 vsdb_info->freesync_supported = 1;
11745 vsdb_info->amd_vsdb_version = version;
11746 vsdb_info->min_refresh_rate_hz = min_rate;
11747 vsdb_info->max_refresh_rate_hz = max_rate;
11755 res = dc_edid_parser_recv_cea_ack(dm->dc, &offset);
11763 static bool parse_edid_cea_dmub(struct amdgpu_display_manager *dm,
11764 uint8_t *edid_ext, int len,
11765 struct amdgpu_hdmi_vsdb_info *vsdb_info)
11769 /* send extension block to DMUB for parsing */
11770 for (i = 0; i < len; i += 8) {
11771 /* send 8 bytes at a time */
11772 if (!dm_edid_parser_send_cea(dm, i, len, &edid_ext[i], 8, vsdb_info))
11776 return vsdb_info->freesync_supported;
11779 static bool parse_edid_cea(struct amdgpu_dm_connector *aconnector,
11780 uint8_t *edid_ext, int len,
11781 struct amdgpu_hdmi_vsdb_info *vsdb_info)
11783 struct amdgpu_device *adev = drm_to_adev(aconnector->base.dev);
11785 if (adev->dm.dmub_srv)
11786 return parse_edid_cea_dmub(&adev->dm, edid_ext, len, vsdb_info);
11788 return parse_edid_cea_dmcu(&adev->dm, edid_ext, len, vsdb_info);
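/*
 * parse_hdmi_amd_vsdb() below returns the index of the CEA extension
 * block that carried a valid AMD VSDB, or -ENODEV if none was found;
 * the caller uses the index to locate the matching detailed timings.
 */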
11791 static int parse_hdmi_amd_vsdb(struct amdgpu_dm_connector *aconnector,
11792 struct edid *edid, struct amdgpu_hdmi_vsdb_info *vsdb_info)
11794 uint8_t *edid_ext = NULL;
11796 bool valid_vsdb_found = false;
11798 /*----- drm_find_cea_extension() -----*/
11799 /* No EDID or EDID extensions */
11800 if (edid == NULL || edid->extensions == 0)
11803 /* Find CEA extension */
11804 for (i = 0; i < edid->extensions; i++) {
11805 edid_ext = (uint8_t *)edid + EDID_LENGTH * (i + 1);
11806 if (edid_ext[0] == CEA_EXT)
11810 if (i == edid->extensions)
11813 /*----- cea_db_offsets() -----*/
11814 if (edid_ext[0] != CEA_EXT)
11817 valid_vsdb_found = parse_edid_cea(aconnector, edid_ext, EDID_LENGTH, vsdb_info);
11819 return valid_vsdb_found ? i : -ENODEV;
11822 void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,
11826 struct detailed_timing *timing;
11827 struct detailed_non_pixel *data;
11828 struct detailed_data_monitor_range *range;
11829 struct amdgpu_dm_connector *amdgpu_dm_connector =
11830 to_amdgpu_dm_connector(connector);
11831 struct dm_connector_state *dm_con_state = NULL;
11832 struct dc_sink *sink;
11834 struct drm_device *dev = connector->dev;
11835 struct amdgpu_device *adev = drm_to_adev(dev);
11836 bool freesync_capable = false;
11837 struct amdgpu_hdmi_vsdb_info vsdb_info = {0};
11839 if (!connector->state) {
11840 DRM_ERROR("%s - Connector has no state", __func__);
11844 sink = amdgpu_dm_connector->dc_sink ?
11845 amdgpu_dm_connector->dc_sink :
11846 amdgpu_dm_connector->dc_em_sink;
11848 if (!edid || !sink) {
11849 dm_con_state = to_dm_connector_state(connector->state);
11851 amdgpu_dm_connector->min_vfreq = 0;
11852 amdgpu_dm_connector->max_vfreq = 0;
11853 amdgpu_dm_connector->pixel_clock_mhz = 0;
11854 connector->display_info.monitor_range.min_vfreq = 0;
11855 connector->display_info.monitor_range.max_vfreq = 0;
11856 freesync_capable = false;
11861 dm_con_state = to_dm_connector_state(connector->state);
11863 if (!adev->dm.freesync_module)
11867 if (sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT
11868 || sink->sink_signal == SIGNAL_TYPE_EDP) {
11869 bool edid_check_required = false;
11872 edid_check_required = is_dp_capable_without_timing_msa(
11874 amdgpu_dm_connector);
11877 if (edid_check_required && (edid->version > 1 ||
11878 (edid->version == 1 && edid->revision > 1))) {
11879 for (i = 0; i < 4; i++) {
11881 timing = &edid->detailed_timings[i];
11882 data = &timing->data.other_data;
11883 range = &data->data.range;
11884 /*
11885 * Check if the monitor has continuous frequency mode
11886 */
11887 if (data->type != EDID_DETAIL_MONITOR_RANGE)
11889 /*
11890 * Check for flag range limits only. If flag == 1 then
11891 * no additional timing information is provided.
11892 * Default GTF, GTF secondary curve and CVT are not
11893 * supported.
11894 */
11895 if (range->flags != 1)
11898 amdgpu_dm_connector->min_vfreq = range->min_vfreq;
11899 amdgpu_dm_connector->max_vfreq = range->max_vfreq;
11900 amdgpu_dm_connector->pixel_clock_mhz =
11901 range->pixel_clock_mhz * 10;
11903 connector->display_info.monitor_range.min_vfreq = range->min_vfreq;
11904 connector->display_info.monitor_range.max_vfreq = range->max_vfreq;
11909 if (amdgpu_dm_connector->max_vfreq -
11910 amdgpu_dm_connector->min_vfreq > 10) {
11912 freesync_capable = true;
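/*
 * The > 10 Hz window is a heuristic: a refresh range narrower than
 * roughly 10 Hz gives VRR too little room to be useful, so such
 * displays are not advertised as freesync capable.
 */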
11915 } else if (edid && sink->sink_signal == SIGNAL_TYPE_HDMI_TYPE_A) {
11916 i = parse_hdmi_amd_vsdb(amdgpu_dm_connector, edid, &vsdb_info);
11917 if (i >= 0 && vsdb_info.freesync_supported) {
11918 timing = &edid->detailed_timings[i];
11919 data = &timing->data.other_data;
11921 amdgpu_dm_connector->min_vfreq = vsdb_info.min_refresh_rate_hz;
11922 amdgpu_dm_connector->max_vfreq = vsdb_info.max_refresh_rate_hz;
11923 if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10)
11924 freesync_capable = true;
11926 connector->display_info.monitor_range.min_vfreq = vsdb_info.min_refresh_rate_hz;
11927 connector->display_info.monitor_range.max_vfreq = vsdb_info.max_refresh_rate_hz;
11933 dm_con_state->freesync_capable = freesync_capable;
11935 if (connector->vrr_capable_property)
11936 drm_connector_set_vrr_capable_property(connector,
11940 void amdgpu_dm_trigger_timing_sync(struct drm_device *dev)
11942 struct amdgpu_device *adev = drm_to_adev(dev);
11943 struct dc *dc = adev->dm.dc;
11946 mutex_lock(&adev->dm.dc_lock);
11947 if (dc->current_state) {
11948 for (i = 0; i < dc->current_state->stream_count; ++i)
11949 dc->current_state->streams[i]
11950 ->triggered_crtc_reset.enabled =
11951 adev->dm.force_timing_sync;
11953 dm_enable_per_frame_crtc_master_sync(dc->current_state);
11954 dc_trigger_sync(dc, dc->current_state);
11956 mutex_unlock(&adev->dm.dc_lock);
11959 void dm_write_reg_func(const struct dc_context *ctx, uint32_t address,
11960 uint32_t value, const char *func_name)
11962 #ifdef DM_CHECK_ADDR_0
11963 if (address == 0) {
11964 DC_ERR("invalid register write. address = 0\n");
11968 cgs_write_register(ctx->cgs_device, address, value);
11969 trace_amdgpu_dc_wreg(&ctx->perf_trace->write_count, address, value);
11972 uint32_t dm_read_reg_func(const struct dc_context *ctx, uint32_t address,
11973 const char *func_name)
11976 #ifdef DM_CHECK_ADDR_0
11977 if (address == 0) {
11978 DC_ERR("invalid register read; address = 0\n");
11983 if (ctx->dmub_srv &&
11984 ctx->dmub_srv->reg_helper_offload.gather_in_progress &&
11985 !ctx->dmub_srv->reg_helper_offload.should_burst_write) {
11990 value = cgs_read_register(ctx->cgs_device, address);
11992 trace_amdgpu_dc_rreg(&ctx->perf_trace->read_count, address, value);
11997 static int amdgpu_dm_set_dmub_async_sync_status(bool is_cmd_aux,
11998 struct dc_context *ctx,
11999 uint8_t status_type,
12000 uint32_t *operation_result)
12002 struct amdgpu_device *adev = ctx->driver_context;
12003 int return_status = -1;
12004 struct dmub_notification *p_notify = adev->dm.dmub_notify;
12007 if (status_type == DMUB_ASYNC_TO_SYNC_ACCESS_SUCCESS) {
12008 return_status = p_notify->aux_reply.length;
12009 *operation_result = p_notify->result;
12010 } else if (status_type == DMUB_ASYNC_TO_SYNC_ACCESS_TIMEOUT) {
12011 *operation_result = AUX_RET_ERROR_TIMEOUT;
12012 } else if (status_type == DMUB_ASYNC_TO_SYNC_ACCESS_FAIL) {
12013 *operation_result = AUX_RET_ERROR_ENGINE_ACQUIRE;
12015 *operation_result = AUX_RET_ERROR_UNKNOWN;
12018 if (status_type == DMUB_ASYNC_TO_SYNC_ACCESS_SUCCESS) {
12020 *operation_result = p_notify->sc_status;
12022 *operation_result = SET_CONFIG_UNKNOWN_ERROR;
12026 return return_status;
12029 int amdgpu_dm_process_dmub_aux_transfer_sync(bool is_cmd_aux, struct dc_context *ctx,
12030 unsigned int link_index, void *cmd_payload, void *operation_result)
12032 struct amdgpu_device *adev = ctx->driver_context;
12036 dc_process_dmub_aux_transfer_async(ctx->dc,
12037 link_index, (struct aux_payload *)cmd_payload);
12038 } else if (dc_process_dmub_set_config_async(ctx->dc, link_index,
12039 (struct set_config_cmd_payload *)cmd_payload,
12040 adev->dm.dmub_notify)) {
12041 return amdgpu_dm_set_dmub_async_sync_status(is_cmd_aux,
12042 ctx, DMUB_ASYNC_TO_SYNC_ACCESS_SUCCESS,
12043 (uint32_t *)operation_result);
12046 ret = wait_for_completion_timeout(&adev->dm.dmub_aux_transfer_done, 10 * HZ);
12048 DRM_ERROR("wait_for_completion_timeout timed out!\n");
12049 return amdgpu_dm_set_dmub_async_sync_status(is_cmd_aux,
12050 ctx, DMUB_ASYNC_TO_SYNC_ACCESS_TIMEOUT,
12051 (uint32_t *)operation_result);
12055 if (adev->dm.dmub_notify->result == AUX_RET_SUCCESS) {
12056 struct aux_payload *payload = (struct aux_payload *)cmd_payload;
12058 payload->reply[0] = adev->dm.dmub_notify->aux_reply.command;
12059 if (!payload->write && adev->dm.dmub_notify->aux_reply.length &&
12060 payload->reply[0] == AUX_TRANSACTION_REPLY_AUX_ACK) {
12061 memcpy(payload->data, adev->dm.dmub_notify->aux_reply.data,
12062 adev->dm.dmub_notify->aux_reply.length);
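/*
 * Only reads that were ACKed carry payload data: the length reported
 * in the DMUB notification is trusted here, and the reply buffer is
 * copied back into the caller's aux_payload for completed reads.
 */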
12067 return amdgpu_dm_set_dmub_async_sync_status(is_cmd_aux,
12068 ctx, DMUB_ASYNC_TO_SYNC_ACCESS_SUCCESS,
12069 (uint32_t *)operation_result);
12073 * Check whether seamless boot is supported.
12075 * So far we only support seamless boot on CHIP_VANGOGH.
12076 * If everything goes well, we may consider expanding
12077 * seamless boot to other ASICs.
12079 bool check_seamless_boot_capability(struct amdgpu_device *adev)
12081 switch (adev->asic_type) {
12083 if (!adev->mman.keep_stolen_vga_memory)