/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: AMD
 */
/* The caprices of the preprocessor require that this be declared right here */
#define CREATE_TRACE_POINTS
#include "dm_services_types.h"
#include "dc.h"
#include "dc/inc/core_types.h"
#include "dal_asic_id.h"
#include "dmub/dmub_srv.h"
#include "dc/inc/hw/dmcu.h"
#include "dc/inc/hw/abm.h"
#include "dc/dc_dmub_srv.h"
#include "amdgpu_dm_trace.h"

#include "vid.h"
#include "amdgpu.h"
#include "amdgpu_display.h"
#include "amdgpu_ucode.h"
#include "atom.h"
#include "amdgpu_dm.h"
#ifdef CONFIG_DRM_AMD_DC_HDCP
#include "amdgpu_dm_hdcp.h"
#include <drm/drm_hdcp.h>
#endif
#include "amdgpu_pm.h"

#include "amd_shared.h"
#include "amdgpu_dm_irq.h"
#include "dm_helpers.h"
#include "amdgpu_dm_mst_types.h"
#if defined(CONFIG_DEBUG_FS)
#include "amdgpu_dm_debugfs.h"
#endif
#include "ivsrcid/ivsrcid_vislands30.h"

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/types.h>
#include <linux/pm_runtime.h>
#include <linux/pci.h>
#include <linux/firmware.h>
#include <linux/component.h>

#include <drm/drm_atomic.h>
#include <drm/drm_atomic_uapi.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_dp_mst_helper.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_edid.h>
#include <drm/drm_vblank.h>
#include <drm/drm_audio_component.h>
#include <drm/drm_hdcp.h>
#if defined(CONFIG_DRM_AMD_DC_DCN)
#include "ivsrcid/dcn/irqsrcs_dcn_1_0.h"

#include "dcn/dcn_1_0_offset.h"
#include "dcn/dcn_1_0_sh_mask.h"
#include "soc15_hw_ip.h"
#include "vega10_ip_offset.h"

#include "soc15_common.h"
#endif

#include "modules/inc/mod_freesync.h"
#include "modules/power/power_helpers.h"
#include "modules/inc/mod_info_packet.h"
#define FIRMWARE_RENOIR_DMUB "amdgpu/renoir_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_RENOIR_DMUB);
#define FIRMWARE_SIENNA_CICHLID_DMUB "amdgpu/sienna_cichlid_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_SIENNA_CICHLID_DMUB);
#define FIRMWARE_NAVY_FLOUNDER_DMUB "amdgpu/navy_flounder_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_NAVY_FLOUNDER_DMUB);
#define FIRMWARE_GREEN_SARDINE_DMUB "amdgpu/green_sardine_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_GREEN_SARDINE_DMUB);
#define FIRMWARE_VANGOGH_DMUB "amdgpu/vangogh_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_VANGOGH_DMUB);
#define FIRMWARE_DIMGREY_CAVEFISH_DMUB "amdgpu/dimgrey_cavefish_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_DIMGREY_CAVEFISH_DMUB);

#define FIRMWARE_RAVEN_DMCU "amdgpu/raven_dmcu.bin"
MODULE_FIRMWARE(FIRMWARE_RAVEN_DMCU);

#define FIRMWARE_NAVI12_DMCU "amdgpu/navi12_dmcu.bin"
MODULE_FIRMWARE(FIRMWARE_NAVI12_DMCU);
/* Number of bytes in PSP header for firmware. */
#define PSP_HEADER_BYTES 0x100

/* Number of bytes in PSP footer for firmware. */
#define PSP_FOOTER_BYTES 0x100
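
/*
 * The DMCUB ucode image as shipped is wrapped by a PSP header and footer of
 * PSP_HEADER_BYTES and PSP_FOOTER_BYTES each, so (for an illustrative
 * inst_const_bytes of 0x40200) the raw instruction/constant payload would be
 * 0x40200 - 0x100 - 0x100 = 0x40000 bytes.
 */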
/**
 * DOC: overview
 *
 * The AMDgpu display manager, **amdgpu_dm** (or even simpler,
 * **dm**) sits between DRM and DC. It acts as a liaison, converting DRM
 * requests into DC requests, and DC responses into DRM responses.
 *
 * The root control structure is &struct amdgpu_display_manager.
 */
/* basic init/fini API */
static int amdgpu_dm_init(struct amdgpu_device *adev);
static void amdgpu_dm_fini(struct amdgpu_device *adev);
static enum drm_mode_subconnector get_subconnector_type(struct dc_link *link)
{
        switch (link->dpcd_caps.dongle_type) {
        case DISPLAY_DONGLE_NONE:
                return DRM_MODE_SUBCONNECTOR_Native;
        case DISPLAY_DONGLE_DP_VGA_CONVERTER:
                return DRM_MODE_SUBCONNECTOR_VGA;
        case DISPLAY_DONGLE_DP_DVI_CONVERTER:
        case DISPLAY_DONGLE_DP_DVI_DONGLE:
                return DRM_MODE_SUBCONNECTOR_DVID;
        case DISPLAY_DONGLE_DP_HDMI_CONVERTER:
        case DISPLAY_DONGLE_DP_HDMI_DONGLE:
                return DRM_MODE_SUBCONNECTOR_HDMIA;
        case DISPLAY_DONGLE_DP_HDMI_MISMATCHED_DONGLE:
        default:
                return DRM_MODE_SUBCONNECTOR_Unknown;
        }
}
static void update_subconnector_property(struct amdgpu_dm_connector *aconnector)
{
        struct dc_link *link = aconnector->dc_link;
        struct drm_connector *connector = &aconnector->base;
        enum drm_mode_subconnector subconnector = DRM_MODE_SUBCONNECTOR_Unknown;

        if (connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort)
                return;

        if (aconnector->dc_sink)
                subconnector = get_subconnector_type(link);

        drm_object_property_set_value(&connector->base,
                        connector->dev->mode_config.dp_subconnector_property,
                        subconnector);
}
/*
 * Initializes drm_device display related structures, based on the information
 * provided by DAL. The drm structures are: drm_crtc, drm_connector,
 * drm_encoder and drm_mode_config.
 *
 * Returns 0 on success
 */
static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev);
/* removes and deallocates the drm structures, created by the above function */
static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm);
static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
                                struct drm_plane *plane,
                                unsigned long possible_crtcs,
                                const struct dc_plane_cap *plane_cap);
static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
                               struct drm_plane *plane,
                               uint32_t link_index);
static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
                                    struct amdgpu_dm_connector *amdgpu_dm_connector,
                                    uint32_t link_index,
                                    struct amdgpu_encoder *amdgpu_encoder);
static int amdgpu_dm_encoder_init(struct drm_device *dev,
                                  struct amdgpu_encoder *aencoder,
                                  uint32_t link_index);

static int amdgpu_dm_connector_get_modes(struct drm_connector *connector);

static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state);

static int amdgpu_dm_atomic_check(struct drm_device *dev,
                                  struct drm_atomic_state *state);

static void handle_cursor_update(struct drm_plane *plane,
                                 struct drm_plane_state *old_plane_state);

static void amdgpu_dm_set_psr_caps(struct dc_link *link);
static bool amdgpu_dm_psr_enable(struct dc_stream_state *stream);
static bool amdgpu_dm_link_setup_psr(struct dc_stream_state *stream);
static bool amdgpu_dm_psr_disable(struct dc_stream_state *stream);
static bool amdgpu_dm_psr_disable_all(struct amdgpu_display_manager *dm);

static const struct drm_format_info *
amd_get_format_info(const struct drm_mode_fb_cmd2 *cmd);
/**
 * dm_vblank_get_counter
 *
 * @brief
 * Get counter for number of vertical blanks
 *
 * @param
 * struct amdgpu_device *adev - [in] desired amdgpu device
 * int crtc - [in] which CRTC to get the counter from
 *
 * @return
 * Counter for vertical blanks
 */
static u32 dm_vblank_get_counter(struct amdgpu_device *adev, int crtc)
{
        if (crtc >= adev->mode_info.num_crtc)
                return 0;
        else {
                struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];

                if (acrtc->dm_irq_params.stream == NULL) {
                        DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
                                  crtc);
                        return 0;
                }

                return dc_stream_get_vblank_counter(acrtc->dm_irq_params.stream);
        }
}
static int dm_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
                                  u32 *vbl, u32 *position)
{
        uint32_t v_blank_start, v_blank_end, h_position, v_position;

        if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc))
                return -EINVAL;
        else {
                struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];

                if (acrtc->dm_irq_params.stream == NULL) {
                        DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
                                  crtc);
                        return 0;
                }

                /*
                 * TODO rework base driver to use values directly.
                 * for now parse it back into reg-format
                 */
                dc_stream_get_scanoutpos(acrtc->dm_irq_params.stream,
                                         &v_blank_start,
                                         &v_blank_end,
                                         &h_position,
                                         &v_position);
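                /*
                 * Legacy register layout: vertical value in the low 16 bits,
                 * horizontal value in the high 16 bits. As an illustrative
                 * example, v_position = 0x021c and h_position = 0x0500 pack
                 * to *position = 0x0500021c.
                 */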
                *position = v_position | (h_position << 16);
                *vbl = v_blank_start | (v_blank_end << 16);
        }

        return 0;
}
static bool dm_is_idle(void *handle)
{
        /* XXX todo */
        return true;
}

static int dm_wait_for_idle(void *handle)
{
        /* XXX todo */
        return 0;
}

static bool dm_check_soft_reset(void *handle)
{
        return false;
}

static int dm_soft_reset(void *handle)
{
        /* XXX todo */
        return 0;
}
static struct amdgpu_crtc *
get_crtc_by_otg_inst(struct amdgpu_device *adev,
                     int otg_inst)
{
        struct drm_device *dev = adev_to_drm(adev);
        struct drm_crtc *crtc;
        struct amdgpu_crtc *amdgpu_crtc;

        if (otg_inst == -1) {
                WARN_ON(1);
                return adev->mode_info.crtcs[0];
        }

        list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
                amdgpu_crtc = to_amdgpu_crtc(crtc);

                if (amdgpu_crtc->otg_inst == otg_inst)
                        return amdgpu_crtc;
        }

        return NULL;
}
static inline bool amdgpu_dm_vrr_active_irq(struct amdgpu_crtc *acrtc)
{
        return acrtc->dm_irq_params.freesync_config.state ==
                       VRR_STATE_ACTIVE_VARIABLE ||
               acrtc->dm_irq_params.freesync_config.state ==
                       VRR_STATE_ACTIVE_FIXED;
}

static inline bool amdgpu_dm_vrr_active(struct dm_crtc_state *dm_state)
{
        return dm_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE ||
               dm_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED;
}
/**
 * dm_pflip_high_irq() - Handle pageflip interrupt
 * @interrupt_params: ignored
 *
 * Handles the pageflip interrupt by notifying all interested parties
 * that the pageflip has been completed.
 */
static void dm_pflip_high_irq(void *interrupt_params)
{
        struct amdgpu_crtc *amdgpu_crtc;
        struct common_irq_params *irq_params = interrupt_params;
        struct amdgpu_device *adev = irq_params->adev;
        unsigned long flags;
        struct drm_pending_vblank_event *e;
        uint32_t vpos, hpos, v_blank_start, v_blank_end;
        bool vrr_active;

        amdgpu_crtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_PFLIP);

        /* IRQ could occur when in initial stage */
        /* TODO work and BO cleanup */
        if (amdgpu_crtc == NULL) {
                DRM_DEBUG_DRIVER("CRTC is null, returning.\n");
                return;
        }

        spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);

        if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED) {
                DRM_DEBUG_DRIVER("amdgpu_crtc->pflip_status = %d != AMDGPU_FLIP_SUBMITTED(%d) on crtc:%d[%p]\n",
                                 amdgpu_crtc->pflip_status,
                                 AMDGPU_FLIP_SUBMITTED,
                                 amdgpu_crtc->crtc_id,
                                 amdgpu_crtc);
                spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
                return;
        }

        /* page flip completed. */
        e = amdgpu_crtc->event;
        amdgpu_crtc->event = NULL;

        if (!e)
                WARN_ON(1);

        vrr_active = amdgpu_dm_vrr_active_irq(amdgpu_crtc);

        /* Fixed refresh rate, or VRR scanout position outside front-porch? */
        if (!vrr_active ||
            !dc_stream_get_scanoutpos(amdgpu_crtc->dm_irq_params.stream, &v_blank_start,
                                      &v_blank_end, &hpos, &vpos) ||
            (vpos < v_blank_start)) {
                /* Update to correct count and vblank timestamp if racing with
                 * vblank irq. This also updates to the correct vblank timestamp
                 * even in VRR mode, as scanout is past the front-porch atm.
                 */
                drm_crtc_accurate_vblank_count(&amdgpu_crtc->base);

                /* Wake up userspace by sending the pageflip event with proper
                 * count and timestamp of vblank of flip completion.
                 */
                if (e) {
                        drm_crtc_send_vblank_event(&amdgpu_crtc->base, e);

                        /* Event sent, so done with vblank for this flip */
                        drm_crtc_vblank_put(&amdgpu_crtc->base);
                }
        } else if (e) {
                /* VRR active and inside front-porch: vblank count and
                 * timestamp for pageflip event will only be up to date after
                 * drm_crtc_handle_vblank() has been executed from late vblank
                 * irq handler after start of back-porch (vline 0). We queue the
                 * pageflip event for send-out by drm_crtc_handle_vblank() with
                 * updated timestamp and count, once it runs after us.
                 *
                 * We need to open-code this instead of using the helper
                 * drm_crtc_arm_vblank_event(), as that helper would
                 * call drm_crtc_accurate_vblank_count(), which we must
                 * not call in VRR mode while we are in front-porch!
                 */

                /* sequence will be replaced by real count during send-out. */
                e->sequence = drm_crtc_vblank_count(&amdgpu_crtc->base);
                e->pipe = amdgpu_crtc->crtc_id;

                list_add_tail(&e->base.link, &adev_to_drm(adev)->vblank_event_list);
                e = NULL;
        }

        /* Keep track of vblank of this flip for flip throttling. We use the
         * cooked hw counter, as that one increments at start of this vblank
         * of pageflip completion, so last_flip_vblank is the forbidden count
         * for queueing new pageflips if vsync + VRR is enabled.
         */
        amdgpu_crtc->dm_irq_params.last_flip_vblank =
                amdgpu_get_vblank_counter_kms(&amdgpu_crtc->base);

        amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE;
        spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);

        DRM_DEBUG_DRIVER("crtc:%d[%p], pflip_stat:AMDGPU_FLIP_NONE, vrr[%d]-fp %d\n",
                         amdgpu_crtc->crtc_id, amdgpu_crtc,
                         vrr_active, (int)!e);
}
static void dm_vupdate_high_irq(void *interrupt_params)
{
        struct common_irq_params *irq_params = interrupt_params;
        struct amdgpu_device *adev = irq_params->adev;
        struct amdgpu_crtc *acrtc;
        unsigned long flags;
        int vrr_active;

        acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VUPDATE);

        if (acrtc) {
                vrr_active = amdgpu_dm_vrr_active_irq(acrtc);

                DRM_DEBUG_VBL("crtc:%d, vupdate-vrr:%d\n",
                              acrtc->crtc_id,
                              vrr_active);

                /* Core vblank handling is done here after end of front-porch in
                 * vrr mode, as vblank timestamping will give valid results
                 * while now done after front-porch. This will also deliver
                 * page-flip completion events that have been queued to us
                 * if a pageflip happened inside front-porch.
                 */
                if (vrr_active) {
                        drm_crtc_handle_vblank(&acrtc->base);

                        /* BTR processing for pre-DCE12 ASICs */
                        if (acrtc->dm_irq_params.stream &&
                            adev->family < AMDGPU_FAMILY_AI) {
                                spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
                                mod_freesync_handle_v_update(
                                    adev->dm.freesync_module,
                                    acrtc->dm_irq_params.stream,
                                    &acrtc->dm_irq_params.vrr_params);

                                dc_stream_adjust_vmin_vmax(
                                    adev->dm.dc,
                                    acrtc->dm_irq_params.stream,
                                    &acrtc->dm_irq_params.vrr_params.adjust);
                                spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
                        }
                }
        }
}
/**
 * dm_crtc_high_irq() - Handles CRTC interrupt
 * @interrupt_params: used for determining the CRTC instance
 *
 * Handles the CRTC/VSYNC interrupt by notifying DRM's VBLANK
 * event handler.
 */
static void dm_crtc_high_irq(void *interrupt_params)
{
        struct common_irq_params *irq_params = interrupt_params;
        struct amdgpu_device *adev = irq_params->adev;
        struct amdgpu_crtc *acrtc;
        unsigned long flags;
        int vrr_active;

        acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VBLANK);
        if (!acrtc)
                return;

        vrr_active = amdgpu_dm_vrr_active_irq(acrtc);

        DRM_DEBUG_VBL("crtc:%d, vupdate-vrr:%d, planes:%d\n", acrtc->crtc_id,
                      vrr_active, acrtc->dm_irq_params.active_planes);

        /**
         * Core vblank handling at start of front-porch is only possible
         * in non-vrr mode, as only there vblank timestamping will give
         * valid results while done in front-porch. Otherwise defer it
         * to dm_vupdate_high_irq after end of front-porch.
         */
        if (!vrr_active)
                drm_crtc_handle_vblank(&acrtc->base);

        /**
         * Following stuff must happen at start of vblank, for crc
         * computation and below-the-range btr support in vrr mode.
         */
        amdgpu_dm_crtc_handle_crc_irq(&acrtc->base);

        /* BTR updates need to happen before VUPDATE on Vega and above. */
        if (adev->family < AMDGPU_FAMILY_AI)
                return;

        spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);

        if (acrtc->dm_irq_params.stream &&
            acrtc->dm_irq_params.vrr_params.supported &&
            acrtc->dm_irq_params.freesync_config.state ==
                    VRR_STATE_ACTIVE_VARIABLE) {
                mod_freesync_handle_v_update(adev->dm.freesync_module,
                                             acrtc->dm_irq_params.stream,
                                             &acrtc->dm_irq_params.vrr_params);

                dc_stream_adjust_vmin_vmax(adev->dm.dc, acrtc->dm_irq_params.stream,
                                           &acrtc->dm_irq_params.vrr_params.adjust);
        }

        /*
         * If there aren't any active_planes then DCH HUBP may be clock-gated.
         * In that case, pageflip completion interrupts won't fire and pageflip
         * completion events won't get delivered. Prevent this by sending
         * pending pageflip events from here if a flip is still pending.
         *
         * If any planes are enabled, use dm_pflip_high_irq() instead, to
         * avoid race conditions between flip programming and completion,
         * which could cause too early flip completion events.
         */
        if (adev->family >= AMDGPU_FAMILY_RV &&
            acrtc->pflip_status == AMDGPU_FLIP_SUBMITTED &&
            acrtc->dm_irq_params.active_planes == 0) {
                if (acrtc->event) {
                        drm_crtc_send_vblank_event(&acrtc->base, acrtc->event);
                        acrtc->event = NULL;
                        drm_crtc_vblank_put(&acrtc->base);
                }
                acrtc->pflip_status = AMDGPU_FLIP_NONE;
        }

        spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
}
static int dm_set_clockgating_state(void *handle,
                                    enum amd_clockgating_state state)
{
        return 0;
}

static int dm_set_powergating_state(void *handle,
                                    enum amd_powergating_state state)
{
        return 0;
}

/* Prototypes of private functions */
static int dm_early_init(void *handle);
/* Allocate memory for FBC compressed data */
static void amdgpu_dm_fbc_init(struct drm_connector *connector)
{
        struct drm_device *dev = connector->dev;
        struct amdgpu_device *adev = drm_to_adev(dev);
        struct dm_compressor_info *compressor = &adev->dm.compressor;
        struct amdgpu_dm_connector *aconn = to_amdgpu_dm_connector(connector);
        struct drm_display_mode *mode;
        unsigned long max_size = 0;

        if (adev->dm.dc->fbc_compressor == NULL)
                return;

        if (aconn->dc_link->connector_signal != SIGNAL_TYPE_EDP)
                return;

        if (compressor->bo_ptr)
                return;

        list_for_each_entry(mode, &connector->modes, head) {
                if (max_size < mode->htotal * mode->vtotal)
                        max_size = mode->htotal * mode->vtotal;
        }
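
        /* max_size counts pixels; the compressor BO below assumes 4 bytes per pixel. */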
        if (max_size) {
                int r = amdgpu_bo_create_kernel(adev, max_size * 4, PAGE_SIZE,
                            AMDGPU_GEM_DOMAIN_GTT, &compressor->bo_ptr,
                            &compressor->gpu_addr, &compressor->cpu_addr);

                if (r)
                        DRM_ERROR("DM: Failed to initialize FBC\n");
                else {
                        adev->dm.dc->ctx->fbc_gpu_addr = compressor->gpu_addr;
                        DRM_INFO("DM: FBC alloc %lu\n", max_size*4);
                }
        }
}
static int amdgpu_dm_audio_component_get_eld(struct device *kdev, int port,
                                             int pipe, bool *enabled,
                                             unsigned char *buf, int max_bytes)
{
        struct drm_device *dev = dev_get_drvdata(kdev);
        struct amdgpu_device *adev = drm_to_adev(dev);
        struct drm_connector *connector;
        struct drm_connector_list_iter conn_iter;
        struct amdgpu_dm_connector *aconnector;
        int ret = 0;

        *enabled = false;

        mutex_lock(&adev->dm.audio_lock);

        drm_connector_list_iter_begin(dev, &conn_iter);
        drm_for_each_connector_iter(connector, &conn_iter) {
                aconnector = to_amdgpu_dm_connector(connector);
                if (aconnector->audio_inst != port)
                        continue;

                *enabled = true;
                ret = drm_eld_size(connector->eld);
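                /* Copy at most max_bytes; ret still reports the full ELD size. */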
                memcpy(buf, connector->eld, min(max_bytes, ret));

                break;
        }
        drm_connector_list_iter_end(&conn_iter);

        mutex_unlock(&adev->dm.audio_lock);

        DRM_DEBUG_KMS("Get ELD : idx=%d ret=%d en=%d\n", port, ret, *enabled);

        return ret;
}
static const struct drm_audio_component_ops amdgpu_dm_audio_component_ops = {
        .get_eld = amdgpu_dm_audio_component_get_eld,
};

static int amdgpu_dm_audio_component_bind(struct device *kdev,
                                          struct device *hda_kdev, void *data)
{
        struct drm_device *dev = dev_get_drvdata(kdev);
        struct amdgpu_device *adev = drm_to_adev(dev);
        struct drm_audio_component *acomp = data;

        acomp->ops = &amdgpu_dm_audio_component_ops;
        acomp->dev = kdev;
        adev->dm.audio_component = acomp;

        return 0;
}

static void amdgpu_dm_audio_component_unbind(struct device *kdev,
                                             struct device *hda_kdev, void *data)
{
        struct drm_device *dev = dev_get_drvdata(kdev);
        struct amdgpu_device *adev = drm_to_adev(dev);
        struct drm_audio_component *acomp = data;

        acomp->ops = NULL;
        acomp->dev = NULL;
        adev->dm.audio_component = NULL;
}

static const struct component_ops amdgpu_dm_audio_component_bind_ops = {
        .bind   = amdgpu_dm_audio_component_bind,
        .unbind = amdgpu_dm_audio_component_unbind,
};
static int amdgpu_dm_audio_init(struct amdgpu_device *adev)
{
        int i, ret;

        if (!amdgpu_audio)
                return 0;

        adev->mode_info.audio.enabled = true;

        adev->mode_info.audio.num_pins = adev->dm.dc->res_pool->audio_count;

        for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
                adev->mode_info.audio.pin[i].channels = -1;
                adev->mode_info.audio.pin[i].rate = -1;
                adev->mode_info.audio.pin[i].bits_per_sample = -1;
                adev->mode_info.audio.pin[i].status_bits = 0;
                adev->mode_info.audio.pin[i].category_code = 0;
                adev->mode_info.audio.pin[i].connected = false;
                adev->mode_info.audio.pin[i].id =
                        adev->dm.dc->res_pool->audios[i]->inst;
                adev->mode_info.audio.pin[i].offset = 0;
        }

        ret = component_add(adev->dev, &amdgpu_dm_audio_component_bind_ops);
        if (ret < 0)
                return ret;

        adev->dm.audio_registered = true;

        return 0;
}
static void amdgpu_dm_audio_fini(struct amdgpu_device *adev)
{
        if (!amdgpu_audio)
                return;

        if (!adev->mode_info.audio.enabled)
                return;

        if (adev->dm.audio_registered) {
                component_del(adev->dev, &amdgpu_dm_audio_component_bind_ops);
                adev->dm.audio_registered = false;
        }

        /* TODO: Disable audio? */

        adev->mode_info.audio.enabled = false;
}

static void amdgpu_dm_audio_eld_notify(struct amdgpu_device *adev, int pin)
{
        struct drm_audio_component *acomp = adev->dm.audio_component;

        if (acomp && acomp->audio_ops && acomp->audio_ops->pin_eld_notify) {
                DRM_DEBUG_KMS("Notify ELD: %d\n", pin);

                acomp->audio_ops->pin_eld_notify(acomp->audio_ops->audio_ptr,
                                                 pin, -1);
        }
}
static int dm_dmub_hw_init(struct amdgpu_device *adev)
{
        const struct dmcub_firmware_header_v1_0 *hdr;
        struct dmub_srv *dmub_srv = adev->dm.dmub_srv;
        struct dmub_srv_fb_info *fb_info = adev->dm.dmub_fb_info;
        const struct firmware *dmub_fw = adev->dm.dmub_fw;
        struct dmcu *dmcu = adev->dm.dc->res_pool->dmcu;
        struct abm *abm = adev->dm.dc->res_pool->abm;
        struct dmub_srv_hw_params hw_params;
        enum dmub_status status;
        const unsigned char *fw_inst_const, *fw_bss_data;
        uint32_t i, fw_inst_const_size, fw_bss_data_size;
        bool has_hw_support;

        if (!dmub_srv)
                /* DMUB isn't supported on the ASIC. */
                return 0;

        if (!fb_info) {
                DRM_ERROR("No framebuffer info for DMUB service.\n");
                return -EINVAL;
        }

        if (!dmub_fw) {
                /* Firmware required for DMUB support. */
                DRM_ERROR("No firmware provided for DMUB.\n");
                return -EINVAL;
        }

        status = dmub_srv_has_hw_support(dmub_srv, &has_hw_support);
        if (status != DMUB_STATUS_OK) {
                DRM_ERROR("Error checking HW support for DMUB: %d\n", status);
                return -EINVAL;
        }

        if (!has_hw_support) {
                DRM_INFO("DMUB unsupported on ASIC\n");
                return 0;
        }

        hdr = (const struct dmcub_firmware_header_v1_0 *)dmub_fw->data;

        fw_inst_const = dmub_fw->data +
                        le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
                        PSP_HEADER_BYTES;

        fw_bss_data = dmub_fw->data +
                      le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
                      le32_to_cpu(hdr->inst_const_bytes);
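
        /*
         * Note: hdr->inst_const_bytes counts the PSP header and footer too,
         * so bss/data starts right after the wrapped inst/const region while
         * the usable payload computed below excludes both wrappers.
         */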
        /* Copy firmware and bios info into FB memory. */
        fw_inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
                             PSP_HEADER_BYTES - PSP_FOOTER_BYTES;

        fw_bss_data_size = le32_to_cpu(hdr->bss_data_bytes);

        /* if adev->firmware.load_type == AMDGPU_FW_LOAD_PSP,
         * amdgpu_ucode_init_single_fw will load the dmub firmware
         * fw_inst_const part to cw0; otherwise, the firmware back door load
         * will be done by dm_dmub_hw_init
         */
        if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
                memcpy(fb_info->fb[DMUB_WINDOW_0_INST_CONST].cpu_addr, fw_inst_const,
                       fw_inst_const_size);
        }

        if (fw_bss_data_size)
                memcpy(fb_info->fb[DMUB_WINDOW_2_BSS_DATA].cpu_addr,
                       fw_bss_data, fw_bss_data_size);

        /* Copy firmware bios info into FB memory. */
        memcpy(fb_info->fb[DMUB_WINDOW_3_VBIOS].cpu_addr, adev->bios,
               adev->bios_size);

        /* Reset regions that need to be reset. */
        memset(fb_info->fb[DMUB_WINDOW_4_MAILBOX].cpu_addr, 0,
               fb_info->fb[DMUB_WINDOW_4_MAILBOX].size);

        memset(fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].cpu_addr, 0,
               fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].size);

        memset(fb_info->fb[DMUB_WINDOW_6_FW_STATE].cpu_addr, 0,
               fb_info->fb[DMUB_WINDOW_6_FW_STATE].size);

        /* Initialize hardware. */
        memset(&hw_params, 0, sizeof(hw_params));
        hw_params.fb_base = adev->gmc.fb_start;
        hw_params.fb_offset = adev->gmc.aper_base;

        /* backdoor load firmware and trigger dmub running */
        if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
                hw_params.load_inst_const = true;

        if (dmcu)
                hw_params.psp_version = dmcu->psp_version;

        for (i = 0; i < fb_info->num_fb; ++i)
                hw_params.fb[i] = &fb_info->fb[i];

        status = dmub_srv_hw_init(dmub_srv, &hw_params);
        if (status != DMUB_STATUS_OK) {
                DRM_ERROR("Error initializing DMUB HW: %d\n", status);
                return -EINVAL;
        }

        /* Wait for firmware load to finish. */
        status = dmub_srv_wait_for_auto_load(dmub_srv, 100000);
        if (status != DMUB_STATUS_OK)
                DRM_WARN("Wait for DMUB auto-load failed: %d\n", status);

        /* Init DMCU and ABM if available. */
        if (dmcu && abm) {
                dmcu->funcs->dmcu_init(dmcu);
                abm->dmcu_is_running = dmcu->funcs->is_dmcu_initialized(dmcu);
        }

        adev->dm.dc->ctx->dmub_srv = dc_dmub_srv_create(adev->dm.dc, dmub_srv);
        if (!adev->dm.dc->ctx->dmub_srv) {
                DRM_ERROR("Couldn't allocate DC DMUB server!\n");
                return -ENOMEM;
        }

        DRM_INFO("DMUB hardware initialized: version=0x%08X\n",
                 adev->dm.dmcub_fw_version);

        return 0;
}
#if defined(CONFIG_DRM_AMD_DC_DCN)
static void mmhub_read_system_context(struct amdgpu_device *adev, struct dc_phy_addr_space_config *pa_config)
{
        uint64_t pt_base;
        uint32_t logical_addr_low;
        uint32_t logical_addr_high;
        uint32_t agp_base, agp_bot, agp_top;
        PHYSICAL_ADDRESS_LOC page_table_start, page_table_end, page_table_base;

        logical_addr_low = min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18;
        pt_base = amdgpu_gmc_pd_addr(adev->gart.bo);

        if (adev->apu_flags & AMD_APU_IS_RAVEN2)
                /*
                 * Raven2 has a HW issue that it is unable to use the vram which
                 * is out of MC_VM_SYSTEM_APERTURE_HIGH_ADDR. So here is the
                 * workaround that increases the system aperture high address
                 * (by adding 1) to get rid of the VM fault and hardware hang.
                 */
                logical_addr_high = max((adev->gmc.fb_end >> 18) + 0x1, adev->gmc.agp_end >> 18);
        else
                logical_addr_high = max(adev->gmc.fb_end, adev->gmc.agp_end) >> 18;

        agp_base = 0;
        agp_bot = adev->gmc.agp_start >> 24;
        agp_top = adev->gmc.agp_end >> 24;
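
        /*
         * The system aperture bounds are kept in 256KB units (hence the
         * >> 18 / << 18 pairs) and the AGP window in 16MB units (>> 24 /
         * << 24); illustratively, an fb_start of 0x80000000 yields a
         * logical_addr_low of 0x2000. GART page-table addresses are split
         * into a 32-bit low part (address bits 12..43, i.e. the 4KB page
         * frame number) and a 4-bit high part (bits 44..47).
         */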
        page_table_start.high_part = (u32)(adev->gmc.gart_start >> 44) & 0xF;
        page_table_start.low_part = (u32)(adev->gmc.gart_start >> 12);
        page_table_end.high_part = (u32)(adev->gmc.gart_end >> 44) & 0xF;
        page_table_end.low_part = (u32)(adev->gmc.gart_end >> 12);
        page_table_base.high_part = upper_32_bits(pt_base) & 0xF;
        page_table_base.low_part = lower_32_bits(pt_base);

        pa_config->system_aperture.start_addr = (uint64_t)logical_addr_low << 18;
        pa_config->system_aperture.end_addr = (uint64_t)logical_addr_high << 18;

        pa_config->system_aperture.agp_base = (uint64_t)agp_base << 24;
        pa_config->system_aperture.agp_bot = (uint64_t)agp_bot << 24;
        pa_config->system_aperture.agp_top = (uint64_t)agp_top << 24;

        pa_config->system_aperture.fb_base = adev->gmc.fb_start;
        pa_config->system_aperture.fb_offset = adev->gmc.aper_base;
        pa_config->system_aperture.fb_top = adev->gmc.fb_end;

        pa_config->gart_config.page_table_start_addr = page_table_start.quad_part << 12;
        pa_config->gart_config.page_table_end_addr = page_table_end.quad_part << 12;
        pa_config->gart_config.page_table_base_addr = page_table_base.quad_part;

        pa_config->is_hvm_enabled = 0;
}
#endif
static int amdgpu_dm_init(struct amdgpu_device *adev)
{
        struct dc_init_data init_data;
#ifdef CONFIG_DRM_AMD_DC_HDCP
        struct dc_callback_init init_params;
#endif
        int r;

        adev->dm.ddev = adev_to_drm(adev);
        adev->dm.adev = adev;

        /* Zero all the fields */
        memset(&init_data, 0, sizeof(init_data));
#ifdef CONFIG_DRM_AMD_DC_HDCP
        memset(&init_params, 0, sizeof(init_params));
#endif

        mutex_init(&adev->dm.dc_lock);
        mutex_init(&adev->dm.audio_lock);

        if (amdgpu_dm_irq_init(adev)) {
                DRM_ERROR("amdgpu: failed to initialize DM IRQ support.\n");
                goto error;
        }

        init_data.asic_id.chip_family = adev->family;

        init_data.asic_id.pci_revision_id = adev->pdev->revision;
        init_data.asic_id.hw_internal_rev = adev->external_rev_id;

        init_data.asic_id.vram_width = adev->gmc.vram_width;
        /* TODO: initialize init_data.asic_id.vram_type here!!!! */
        init_data.asic_id.atombios_base_address =
                adev->mode_info.atom_context->bios;

        init_data.driver = adev;

        adev->dm.cgs_device = amdgpu_cgs_create_device(adev);

        if (!adev->dm.cgs_device) {
                DRM_ERROR("amdgpu: failed to create cgs device.\n");
                goto error;
        }

        init_data.cgs_device = adev->dm.cgs_device;

        init_data.dce_environment = DCE_ENV_PRODUCTION_DRV;

        switch (adev->asic_type) {
        case CHIP_CARRIZO:
        case CHIP_STONEY:
        case CHIP_RAVEN:
        case CHIP_RENOIR:
                init_data.flags.gpu_vm_support = true;
                if (ASICREV_IS_GREEN_SARDINE(adev->external_rev_id))
                        init_data.flags.disable_dmcu = true;
                break;
#if defined(CONFIG_DRM_AMD_DC_DCN)
        case CHIP_VANGOGH:
                init_data.flags.gpu_vm_support = true;
                break;
#endif
        default:
                break;
        }

        if (amdgpu_dc_feature_mask & DC_FBC_MASK)
                init_data.flags.fbc_support = true;

        if (amdgpu_dc_feature_mask & DC_MULTI_MON_PP_MCLK_SWITCH_MASK)
                init_data.flags.multi_mon_pp_mclk_switch = true;

        if (amdgpu_dc_feature_mask & DC_DISABLE_FRACTIONAL_PWM_MASK)
                init_data.flags.disable_fractional_pwm = true;

        init_data.flags.power_down_display_on_boot = true;

        /* Display Core create. */
        adev->dm.dc = dc_create(&init_data);

        if (adev->dm.dc) {
                DRM_INFO("Display Core initialized with v%s!\n", DC_VER);
        } else {
                DRM_INFO("Display Core failed to initialize with v%s!\n", DC_VER);
                goto error;
        }

        if (amdgpu_dc_debug_mask & DC_DISABLE_PIPE_SPLIT) {
                adev->dm.dc->debug.force_single_disp_pipe_split = false;
                adev->dm.dc->debug.pipe_split_policy = MPC_SPLIT_AVOID;
        }

        if (adev->asic_type != CHIP_CARRIZO && adev->asic_type != CHIP_STONEY)
                adev->dm.dc->debug.disable_stutter = amdgpu_pp_feature_mask & PP_STUTTER_MODE ? false : true;

        if (amdgpu_dc_debug_mask & DC_DISABLE_STUTTER)
                adev->dm.dc->debug.disable_stutter = true;

        if (amdgpu_dc_debug_mask & DC_DISABLE_DSC)
                adev->dm.dc->debug.disable_dsc = true;

        if (amdgpu_dc_debug_mask & DC_DISABLE_CLOCK_GATING)
                adev->dm.dc->debug.disable_clock_gate = true;

        r = dm_dmub_hw_init(adev);
        if (r) {
                DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
                goto error;
        }

        dc_hardware_init(adev->dm.dc);

#if defined(CONFIG_DRM_AMD_DC_DCN)
        if (adev->apu_flags) {
                struct dc_phy_addr_space_config pa_config;

                mmhub_read_system_context(adev, &pa_config);

                // Call the DC init_memory func
                dc_setup_system_context(adev->dm.dc, &pa_config);
        }
#endif

        adev->dm.freesync_module = mod_freesync_create(adev->dm.dc);
        if (!adev->dm.freesync_module) {
                DRM_ERROR(
                "amdgpu: failed to initialize freesync_module.\n");
        } else
                DRM_DEBUG_DRIVER("amdgpu: freesync_module init done %p.\n",
                                adev->dm.freesync_module);

        amdgpu_dm_init_color_mod();

#ifdef CONFIG_DRM_AMD_DC_HDCP
        if (adev->dm.dc->caps.max_links > 0 && adev->asic_type >= CHIP_RAVEN) {
                adev->dm.hdcp_workqueue = hdcp_create_workqueue(adev, &init_params.cp_psp, adev->dm.dc);

                if (!adev->dm.hdcp_workqueue)
                        DRM_ERROR("amdgpu: failed to initialize hdcp_workqueue.\n");
                else
                        DRM_DEBUG_DRIVER("amdgpu: hdcp_workqueue init done %p.\n", adev->dm.hdcp_workqueue);

                dc_init_callbacks(adev->dm.dc, &init_params);
        }
#endif
        if (amdgpu_dm_initialize_drm_device(adev)) {
                DRM_ERROR(
                "amdgpu: failed to initialize sw for display support.\n");
                goto error;
        }

        /* create fake encoders for MST */
        dm_dp_create_fake_mst_encoders(adev);

        /* TODO: Add_display_info? */

        /* TODO use dynamic cursor width */
        adev_to_drm(adev)->mode_config.cursor_width = adev->dm.dc->caps.max_cursor_size;
        adev_to_drm(adev)->mode_config.cursor_height = adev->dm.dc->caps.max_cursor_size;

        if (drm_vblank_init(adev_to_drm(adev), adev->dm.display_indexes_num)) {
                DRM_ERROR(
                "amdgpu: failed to initialize sw for display support.\n");
                goto error;
        }

        DRM_DEBUG_DRIVER("KMS initialized.\n");

        return 0;
error:
        amdgpu_dm_fini(adev);

        return -EINVAL;
}
static void amdgpu_dm_fini(struct amdgpu_device *adev)
{
        int i;

        for (i = 0; i < adev->dm.display_indexes_num; i++) {
                drm_encoder_cleanup(&adev->dm.mst_encoders[i].base);
        }

        amdgpu_dm_audio_fini(adev);

        amdgpu_dm_destroy_drm_device(&adev->dm);

#ifdef CONFIG_DRM_AMD_DC_HDCP
        if (adev->dm.hdcp_workqueue) {
                hdcp_destroy(&adev->dev->kobj, adev->dm.hdcp_workqueue);
                adev->dm.hdcp_workqueue = NULL;
        }

        if (adev->dm.dc)
                dc_deinit_callbacks(adev->dm.dc);
#endif
        if (adev->dm.dc->ctx->dmub_srv) {
                dc_dmub_srv_destroy(&adev->dm.dc->ctx->dmub_srv);
                adev->dm.dc->ctx->dmub_srv = NULL;
        }

        if (adev->dm.dmub_bo)
                amdgpu_bo_free_kernel(&adev->dm.dmub_bo,
                                      &adev->dm.dmub_bo_gpu_addr,
                                      &adev->dm.dmub_bo_cpu_addr);

        /* DC Destroy TODO: Replace destroy DAL */
        if (adev->dm.dc)
                dc_destroy(&adev->dm.dc);
        /*
         * TODO: pageflip, vblank interrupt
         *
         * amdgpu_dm_irq_fini(adev);
         */

        if (adev->dm.cgs_device) {
                amdgpu_cgs_destroy_device(adev->dm.cgs_device);
                adev->dm.cgs_device = NULL;
        }
        if (adev->dm.freesync_module) {
                mod_freesync_destroy(adev->dm.freesync_module);
                adev->dm.freesync_module = NULL;
        }

        mutex_destroy(&adev->dm.audio_lock);
        mutex_destroy(&adev->dm.dc_lock);

        return;
}
static int load_dmcu_fw(struct amdgpu_device *adev)
{
        const char *fw_name_dmcu = NULL;
        int r;
        const struct dmcu_firmware_header_v1_0 *hdr;

        switch (adev->asic_type) {
#if defined(CONFIG_DRM_AMD_DC_SI)
        case CHIP_TAHITI:
        case CHIP_PITCAIRN:
        case CHIP_VERDE:
        case CHIP_OLAND:
#endif
        case CHIP_BONAIRE:
        case CHIP_HAWAII:
        case CHIP_KAVERI:
        case CHIP_KABINI:
        case CHIP_MULLINS:
        case CHIP_TONGA:
        case CHIP_FIJI:
        case CHIP_CARRIZO:
        case CHIP_STONEY:
        case CHIP_POLARIS11:
        case CHIP_POLARIS10:
        case CHIP_POLARIS12:
        case CHIP_VEGAM:
        case CHIP_VEGA10:
        case CHIP_VEGA12:
        case CHIP_VEGA20:
        case CHIP_NAVI10:
        case CHIP_NAVI14:
        case CHIP_RENOIR:
        case CHIP_SIENNA_CICHLID:
        case CHIP_NAVY_FLOUNDER:
        case CHIP_DIMGREY_CAVEFISH:
        case CHIP_VANGOGH:
                return 0;
        case CHIP_NAVI12:
                fw_name_dmcu = FIRMWARE_NAVI12_DMCU;
                break;
        case CHIP_RAVEN:
                if (ASICREV_IS_PICASSO(adev->external_rev_id))
                        fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
                else if (ASICREV_IS_RAVEN2(adev->external_rev_id))
                        fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
                else
                        return 0;
                break;
        default:
                DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
                return -EINVAL;
        }

        if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
                DRM_DEBUG_KMS("dm: DMCU firmware not supported on direct or SMU loading\n");
                return 0;
        }

        r = request_firmware_direct(&adev->dm.fw_dmcu, fw_name_dmcu, adev->dev);
        if (r == -ENOENT) {
                /* DMCU firmware is not necessary, so don't raise a fuss if it's missing */
                DRM_DEBUG_KMS("dm: DMCU firmware not found\n");
                adev->dm.fw_dmcu = NULL;
                return 0;
        }
        if (r) {
                dev_err(adev->dev, "amdgpu_dm: Can't load firmware \"%s\"\n",
                        fw_name_dmcu);
                return r;
        }

        r = amdgpu_ucode_validate(adev->dm.fw_dmcu);
        if (r) {
                dev_err(adev->dev, "amdgpu_dm: Can't validate firmware \"%s\"\n",
                        fw_name_dmcu);
                release_firmware(adev->dm.fw_dmcu);
                adev->dm.fw_dmcu = NULL;
                return r;
        }

        hdr = (const struct dmcu_firmware_header_v1_0 *)adev->dm.fw_dmcu->data;
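
        /*
         * The DMCU image carries two pieces that the PSP loader tracks
         * separately: the ERAM payload (ucode_size_bytes - intv_size_bytes)
         * and the interrupt vector table (intv_size_bytes), each rounded up
         * to a page below.
         */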
        adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].ucode_id = AMDGPU_UCODE_ID_DMCU_ERAM;
        adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].fw = adev->dm.fw_dmcu;
        adev->firmware.fw_size +=
                ALIGN(le32_to_cpu(hdr->header.ucode_size_bytes) - le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);

        adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].ucode_id = AMDGPU_UCODE_ID_DMCU_INTV;
        adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].fw = adev->dm.fw_dmcu;
        adev->firmware.fw_size +=
                ALIGN(le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);

        adev->dm.dmcu_fw_version = le32_to_cpu(hdr->header.ucode_version);

        DRM_DEBUG_KMS("PSP loading DMCU firmware\n");

        return 0;
}
static uint32_t amdgpu_dm_dmub_reg_read(void *ctx, uint32_t address)
{
        struct amdgpu_device *adev = ctx;

        return dm_read_reg(adev->dm.dc->ctx, address);
}

static void amdgpu_dm_dmub_reg_write(void *ctx, uint32_t address,
                                     uint32_t value)
{
        struct amdgpu_device *adev = ctx;

        return dm_write_reg(adev->dm.dc->ctx, address, value);
}
static int dm_dmub_sw_init(struct amdgpu_device *adev)
{
        struct dmub_srv_create_params create_params;
        struct dmub_srv_region_params region_params;
        struct dmub_srv_region_info region_info;
        struct dmub_srv_fb_params fb_params;
        struct dmub_srv_fb_info *fb_info;
        struct dmub_srv *dmub_srv;
        const struct dmcub_firmware_header_v1_0 *hdr;
        const char *fw_name_dmub;
        enum dmub_asic dmub_asic;
        enum dmub_status status;
        int r;

        switch (adev->asic_type) {
        case CHIP_RENOIR:
                dmub_asic = DMUB_ASIC_DCN21;
                fw_name_dmub = FIRMWARE_RENOIR_DMUB;
                if (ASICREV_IS_GREEN_SARDINE(adev->external_rev_id))
                        fw_name_dmub = FIRMWARE_GREEN_SARDINE_DMUB;
                break;
        case CHIP_SIENNA_CICHLID:
                dmub_asic = DMUB_ASIC_DCN30;
                fw_name_dmub = FIRMWARE_SIENNA_CICHLID_DMUB;
                break;
        case CHIP_NAVY_FLOUNDER:
                dmub_asic = DMUB_ASIC_DCN30;
                fw_name_dmub = FIRMWARE_NAVY_FLOUNDER_DMUB;
                break;
        case CHIP_VANGOGH:
                dmub_asic = DMUB_ASIC_DCN301;
                fw_name_dmub = FIRMWARE_VANGOGH_DMUB;
                break;
        case CHIP_DIMGREY_CAVEFISH:
                dmub_asic = DMUB_ASIC_DCN302;
                fw_name_dmub = FIRMWARE_DIMGREY_CAVEFISH_DMUB;
                break;

        default:
                /* ASIC doesn't support DMUB. */
                return 0;
        }

        r = request_firmware_direct(&adev->dm.dmub_fw, fw_name_dmub, adev->dev);
        if (r) {
                DRM_ERROR("DMUB firmware loading failed: %d\n", r);
                return 0;
        }

        r = amdgpu_ucode_validate(adev->dm.dmub_fw);
        if (r) {
                DRM_ERROR("Couldn't validate DMUB firmware: %d\n", r);
                return 0;
        }

        hdr = (const struct dmcub_firmware_header_v1_0 *)adev->dm.dmub_fw->data;

        if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
                adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].ucode_id =
                        AMDGPU_UCODE_ID_DMCUB;
                adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].fw =
                        adev->dm.dmub_fw;
                adev->firmware.fw_size +=
                        ALIGN(le32_to_cpu(hdr->inst_const_bytes), PAGE_SIZE);

                DRM_INFO("Loading DMUB firmware via PSP: version=0x%08X\n",
                         adev->dm.dmcub_fw_version);
        }

        adev->dm.dmcub_fw_version = le32_to_cpu(hdr->header.ucode_version);

        adev->dm.dmub_srv = kzalloc(sizeof(*adev->dm.dmub_srv), GFP_KERNEL);
        dmub_srv = adev->dm.dmub_srv;

        if (!dmub_srv) {
                DRM_ERROR("Failed to allocate DMUB service!\n");
                return -ENOMEM;
        }

        memset(&create_params, 0, sizeof(create_params));
        create_params.user_ctx = adev;
        create_params.funcs.reg_read = amdgpu_dm_dmub_reg_read;
        create_params.funcs.reg_write = amdgpu_dm_dmub_reg_write;
        create_params.asic = dmub_asic;

        /* Create the DMUB service. */
        status = dmub_srv_create(dmub_srv, &create_params);
        if (status != DMUB_STATUS_OK) {
                DRM_ERROR("Error creating DMUB service: %d\n", status);
                return -EINVAL;
        }

        /* Calculate the size of all the regions for the DMUB service. */
        memset(&region_params, 0, sizeof(region_params));

        region_params.inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
                                        PSP_HEADER_BYTES - PSP_FOOTER_BYTES;
        region_params.bss_data_size = le32_to_cpu(hdr->bss_data_bytes);
        region_params.vbios_size = adev->bios_size;
        region_params.fw_bss_data = region_params.bss_data_size ?
                adev->dm.dmub_fw->data +
                le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
                le32_to_cpu(hdr->inst_const_bytes) : NULL;
        region_params.fw_inst_const =
                adev->dm.dmub_fw->data +
                le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
                PSP_HEADER_BYTES;

        status = dmub_srv_calc_region_info(dmub_srv, &region_params,
                                           &region_info);

        if (status != DMUB_STATUS_OK) {
                DRM_ERROR("Error calculating DMUB region info: %d\n", status);
                return -EINVAL;
        }

        /*
         * Allocate a framebuffer based on the total size of all the regions.
         * TODO: Move this into GART.
         */
        r = amdgpu_bo_create_kernel(adev, region_info.fb_size, PAGE_SIZE,
                                    AMDGPU_GEM_DOMAIN_VRAM, &adev->dm.dmub_bo,
                                    &adev->dm.dmub_bo_gpu_addr,
                                    &adev->dm.dmub_bo_cpu_addr);
        if (r)
                return r;

        /* Rebase the regions on the framebuffer address. */
        memset(&fb_params, 0, sizeof(fb_params));
        fb_params.cpu_addr = adev->dm.dmub_bo_cpu_addr;
        fb_params.gpu_addr = adev->dm.dmub_bo_gpu_addr;
        fb_params.region_info = &region_info;

        adev->dm.dmub_fb_info =
                kzalloc(sizeof(*adev->dm.dmub_fb_info), GFP_KERNEL);
        fb_info = adev->dm.dmub_fb_info;

        if (!fb_info) {
                DRM_ERROR(
                        "Failed to allocate framebuffer info for DMUB service!\n");
                return -ENOMEM;
        }

        status = dmub_srv_calc_fb_info(dmub_srv, &fb_params, fb_info);
        if (status != DMUB_STATUS_OK) {
                DRM_ERROR("Error calculating DMUB FB info: %d\n", status);
                return -EINVAL;
        }

        return 0;
}
static int dm_sw_init(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
        int r;

        r = dm_dmub_sw_init(adev);
        if (r)
                return r;

        return load_dmcu_fw(adev);
}

static int dm_sw_fini(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        kfree(adev->dm.dmub_fb_info);
        adev->dm.dmub_fb_info = NULL;

        if (adev->dm.dmub_srv) {
                dmub_srv_destroy(adev->dm.dmub_srv);
                adev->dm.dmub_srv = NULL;
        }

        release_firmware(adev->dm.dmub_fw);
        adev->dm.dmub_fw = NULL;

        release_firmware(adev->dm.fw_dmcu);
        adev->dm.fw_dmcu = NULL;

        return 0;
}
static int detect_mst_link_for_all_connectors(struct drm_device *dev)
{
        struct amdgpu_dm_connector *aconnector;
        struct drm_connector *connector;
        struct drm_connector_list_iter iter;
        int ret = 0;

        drm_connector_list_iter_begin(dev, &iter);
        drm_for_each_connector_iter(connector, &iter) {
                aconnector = to_amdgpu_dm_connector(connector);
                if (aconnector->dc_link->type == dc_connection_mst_branch &&
                    aconnector->mst_mgr.aux) {
                        DRM_DEBUG_DRIVER("DM_MST: starting TM on aconnector: %p [id: %d]\n",
                                         aconnector,
                                         aconnector->base.base.id);

                        ret = drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, true);
                        if (ret < 0) {
                                DRM_ERROR("DM_MST: Failed to start MST\n");
                                aconnector->dc_link->type =
                                        dc_connection_single;
                                break;
                        }
                }
        }
        drm_connector_list_iter_end(&iter);

        return ret;
}
static int dm_late_init(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        struct dmcu_iram_parameters params;
        unsigned int linear_lut[16];
        int i;
        struct dmcu *dmcu = NULL;
        bool ret = true;

        dmcu = adev->dm.dc->res_pool->dmcu;
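
        /*
         * Identity ramp over the 16-bit backlight range: entry i works out
         * to i * 0x1111, i.e. 0x0000, 0x1111, ..., 0xFFFF.
         */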
        for (i = 0; i < 16; i++)
                linear_lut[i] = 0xFFFF * i / 15;

        params.set = 0;
        params.backlight_ramping_start = 0xCCCC;
        params.backlight_ramping_reduction = 0xCCCCCCCC;
        params.backlight_lut_array_size = 16;
        params.backlight_lut_array = linear_lut;

        /* Min backlight level after ABM reduction. Don't allow below 1%:
         * 0xFFFF x 0.01 = 0x28F
         */
        params.min_abm_backlight = 0x28F;

        /* In the case where abm is implemented on dmcub,
         * the dmcu object will be null.
         * ABM 2.4 and up are implemented on dmcub.
         */
        if (dmcu)
                ret = dmcu_load_iram(dmcu, params);
        else if (adev->dm.dc->ctx->dmub_srv)
                ret = dmub_init_abm_config(adev->dm.dc->res_pool, params);

        if (!ret)
                return -EINVAL;

        return detect_mst_link_for_all_connectors(adev_to_drm(adev));
}
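
/*
 * Suspend or resume every DP MST topology manager. A manager that fails to
 * resume is torn down again, and a hotplug event is sent afterwards so that
 * userspace re-probes the affected connectors.
 */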
static void s3_handle_mst(struct drm_device *dev, bool suspend)
{
        struct amdgpu_dm_connector *aconnector;
        struct drm_connector *connector;
        struct drm_connector_list_iter iter;
        struct drm_dp_mst_topology_mgr *mgr;
        int ret;
        bool need_hotplug = false;

        drm_connector_list_iter_begin(dev, &iter);
        drm_for_each_connector_iter(connector, &iter) {
                aconnector = to_amdgpu_dm_connector(connector);
                if (aconnector->dc_link->type != dc_connection_mst_branch ||
                    aconnector->mst_port)
                        continue;

                mgr = &aconnector->mst_mgr;

                if (suspend) {
                        drm_dp_mst_topology_mgr_suspend(mgr);
                } else {
                        ret = drm_dp_mst_topology_mgr_resume(mgr, true);
                        if (ret < 0) {
                                drm_dp_mst_topology_mgr_set_mst(mgr, false);
                                need_hotplug = true;
                        }
                }
        }
        drm_connector_list_iter_end(&iter);

        if (need_hotplug)
                drm_kms_helper_hotplug_event(dev);
}
static int amdgpu_dm_smu_write_watermarks_table(struct amdgpu_device *adev)
{
        struct smu_context *smu = &adev->smu;
        int ret = 0;

        if (!is_support_sw_smu(adev))
                return 0;

        /* This interface is for dGPU Navi1x. The Linux dc-pplib interface
         * depends on the window driver dc implementation.
         * For Navi1x, clock settings of dcn watermarks are fixed. The settings
         * should be passed to smu during boot up and resume from s3.
         * boot up: dc calculates dcn watermark clock settings within dc_create,
         * dcn20_resource_construct,
         * then calls pplib functions below to pass the settings to smu:
         * smu_set_watermarks_for_clock_ranges
         * smu_set_watermarks_table
         * navi10_set_watermarks_table
         * smu_write_watermarks_table
         *
         * For Renoir, clock settings of dcn watermarks are also fixed values.
         * dc has implemented a different flow for the window driver:
         * dc_hardware_init / dc_set_power_state
         * smu_set_watermarks_for_clock_ranges
         * renoir_set_watermarks_table
         * smu_write_watermarks_table
         *
         * For Linux,
         * dc_hardware_init -> amdgpu_dm_init
         * dc_set_power_state --> dm_resume
         *
         * therefore, this function applies to navi10/12/14 but not Renoir
         */
        switch (adev->asic_type) {
        case CHIP_NAVI10:
        case CHIP_NAVI14:
        case CHIP_NAVI12:
                break;
        default:
                return 0;
        }

        ret = smu_write_watermarks_table(smu);
        if (ret) {
                DRM_ERROR("Failed to update WMTABLE!\n");
                return ret;
        }

        return 0;
}
/**
 * dm_hw_init() - Initialize DC device
 * @handle: The base driver device containing the amdgpu_dm device.
 *
 * Initialize the &struct amdgpu_display_manager device. This involves calling
 * the initializers of each DM component, then populating the struct with them.
 *
 * Although the function implies hardware initialization, both hardware and
 * software are initialized here. Splitting them out to their relevant init
 * hooks is a future TODO item.
 *
 * Some notable things that are initialized here:
 *
 * - Display Core, both software and hardware
 * - DC modules that we need (freesync and color management)
 * - DRM software states
 * - Interrupt sources and handlers
 * - Vblank support
 * - Debug FS entries, if enabled
 */
static int dm_hw_init(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
        /* Create DAL display manager */
        amdgpu_dm_init(adev);
        amdgpu_dm_hpd_init(adev);

        return 0;
}
/**
 * dm_hw_fini() - Teardown DC device
 * @handle: The base driver device containing the amdgpu_dm device.
 *
 * Teardown components within &struct amdgpu_display_manager that require
 * cleanup. This involves cleaning up the DRM device, DC, and any modules that
 * were loaded. Also flush IRQ workqueues and disable them.
 */
static int dm_hw_fini(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        amdgpu_dm_hpd_fini(adev);

        amdgpu_dm_irq_fini(adev);
        amdgpu_dm_fini(adev);
        return 0;
}

static int dm_enable_vblank(struct drm_crtc *crtc);
static void dm_disable_vblank(struct drm_crtc *crtc);
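
/*
 * Toggle the per-CRTC pageflip and vblank interrupt sources for every stream
 * in @state; used around GPU reset, where interrupts must be quiesced before
 * the cached DC state is torn down and re-armed once it has been restored.
 */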
static void dm_gpureset_toggle_interrupts(struct amdgpu_device *adev,
                                          struct dc_state *state, bool enable)
{
        enum dc_irq_source irq_source;
        struct amdgpu_crtc *acrtc;
        int rc = -EBUSY;
        int i = 0;

        for (i = 0; i < state->stream_count; i++) {
                acrtc = get_crtc_by_otg_inst(
                                adev, state->stream_status[i].primary_otg_inst);

                if (acrtc && state->stream_status[i].plane_count != 0) {
                        irq_source = IRQ_TYPE_PFLIP + acrtc->otg_inst;
                        rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
                        DRM_DEBUG("crtc %d - vupdate irq %sabling: r=%d\n",
                                  acrtc->crtc_id, enable ? "en" : "dis", rc);
                        if (rc)
                                DRM_WARN("Failed to %s pflip interrupts\n",
                                         enable ? "enable" : "disable");

                        if (enable) {
                                rc = dm_enable_vblank(&acrtc->base);
                                if (rc)
                                        DRM_WARN("Failed to enable vblank interrupts\n");
                        } else {
                                dm_disable_vblank(&acrtc->base);
                        }
                }
        }
}
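
/*
 * Commit an empty topology: copy the current DC state, strip every stream
 * (and its planes) from the copy, then commit it so the hardware is left
 * driving nothing. Used on suspend while a GPU reset is in flight.
 */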
static enum dc_status amdgpu_dm_commit_zero_streams(struct dc *dc)
{
        struct dc_state *context = NULL;
        enum dc_status res = DC_ERROR_UNEXPECTED;
        int i;
        struct dc_stream_state *del_streams[MAX_PIPES];
        int del_streams_count = 0;

        memset(del_streams, 0, sizeof(del_streams));

        context = dc_create_state(dc);
        if (context == NULL)
                goto context_alloc_fail;

        dc_resource_state_copy_construct_current(dc, context);

        /* First remove from context all streams */
        for (i = 0; i < context->stream_count; i++) {
                struct dc_stream_state *stream = context->streams[i];

                del_streams[del_streams_count++] = stream;
        }

        /* Remove all planes for removed streams and then remove the streams */
        for (i = 0; i < del_streams_count; i++) {
                if (!dc_rem_all_planes_for_stream(dc, del_streams[i], context)) {
                        res = DC_FAIL_DETACH_SURFACES;
                        goto fail;
                }

                res = dc_remove_stream_from_ctx(dc, context, del_streams[i]);
                if (res != DC_OK)
                        goto fail;
        }

        res = dc_validate_global_state(dc, context, false);

        if (res != DC_OK) {
                DRM_ERROR("%s:resource validation failed, dc_status:%d\n", __func__, res);
                goto fail;
        }

        res = dc_commit_state(dc, context);

fail:
        dc_release_state(context);

context_alloc_fail:
        return res;
}
static int dm_suspend(void *handle)
{
        struct amdgpu_device *adev = handle;
        struct amdgpu_display_manager *dm = &adev->dm;
        int ret = 0;

        if (amdgpu_in_reset(adev)) {
                mutex_lock(&dm->dc_lock);

#if defined(CONFIG_DRM_AMD_DC_DCN)
                dc_allow_idle_optimizations(adev->dm.dc, false);
#endif

                dm->cached_dc_state = dc_copy_state(dm->dc->current_state);

                dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, false);

                amdgpu_dm_commit_zero_streams(dm->dc);

                amdgpu_dm_irq_suspend(adev);

                return ret;
        }

        WARN_ON(adev->dm.cached_state);
        adev->dm.cached_state = drm_atomic_helper_suspend(adev_to_drm(adev));

        s3_handle_mst(adev_to_drm(adev), true);

        amdgpu_dm_irq_suspend(adev);

        dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D3);

        return 0;
}
static struct amdgpu_dm_connector *
amdgpu_dm_find_first_crtc_matching_connector(struct drm_atomic_state *state,
                                             struct drm_crtc *crtc)
{
        uint32_t i;
        struct drm_connector_state *new_con_state;
        struct drm_connector *connector;
        struct drm_crtc *crtc_from_state;

        for_each_new_connector_in_state(state, connector, new_con_state, i) {
                crtc_from_state = new_con_state->crtc;

                if (crtc_from_state == crtc)
                        return to_amdgpu_dm_connector(connector);
        }

        return NULL;
}
static void emulated_link_detect(struct dc_link *link)
{
        struct dc_sink_init_data sink_init_data = { 0 };
        struct display_sink_capability sink_caps = { 0 };
        enum dc_edid_status edid_status;
        struct dc_context *dc_ctx = link->ctx;
        struct dc_sink *sink = NULL;
        struct dc_sink *prev_sink = NULL;

        link->type = dc_connection_none;
        prev_sink = link->local_sink;

        if (prev_sink)
                dc_sink_release(prev_sink);

        switch (link->connector_signal) {
        case SIGNAL_TYPE_HDMI_TYPE_A: {
                sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
                sink_caps.signal = SIGNAL_TYPE_HDMI_TYPE_A;
                break;
        }

        case SIGNAL_TYPE_DVI_SINGLE_LINK: {
                sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
                sink_caps.signal = SIGNAL_TYPE_DVI_SINGLE_LINK;
                break;
        }

        case SIGNAL_TYPE_DVI_DUAL_LINK: {
                sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
                sink_caps.signal = SIGNAL_TYPE_DVI_DUAL_LINK;
                break;
        }

        case SIGNAL_TYPE_LVDS: {
                sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
                sink_caps.signal = SIGNAL_TYPE_LVDS;
                break;
        }

        case SIGNAL_TYPE_EDP: {
                sink_caps.transaction_type =
                        DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
                sink_caps.signal = SIGNAL_TYPE_EDP;
                break;
        }

        case SIGNAL_TYPE_DISPLAY_PORT: {
                sink_caps.transaction_type =
                        DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
                sink_caps.signal = SIGNAL_TYPE_VIRTUAL;
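                /* Note: the emulated DP link reports SIGNAL_TYPE_VIRTUAL
                 * rather than a real DP sink type. */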
1881 DC_ERROR("Invalid connector type! signal:%d\n",
1882 link->connector_signal);
1886 sink_init_data.link = link;
1887 sink_init_data.sink_signal = sink_caps.signal;
1889 sink = dc_sink_create(&sink_init_data);
1891 DC_ERROR("Failed to create sink!\n");
1895 /* dc_sink_create returns a new reference */
1896 link->local_sink = sink;
1898 edid_status = dm_helpers_read_local_edid(
1903 if (edid_status != EDID_OK)
1904 DC_ERROR("Failed to read EDID");
static void dm_gpureset_commit_state(struct dc_state *dc_state,
                                     struct amdgpu_display_manager *dm)
{
        struct {
                struct dc_surface_update surface_updates[MAX_SURFACES];
                struct dc_plane_info plane_infos[MAX_SURFACES];
                struct dc_scaling_info scaling_infos[MAX_SURFACES];
                struct dc_flip_addrs flip_addrs[MAX_SURFACES];
                struct dc_stream_update stream_update;
        } *bundle;
        int k, m;

        bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);

        if (!bundle) {
                dm_error("Failed to allocate update bundle\n");
                goto cleanup;
        }

        for (k = 0; k < dc_state->stream_count; k++) {
                bundle->stream_update.stream = dc_state->streams[k];

                for (m = 0; m < dc_state->stream_status->plane_count; m++) {
                        bundle->surface_updates[m].surface =
                                dc_state->stream_status->plane_states[m];
                        bundle->surface_updates[m].surface->force_full_update =
                                true;
                }
                dc_commit_updates_for_stream(
                        dm->dc, bundle->surface_updates,
                        dc_state->stream_status->plane_count,
                        dc_state->streams[k], &bundle->stream_update);
        }

cleanup:
        kfree(bundle);

        return;
}
static void dm_set_dpms_off(struct dc_link *link)
{
        struct dc_stream_state *stream_state;
        struct amdgpu_dm_connector *aconnector = link->priv;
        struct amdgpu_device *adev = drm_to_adev(aconnector->base.dev);
        struct dc_stream_update stream_update;
        bool dpms_off = true;

        memset(&stream_update, 0, sizeof(stream_update));
        stream_update.dpms_off = &dpms_off;

        mutex_lock(&adev->dm.dc_lock);
        stream_state = dc_stream_find_from_link(link);

        if (stream_state == NULL) {
                DRM_DEBUG_DRIVER("Error finding stream state associated with link!\n");
                mutex_unlock(&adev->dm.dc_lock);
                return;
        }

        stream_update.stream = stream_state;
        dc_commit_updates_for_stream(stream_state->ctx->dc, NULL, 0,
                                     stream_state, &stream_update);
        mutex_unlock(&adev->dm.dc_lock);
}
static int dm_resume(void *handle)
{
        struct amdgpu_device *adev = handle;
        struct drm_device *ddev = adev_to_drm(adev);
        struct amdgpu_display_manager *dm = &adev->dm;
        struct amdgpu_dm_connector *aconnector;
        struct drm_connector *connector;
        struct drm_connector_list_iter iter;
        struct drm_crtc *crtc;
        struct drm_crtc_state *new_crtc_state;
        struct dm_crtc_state *dm_new_crtc_state;
        struct drm_plane *plane;
        struct drm_plane_state *new_plane_state;
        struct dm_plane_state *dm_new_plane_state;
        struct dm_atomic_state *dm_state = to_dm_atomic_state(dm->atomic_obj.state);
        enum dc_connection_type new_connection_type = dc_connection_none;
        struct dc_state *dc_state;
        int i, r, j;

        if (amdgpu_in_reset(adev)) {
                dc_state = dm->cached_dc_state;

                r = dm_dmub_hw_init(adev);
                if (r)
                        DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);

                dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
                dc_resume(dm->dc);

                amdgpu_dm_irq_resume_early(adev);
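
                /*
                 * Mark every cached stream and plane as fully changed so the
                 * commit below reprograms the complete hardware state.
                 */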
                for (i = 0; i < dc_state->stream_count; i++) {
                        dc_state->streams[i]->mode_changed = true;
                        for (j = 0; j < dc_state->stream_status->plane_count; j++) {
                                dc_state->stream_status->plane_states[j]->update_flags.raw
                                        = 0xffffffff;
                        }
                }

                WARN_ON(!dc_commit_state(dm->dc, dc_state));

                dm_gpureset_commit_state(dm->cached_dc_state, dm);

                dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, true);

                dc_release_state(dm->cached_dc_state);
                dm->cached_dc_state = NULL;

                amdgpu_dm_irq_resume_late(adev);

                mutex_unlock(&dm->dc_lock);

                return 0;
        }
        /* Recreate dc_state - DC invalidates it when setting power state to S3. */
        dc_release_state(dm_state->context);
        dm_state->context = dc_create_state(dm->dc);
        /* TODO: Remove dc_state->dccg, use dc->dccg directly. */
        dc_resource_state_construct(dm->dc, dm_state->context);

        /* Before powering on DC we need to re-initialize DMUB. */
        r = dm_dmub_hw_init(adev);
        if (r)
                DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);

        /* power on hardware */
        dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);

        /* program HPD filter */
        dc_resume(dm->dc);

        /*
         * early enable HPD Rx IRQ, should be done before set mode as short
         * pulse interrupts are used for MST
         */
        amdgpu_dm_irq_resume_early(adev);

        /* On resume we need to rewrite the MSTM control bits to enable MST */
        s3_handle_mst(ddev, false);

        /* Do detection */
        drm_connector_list_iter_begin(ddev, &iter);
        drm_for_each_connector_iter(connector, &iter) {
                aconnector = to_amdgpu_dm_connector(connector);

                /*
                 * this is the case when traversing through already created
                 * MST connectors, should be skipped
                 */
                if (aconnector->mst_port)
                        continue;

                mutex_lock(&aconnector->hpd_lock);
                if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
                        DRM_ERROR("KMS: Failed to detect connector\n");

                if (aconnector->base.force && new_connection_type == dc_connection_none)
                        emulated_link_detect(aconnector->dc_link);
                else
                        dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD);

                if (aconnector->fake_enable && aconnector->dc_link->local_sink)
                        aconnector->fake_enable = false;

                if (aconnector->dc_sink)
                        dc_sink_release(aconnector->dc_sink);
                aconnector->dc_sink = NULL;
                amdgpu_dm_update_connector_after_detect(aconnector);
                mutex_unlock(&aconnector->hpd_lock);
        }
        drm_connector_list_iter_end(&iter);

        /* Force mode set in atomic commit */
        for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i)
                new_crtc_state->active_changed = true;

        /*
         * atomic_check is expected to create the dc states. We need to release
         * them here, since they were duplicated as part of the suspend
         * procedure.
         */
        for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i) {
                dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
                if (dm_new_crtc_state->stream) {
                        WARN_ON(kref_read(&dm_new_crtc_state->stream->refcount) > 1);
                        dc_stream_release(dm_new_crtc_state->stream);
                        dm_new_crtc_state->stream = NULL;
                }
        }

        for_each_new_plane_in_state(dm->cached_state, plane, new_plane_state, i) {
                dm_new_plane_state = to_dm_plane_state(new_plane_state);
                if (dm_new_plane_state->dc_state) {
                        WARN_ON(kref_read(&dm_new_plane_state->dc_state->refcount) > 1);
                        dc_plane_state_release(dm_new_plane_state->dc_state);
                        dm_new_plane_state->dc_state = NULL;
                }
        }

        drm_atomic_helper_resume(ddev, dm->cached_state);

        dm->cached_state = NULL;

        amdgpu_dm_irq_resume_late(adev);

        amdgpu_dm_smu_write_watermarks_table(adev);

        return 0;
}
2127 * DM (and consequently DC) is registered in the amdgpu base driver as an IP
2128 * block. When CONFIG_DRM_AMD_DC is enabled, the DM device IP block is added to
2129 * the base driver's device list to be initialized and torn down accordingly.
2131 * The functions to do so are provided as hooks in &struct amd_ip_funcs.
2134 static const struct amd_ip_funcs amdgpu_dm_funcs = {
2136 .early_init = dm_early_init,
2137 .late_init = dm_late_init,
2138 .sw_init = dm_sw_init,
2139 .sw_fini = dm_sw_fini,
2140 .hw_init = dm_hw_init,
2141 .hw_fini = dm_hw_fini,
2142 .suspend = dm_suspend,
2143 .resume = dm_resume,
2144 .is_idle = dm_is_idle,
2145 .wait_for_idle = dm_wait_for_idle,
2146 .check_soft_reset = dm_check_soft_reset,
2147 .soft_reset = dm_soft_reset,
2148 .set_clockgating_state = dm_set_clockgating_state,
2149 .set_powergating_state = dm_set_powergating_state,
2152 const struct amdgpu_ip_block_version dm_ip_block =
2154 .type = AMD_IP_BLOCK_TYPE_DCE,
2158 .funcs = &amdgpu_dm_funcs,
2168 static const struct drm_mode_config_funcs amdgpu_dm_mode_funcs = {
2169 .fb_create = amdgpu_display_user_framebuffer_create,
2170 .get_format_info = amd_get_format_info,
2171 .output_poll_changed = drm_fb_helper_output_poll_changed,
2172 .atomic_check = amdgpu_dm_atomic_check,
2173 .atomic_commit = drm_atomic_helper_commit,
2176 static struct drm_mode_config_helper_funcs amdgpu_dm_mode_config_helperfuncs = {
2177 .atomic_commit_tail = amdgpu_dm_atomic_commit_tail
2180 static void update_connector_ext_caps(struct amdgpu_dm_connector *aconnector)
2182 u32 max_cll, min_cll, max, min, q, r;
2183 struct amdgpu_dm_backlight_caps *caps;
2184 struct amdgpu_display_manager *dm;
2185 struct drm_connector *conn_base;
2186 struct amdgpu_device *adev;
2187 struct dc_link *link = NULL;
2188 static const u8 pre_computed_values[] = {
2189 50, 51, 52, 53, 55, 56, 57, 58, 59, 61, 62, 63, 65, 66, 68, 69,
2190 71, 72, 74, 75, 77, 79, 81, 82, 84, 86, 88, 90, 92, 94, 96, 98};
2192 if (!aconnector || !aconnector->dc_link)
2195 link = aconnector->dc_link;
2196 if (link->connector_signal != SIGNAL_TYPE_EDP)
2199 conn_base = &aconnector->base;
2200 adev = drm_to_adev(conn_base->dev);
2202 caps = &dm->backlight_caps;
2203 caps->ext_caps = &aconnector->dc_link->dpcd_sink_ext_caps;
2204 caps->aux_support = false;
2205 max_cll = conn_base->hdr_sink_metadata.hdmi_type1.max_cll;
2206 min_cll = conn_base->hdr_sink_metadata.hdmi_type1.min_cll;
2208 if (caps->ext_caps->bits.oled == 1 ||
2209 caps->ext_caps->bits.sdr_aux_backlight_control == 1 ||
2210 caps->ext_caps->bits.hdr_aux_backlight_control == 1)
2211 caps->aux_support = true;
2213 /* From the specification (CTA-861-G), for calculating the maximum
2214 * luminance we need to use:
2215 * Luminance = 50*2**(CV/32)
2216 * Where CV is a one-byte value.
2217 * Evaluating this expression directly would require floating-point
2218 * precision; to avoid that complexity, we take advantage of the fact
2219 * that CV is divided by a constant. From Euclid's division algorithm,
2220 * we know that CV can be written as: CV = 32*q + r. Next, we replace
2221 * CV in the Luminance expression and get 50*(2**q)*(2**(r/32)), hence
2222 * we just need to pre-compute the values of 50*2**(r/32). That
2223 * pre-computation used the following Ruby line:
2224 * (0...32).each {|cv| puts (50*2**(cv/32.0)).round}
2225 * The results of the above expression can be verified in
2226 * pre_computed_values.
2230 max = (1 << q) * pre_computed_values[r];
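/*
 * Worked example (illustrative): for max_cll = 100, CV = 32*3 + 4, so
 * q = 3 and r = 4, and max = (1 << 3) * pre_computed_values[4]
 * = 8 * 55 = 440, close to the exact 50 * 2**(100/32) ~= 436 nits.
 */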
2232 // min luminance: maxLum * (CV/255)^2 / 100
2233 q = DIV_ROUND_CLOSEST(min_cll, 255);
2234 min = max * DIV_ROUND_CLOSEST((q * q), 100);
2236 caps->aux_max_input_signal = max;
2237 caps->aux_min_input_signal = min;
2240 void amdgpu_dm_update_connector_after_detect(
2241 struct amdgpu_dm_connector *aconnector)
2243 struct drm_connector *connector = &aconnector->base;
2244 struct drm_device *dev = connector->dev;
2245 struct dc_sink *sink;
2247 /* MST handled by drm_mst framework */
2248 if (aconnector->mst_mgr.mst_state)
2251 sink = aconnector->dc_link->local_sink;
2253 dc_sink_retain(sink);
2256 * An EDID-managed connector gets its first update only in the mode_valid
2257 * hook; after that the connector sink is set to either a fake or a physical
2258 * sink, depending on the link status. Skip if this was already done during boot.
2260 if (aconnector->base.force != DRM_FORCE_UNSPECIFIED
2261 && aconnector->dc_em_sink) {
2264 * For S3 resume with a headless setup, use the em_sink to fake the
2265 * stream, because on resume connector->sink is set to NULL.
2267 mutex_lock(&dev->mode_config.mutex);
2270 if (aconnector->dc_sink) {
2271 amdgpu_dm_update_freesync_caps(connector, NULL);
2273 * The retain and release below bump up the
2274 * refcount for the sink because the link doesn't point
2275 * to it anymore after disconnect; without this, the next crtc-to-connector
2276 * reshuffle by the UMD would trigger an unwanted dc_sink release.
2278 dc_sink_release(aconnector->dc_sink);
2280 aconnector->dc_sink = sink;
2281 dc_sink_retain(aconnector->dc_sink);
2282 amdgpu_dm_update_freesync_caps(connector,
2285 amdgpu_dm_update_freesync_caps(connector, NULL);
2286 if (!aconnector->dc_sink) {
2287 aconnector->dc_sink = aconnector->dc_em_sink;
2288 dc_sink_retain(aconnector->dc_sink);
2292 mutex_unlock(&dev->mode_config.mutex);
2295 dc_sink_release(sink);
2300 * TODO: temporary guard until a proper fix is found.
2301 * If this sink is an MST sink, we should not do anything here.
2303 if (sink && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
2304 dc_sink_release(sink);
2308 if (aconnector->dc_sink == sink) {
2310 * We got a DP short pulse (Link Loss, DP CTS, etc...).
2313 DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: dc_sink didn't change.\n",
2314 aconnector->connector_id);
2316 dc_sink_release(sink);
2320 DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: Old sink=%p New sink=%p\n",
2321 aconnector->connector_id, aconnector->dc_sink, sink);
2323 mutex_lock(&dev->mode_config.mutex);
2326 * 1. Update status of the drm connector
2327 * 2. Send an event and let userspace tell us what to do
2331 * TODO: check if we still need the S3 mode update workaround.
2332 * If yes, put it here.
2334 if (aconnector->dc_sink) {
2335 amdgpu_dm_update_freesync_caps(connector, NULL);
2336 dc_sink_release(aconnector->dc_sink);
2339 aconnector->dc_sink = sink;
2340 dc_sink_retain(aconnector->dc_sink);
2341 if (sink->dc_edid.length == 0) {
2342 aconnector->edid = NULL;
2343 if (aconnector->dc_link->aux_mode) {
2344 drm_dp_cec_unset_edid(
2345 &aconnector->dm_dp_aux.aux);
2349 (struct edid *)sink->dc_edid.raw_edid;
2351 drm_connector_update_edid_property(connector,
2353 if (aconnector->dc_link->aux_mode)
2354 drm_dp_cec_set_edid(&aconnector->dm_dp_aux.aux,
2358 amdgpu_dm_update_freesync_caps(connector, aconnector->edid);
2359 update_connector_ext_caps(aconnector);
2361 drm_dp_cec_unset_edid(&aconnector->dm_dp_aux.aux);
2362 amdgpu_dm_update_freesync_caps(connector, NULL);
2363 drm_connector_update_edid_property(connector, NULL);
2364 aconnector->num_modes = 0;
2365 dc_sink_release(aconnector->dc_sink);
2366 aconnector->dc_sink = NULL;
2367 aconnector->edid = NULL;
2368 #ifdef CONFIG_DRM_AMD_DC_HDCP
2369 /* Set CP to DESIRED if it was ENABLED, so we can re-enable it on hotplug */
2370 if (connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
2371 connector->state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
2375 mutex_unlock(&dev->mode_config.mutex);
2377 update_subconnector_property(aconnector);
2380 dc_sink_release(sink);
2383 static void handle_hpd_irq(void *param)
2385 struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
2386 struct drm_connector *connector = &aconnector->base;
2387 struct drm_device *dev = connector->dev;
2388 enum dc_connection_type new_connection_type = dc_connection_none;
2389 #ifdef CONFIG_DRM_AMD_DC_HDCP
2390 struct amdgpu_device *adev = drm_to_adev(dev);
2391 struct dm_connector_state *dm_con_state = to_dm_connector_state(connector->state);
2395 * In case of failure or MST there is no need to update the connector status
2396 * or notify the OS, since (in the MST case) MST does this in its own context.
2398 mutex_lock(&aconnector->hpd_lock);
2400 #ifdef CONFIG_DRM_AMD_DC_HDCP
2401 if (adev->dm.hdcp_workqueue) {
2402 hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
2403 dm_con_state->update_hdcp = true;
2406 if (aconnector->fake_enable)
2407 aconnector->fake_enable = false;
2409 if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
2410 DRM_ERROR("KMS: Failed to detect connector\n");
2412 if (aconnector->base.force && new_connection_type == dc_connection_none) {
2413 emulated_link_detect(aconnector->dc_link);
2416 drm_modeset_lock_all(dev);
2417 dm_restore_drm_connector_state(dev, connector);
2418 drm_modeset_unlock_all(dev);
2420 if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
2421 drm_kms_helper_hotplug_event(dev);
2423 } else if (dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD)) {
2424 if (new_connection_type == dc_connection_none &&
2425 aconnector->dc_link->type == dc_connection_none)
2426 dm_set_dpms_off(aconnector->dc_link);
2428 amdgpu_dm_update_connector_after_detect(aconnector);
2430 drm_modeset_lock_all(dev);
2431 dm_restore_drm_connector_state(dev, connector);
2432 drm_modeset_unlock_all(dev);
2434 if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
2435 drm_kms_helper_hotplug_event(dev);
2437 mutex_unlock(&aconnector->hpd_lock);
2441 static void dm_handle_hpd_rx_irq(struct amdgpu_dm_connector *aconnector)
2443 uint8_t esi[DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI] = { 0 };
2445 bool new_irq_handled = false;
2447 int dpcd_bytes_to_read;
2449 const int max_process_count = 30;
2450 int process_count = 0;
2452 const struct dc_link_status *link_status = dc_link_get_status(aconnector->dc_link);
2454 if (link_status->dpcd_caps->dpcd_rev.raw < 0x12) {
2455 dpcd_bytes_to_read = DP_LANE0_1_STATUS - DP_SINK_COUNT;
2456 /* DPCD 0x200 - 0x201 for downstream IRQ */
2457 dpcd_addr = DP_SINK_COUNT;
2459 dpcd_bytes_to_read = DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI;
2460 /* DPCD 0x2002 - 0x2005 for downstream IRQ */
2461 dpcd_addr = DP_SINK_COUNT_ESI;
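/*
 * For reference, with the standard DPCD map this reads 2 bytes
 * (0x200-0x201) for pre-1.2 sinks and 4 bytes (0x2002-0x2005)
 * for DPCD 1.2+ sinks.
 */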
2464 dret = drm_dp_dpcd_read(
2465 &aconnector->dm_dp_aux.aux,
2468 dpcd_bytes_to_read);
2470 while (dret == dpcd_bytes_to_read &&
2471 process_count < max_process_count) {
2477 DRM_DEBUG_DRIVER("ESI %02x %02x %02x\n", esi[0], esi[1], esi[2]);
2478 /* handle HPD short pulse irq */
2479 if (aconnector->mst_mgr.mst_state)
2481 &aconnector->mst_mgr,
2485 if (new_irq_handled) {
2486 /* ACK at DPCD to notify downstream */
2487 const int ack_dpcd_bytes_to_write =
2488 dpcd_bytes_to_read - 1;
2490 for (retry = 0; retry < 3; retry++) {
2493 wret = drm_dp_dpcd_write(
2494 &aconnector->dm_dp_aux.aux,
2497 ack_dpcd_bytes_to_write);
2498 if (wret == ack_dpcd_bytes_to_write)
2502 /* check if there is new irq to be handled */
2503 dret = drm_dp_dpcd_read(
2504 &aconnector->dm_dp_aux.aux,
2507 dpcd_bytes_to_read);
2509 new_irq_handled = false;
2515 if (process_count == max_process_count)
2516 DRM_DEBUG_DRIVER("Loop exceeded max iterations\n");
2519 static void handle_hpd_rx_irq(void *param)
2521 struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
2522 struct drm_connector *connector = &aconnector->base;
2523 struct drm_device *dev = connector->dev;
2524 struct dc_link *dc_link = aconnector->dc_link;
2525 bool is_mst_root_connector = aconnector->mst_mgr.mst_state;
2526 bool result = false;
2527 enum dc_connection_type new_connection_type = dc_connection_none;
2528 struct amdgpu_device *adev = drm_to_adev(dev);
2529 union hpd_irq_data hpd_irq_data;
2531 memset(&hpd_irq_data, 0, sizeof(hpd_irq_data));
2534 * TODO: Temporarily add a mutex so the hpd interrupt does not have a gpio
2535 * conflict; once the i2c helper is implemented, this mutex should be retired.
2538 if (dc_link->type != dc_connection_mst_branch)
2539 mutex_lock(&aconnector->hpd_lock);
2541 read_hpd_rx_irq_data(dc_link, &hpd_irq_data);
2543 if ((dc_link->cur_link_settings.lane_count != LANE_COUNT_UNKNOWN) ||
2544 (dc_link->type == dc_connection_mst_branch)) {
2545 if (hpd_irq_data.bytes.device_service_irq.bits.UP_REQ_MSG_RDY) {
2547 dm_handle_hpd_rx_irq(aconnector);
2549 } else if (hpd_irq_data.bytes.device_service_irq.bits.DOWN_REP_MSG_RDY) {
2551 dm_handle_hpd_rx_irq(aconnector);
2556 mutex_lock(&adev->dm.dc_lock);
2557 #ifdef CONFIG_DRM_AMD_DC_HDCP
2558 result = dc_link_handle_hpd_rx_irq(dc_link, &hpd_irq_data, NULL);
2560 result = dc_link_handle_hpd_rx_irq(dc_link, NULL, NULL);
2562 mutex_unlock(&adev->dm.dc_lock);
2565 if (result && !is_mst_root_connector) {
2566 /* Downstream Port status changed. */
2567 if (!dc_link_detect_sink(dc_link, &new_connection_type))
2568 DRM_ERROR("KMS: Failed to detect connector\n");
2570 if (aconnector->base.force && new_connection_type == dc_connection_none) {
2571 emulated_link_detect(dc_link);
2573 if (aconnector->fake_enable)
2574 aconnector->fake_enable = false;
2576 amdgpu_dm_update_connector_after_detect(aconnector);
2579 drm_modeset_lock_all(dev);
2580 dm_restore_drm_connector_state(dev, connector);
2581 drm_modeset_unlock_all(dev);
2583 drm_kms_helper_hotplug_event(dev);
2584 } else if (dc_link_detect(dc_link, DETECT_REASON_HPDRX)) {
2586 if (aconnector->fake_enable)
2587 aconnector->fake_enable = false;
2589 amdgpu_dm_update_connector_after_detect(aconnector);
2592 drm_modeset_lock_all(dev);
2593 dm_restore_drm_connector_state(dev, connector);
2594 drm_modeset_unlock_all(dev);
2596 drm_kms_helper_hotplug_event(dev);
2599 #ifdef CONFIG_DRM_AMD_DC_HDCP
2600 if (hpd_irq_data.bytes.device_service_irq.bits.CP_IRQ) {
2601 if (adev->dm.hdcp_workqueue)
2602 hdcp_handle_cpirq(adev->dm.hdcp_workqueue, aconnector->base.index);
2606 if (dc_link->type != dc_connection_mst_branch) {
2607 drm_dp_cec_irq(&aconnector->dm_dp_aux.aux);
2608 mutex_unlock(&aconnector->hpd_lock);
2612 static void register_hpd_handlers(struct amdgpu_device *adev)
2614 struct drm_device *dev = adev_to_drm(adev);
2615 struct drm_connector *connector;
2616 struct amdgpu_dm_connector *aconnector;
2617 const struct dc_link *dc_link;
2618 struct dc_interrupt_params int_params = {0};
2620 int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2621 int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2623 list_for_each_entry(connector,
2624 &dev->mode_config.connector_list, head) {
2626 aconnector = to_amdgpu_dm_connector(connector);
2627 dc_link = aconnector->dc_link;
2629 if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd) {
2630 int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
2631 int_params.irq_source = dc_link->irq_source_hpd;
2633 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2635 (void *) aconnector);
2638 if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd_rx) {
2640 /* Also register for DP short pulse (hpd_rx). */
2641 int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
2642 int_params.irq_source = dc_link->irq_source_hpd_rx;
2644 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2646 (void *) aconnector);
2651 #if defined(CONFIG_DRM_AMD_DC_SI)
2652 /* Register IRQ sources and initialize IRQ callbacks */
2653 static int dce60_register_irq_handlers(struct amdgpu_device *adev)
2655 struct dc *dc = adev->dm.dc;
2656 struct common_irq_params *c_irq_params;
2657 struct dc_interrupt_params int_params = {0};
2660 unsigned int client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
2662 int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2663 int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2666 * Actions of amdgpu_irq_add_id():
2667 * 1. Register a set() function with base driver.
2668 * Base driver will call set() function to enable/disable an
2669 * interrupt in DC hardware.
2670 * 2. Register amdgpu_dm_irq_handler().
2671 * Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
2672 * coming from DC hardware.
2673 * amdgpu_dm_irq_handler() will re-direct the interrupt to DC
2674 * for acknowledging and handling. */
2676 /* Use VBLANK interrupt */
2677 for (i = 0; i < adev->mode_info.num_crtc; i++) {
2678 r = amdgpu_irq_add_id(adev, client_id, i + 1, &adev->crtc_irq);
2680 DRM_ERROR("Failed to add crtc irq id!\n");
2684 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2685 int_params.irq_source =
2686 dc_interrupt_to_irq_source(dc, i + 1, 0);
2688 c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
2690 c_irq_params->adev = adev;
2691 c_irq_params->irq_src = int_params.irq_source;
2693 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2694 dm_crtc_high_irq, c_irq_params);
2697 /* Use GRPH_PFLIP interrupt */
2698 for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
2699 i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
2700 r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
2702 DRM_ERROR("Failed to add page flip irq id!\n");
2706 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2707 int_params.irq_source =
2708 dc_interrupt_to_irq_source(dc, i, 0);
2710 c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
2712 c_irq_params->adev = adev;
2713 c_irq_params->irq_src = int_params.irq_source;
2715 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2716 dm_pflip_high_irq, c_irq_params);
2721 r = amdgpu_irq_add_id(adev, client_id,
2722 VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
2724 DRM_ERROR("Failed to add hpd irq id!\n");
2728 register_hpd_handlers(adev);
2734 /* Register IRQ sources and initialize IRQ callbacks */
2735 static int dce110_register_irq_handlers(struct amdgpu_device *adev)
2737 struct dc *dc = adev->dm.dc;
2738 struct common_irq_params *c_irq_params;
2739 struct dc_interrupt_params int_params = {0};
2742 unsigned int client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
2744 if (adev->asic_type >= CHIP_VEGA10)
2745 client_id = SOC15_IH_CLIENTID_DCE;
2747 int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2748 int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2751 * Actions of amdgpu_irq_add_id():
2752 * 1. Register a set() function with base driver.
2753 * Base driver will call set() function to enable/disable an
2754 * interrupt in DC hardware.
2755 * 2. Register amdgpu_dm_irq_handler().
2756 * Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
2757 * coming from DC hardware.
2758 * amdgpu_dm_irq_handler() will re-direct the interrupt to DC
2759 * for acknowledging and handling. */
2761 /* Use VBLANK interrupt */
2762 for (i = VISLANDS30_IV_SRCID_D1_VERTICAL_INTERRUPT0; i <= VISLANDS30_IV_SRCID_D6_VERTICAL_INTERRUPT0; i++) {
2763 r = amdgpu_irq_add_id(adev, client_id, i, &adev->crtc_irq);
2765 DRM_ERROR("Failed to add crtc irq id!\n");
2769 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2770 int_params.irq_source =
2771 dc_interrupt_to_irq_source(dc, i, 0);
2773 c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
2775 c_irq_params->adev = adev;
2776 c_irq_params->irq_src = int_params.irq_source;
2778 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2779 dm_crtc_high_irq, c_irq_params);
2782 /* Use VUPDATE interrupt */
2783 for (i = VISLANDS30_IV_SRCID_D1_V_UPDATE_INT; i <= VISLANDS30_IV_SRCID_D6_V_UPDATE_INT; i += 2) {
2784 r = amdgpu_irq_add_id(adev, client_id, i, &adev->vupdate_irq);
2786 DRM_ERROR("Failed to add vupdate irq id!\n");
2790 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2791 int_params.irq_source =
2792 dc_interrupt_to_irq_source(dc, i, 0);
2794 c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
2796 c_irq_params->adev = adev;
2797 c_irq_params->irq_src = int_params.irq_source;
2799 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2800 dm_vupdate_high_irq, c_irq_params);
2803 /* Use GRPH_PFLIP interrupt */
2804 for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
2805 i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
2806 r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
2808 DRM_ERROR("Failed to add page flip irq id!\n");
2812 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2813 int_params.irq_source =
2814 dc_interrupt_to_irq_source(dc, i, 0);
2816 c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
2818 c_irq_params->adev = adev;
2819 c_irq_params->irq_src = int_params.irq_source;
2821 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2822 dm_pflip_high_irq, c_irq_params);
2827 r = amdgpu_irq_add_id(adev, client_id,
2828 VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
2830 DRM_ERROR("Failed to add hpd irq id!\n");
2834 register_hpd_handlers(adev);
2839 #if defined(CONFIG_DRM_AMD_DC_DCN)
2840 /* Register IRQ sources and initialize IRQ callbacks */
2841 static int dcn10_register_irq_handlers(struct amdgpu_device *adev)
2843 struct dc *dc = adev->dm.dc;
2844 struct common_irq_params *c_irq_params;
2845 struct dc_interrupt_params int_params = {0};
2849 int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2850 int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2853 * Actions of amdgpu_irq_add_id():
2854 * 1. Register a set() function with base driver.
2855 * Base driver will call set() function to enable/disable an
2856 * interrupt in DC hardware.
2857 * 2. Register amdgpu_dm_irq_handler().
2858 * Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
2859 * coming from DC hardware.
2860 * amdgpu_dm_irq_handler() will re-direct the interrupt to DC
2861 * for acknowledging and handling.
2864 /* Use VSTARTUP interrupt */
2865 for (i = DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP;
2866 i <= DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP + adev->mode_info.num_crtc - 1;
2868 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->crtc_irq);
2871 DRM_ERROR("Failed to add crtc irq id!\n");
2875 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2876 int_params.irq_source =
2877 dc_interrupt_to_irq_source(dc, i, 0);
2879 c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
2881 c_irq_params->adev = adev;
2882 c_irq_params->irq_src = int_params.irq_source;
2884 amdgpu_dm_irq_register_interrupt(
2885 adev, &int_params, dm_crtc_high_irq, c_irq_params);
2888 /* Use VUPDATE_NO_LOCK interrupt on DCN, which seems to correspond to
2889 * the regular VUPDATE interrupt on DCE. We want DC_IRQ_SOURCE_VUPDATEx
2890 * to trigger at the end of each vblank, regardless of the state of the
2891 * lock, matching DCE behaviour.
2893 for (i = DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT;
2894 i <= DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT + adev->mode_info.num_crtc - 1;
2896 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->vupdate_irq);
2899 DRM_ERROR("Failed to add vupdate irq id!\n");
2903 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2904 int_params.irq_source =
2905 dc_interrupt_to_irq_source(dc, i, 0);
2907 c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
2909 c_irq_params->adev = adev;
2910 c_irq_params->irq_src = int_params.irq_source;
2912 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2913 dm_vupdate_high_irq, c_irq_params);
2916 /* Use GRPH_PFLIP interrupt */
2917 for (i = DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT;
2918 i <= DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT + adev->mode_info.num_crtc - 1;
2920 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->pageflip_irq);
2922 DRM_ERROR("Failed to add page flip irq id!\n");
2926 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2927 int_params.irq_source =
2928 dc_interrupt_to_irq_source(dc, i, 0);
2930 c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
2932 c_irq_params->adev = adev;
2933 c_irq_params->irq_src = int_params.irq_source;
2935 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2936 dm_pflip_high_irq, c_irq_params);
2941 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DC_HPD1_INT,
2944 DRM_ERROR("Failed to add hpd irq id!\n");
2948 register_hpd_handlers(adev);
2955 * Acquires the lock for the atomic state object and returns
2956 * the new atomic state.
2958 * This should only be called during atomic check.
2960 static int dm_atomic_get_state(struct drm_atomic_state *state,
2961 struct dm_atomic_state **dm_state)
2963 struct drm_device *dev = state->dev;
2964 struct amdgpu_device *adev = drm_to_adev(dev);
2965 struct amdgpu_display_manager *dm = &adev->dm;
2966 struct drm_private_state *priv_state;
2971 priv_state = drm_atomic_get_private_obj_state(state, &dm->atomic_obj);
2972 if (IS_ERR(priv_state))
2973 return PTR_ERR(priv_state);
2975 *dm_state = to_dm_atomic_state(priv_state);
2980 static struct dm_atomic_state *
2981 dm_atomic_get_new_state(struct drm_atomic_state *state)
2983 struct drm_device *dev = state->dev;
2984 struct amdgpu_device *adev = drm_to_adev(dev);
2985 struct amdgpu_display_manager *dm = &adev->dm;
2986 struct drm_private_obj *obj;
2987 struct drm_private_state *new_obj_state;
2990 for_each_new_private_obj_in_state(state, obj, new_obj_state, i) {
2991 if (obj->funcs == dm->atomic_obj.funcs)
2992 return to_dm_atomic_state(new_obj_state);
2998 static struct drm_private_state *
2999 dm_atomic_duplicate_state(struct drm_private_obj *obj)
3001 struct dm_atomic_state *old_state, *new_state;
3003 new_state = kzalloc(sizeof(*new_state), GFP_KERNEL);
3007 __drm_atomic_helper_private_obj_duplicate_state(obj, &new_state->base);
3009 old_state = to_dm_atomic_state(obj->state);
3011 if (old_state && old_state->context)
3012 new_state->context = dc_copy_state(old_state->context);
3014 if (!new_state->context) {
3019 return &new_state->base;
3022 static void dm_atomic_destroy_state(struct drm_private_obj *obj,
3023 struct drm_private_state *state)
3025 struct dm_atomic_state *dm_state = to_dm_atomic_state(state);
3027 if (dm_state && dm_state->context)
3028 dc_release_state(dm_state->context);
3033 static struct drm_private_state_funcs dm_atomic_state_funcs = {
3034 .atomic_duplicate_state = dm_atomic_duplicate_state,
3035 .atomic_destroy_state = dm_atomic_destroy_state,
3038 static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev)
3040 struct dm_atomic_state *state;
3043 adev->mode_info.mode_config_initialized = true;
3045 adev_to_drm(adev)->mode_config.funcs = (void *)&amdgpu_dm_mode_funcs;
3046 adev_to_drm(adev)->mode_config.helper_private = &amdgpu_dm_mode_config_helperfuncs;
3048 adev_to_drm(adev)->mode_config.max_width = 16384;
3049 adev_to_drm(adev)->mode_config.max_height = 16384;
3051 adev_to_drm(adev)->mode_config.preferred_depth = 24;
3052 adev_to_drm(adev)->mode_config.prefer_shadow = 1;
3053 /* indicates support for immediate flip */
3054 adev_to_drm(adev)->mode_config.async_page_flip = true;
3056 adev_to_drm(adev)->mode_config.fb_base = adev->gmc.aper_base;
3058 state = kzalloc(sizeof(*state), GFP_KERNEL);
3062 state->context = dc_create_state(adev->dm.dc);
3063 if (!state->context) {
3068 dc_resource_state_copy_construct_current(adev->dm.dc, state->context);
3070 drm_atomic_private_obj_init(adev_to_drm(adev),
3071 &adev->dm.atomic_obj,
3073 &dm_atomic_state_funcs);
3075 r = amdgpu_display_modeset_create_props(adev);
3077 dc_release_state(state->context);
3082 r = amdgpu_dm_audio_init(adev);
3084 dc_release_state(state->context);
3092 #define AMDGPU_DM_DEFAULT_MIN_BACKLIGHT 12
3093 #define AMDGPU_DM_DEFAULT_MAX_BACKLIGHT 255
3094 #define AUX_BL_DEFAULT_TRANSITION_TIME_MS 50
3096 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
3097 defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
3099 static void amdgpu_dm_update_backlight_caps(struct amdgpu_display_manager *dm)
3101 #if defined(CONFIG_ACPI)
3102 struct amdgpu_dm_backlight_caps caps;
3104 memset(&caps, 0, sizeof(caps));
3106 if (dm->backlight_caps.caps_valid)
3109 amdgpu_acpi_get_backlight_caps(dm->adev, &caps);
3110 if (caps.caps_valid) {
3111 dm->backlight_caps.caps_valid = true;
3112 if (caps.aux_support)
3114 dm->backlight_caps.min_input_signal = caps.min_input_signal;
3115 dm->backlight_caps.max_input_signal = caps.max_input_signal;
3117 dm->backlight_caps.min_input_signal =
3118 AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
3119 dm->backlight_caps.max_input_signal =
3120 AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
3123 if (dm->backlight_caps.aux_support)
3126 dm->backlight_caps.min_input_signal = AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
3127 dm->backlight_caps.max_input_signal = AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
3131 static int set_backlight_via_aux(struct dc_link *link, uint32_t brightness)
3138 rc = dc_link_set_backlight_level_nits(link, true, brightness,
3139 AUX_BL_DEFAULT_TRANSITION_TIME_MS);
3144 static int get_brightness_range(const struct amdgpu_dm_backlight_caps *caps,
3145 unsigned int *min, unsigned int *max)
3150 if (caps->aux_support) {
3151 // Firmware limits are in nits, DC API wants millinits.
3152 *max = 1000 * caps->aux_max_input_signal;
3153 *min = 1000 * caps->aux_min_input_signal;
3155 // Firmware limits are 8-bit, PWM control is 16-bit.
3156 *max = 0x101 * caps->max_input_signal;
3157 *min = 0x101 * caps->min_input_signal;
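/*
 * Note: the 0x101 (257) multiplier maps the full 8-bit range onto the
 * full 16-bit range exactly, since 257 * 255 = 65535 = 0xFFFF.
 */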
3162 static u32 convert_brightness_from_user(const struct amdgpu_dm_backlight_caps *caps,
3163 uint32_t brightness)
3167 if (!get_brightness_range(caps, &min, &max))
3170 // Rescale 0..255 to min..max
3171 return min + DIV_ROUND_CLOSEST((max - min) * brightness,
3172 AMDGPU_MAX_BL_LEVEL);
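/*
 * Worked example (illustrative, assuming the default 12/255 PWM
 * firmware limits): with min = 0x101 * 12 = 3084 and
 * max = 0x101 * 255 = 65535, a user brightness of 128 maps to
 * 3084 + (65535 - 3084) * 128 / 255 ~= 34432.
 */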
3175 static u32 convert_brightness_to_user(const struct amdgpu_dm_backlight_caps *caps,
3176 uint32_t brightness)
3180 if (!get_brightness_range(caps, &min, &max))
3183 if (brightness < min)
3185 // Rescale min..max to 0..255
3186 return DIV_ROUND_CLOSEST(AMDGPU_MAX_BL_LEVEL * (brightness - min),
3190 static int amdgpu_dm_backlight_update_status(struct backlight_device *bd)
3192 struct amdgpu_display_manager *dm = bl_get_data(bd);
3193 struct amdgpu_dm_backlight_caps caps;
3194 struct dc_link *link = NULL;
3198 amdgpu_dm_update_backlight_caps(dm);
3199 caps = dm->backlight_caps;
3201 link = (struct dc_link *)dm->backlight_link;
3203 brightness = convert_brightness_from_user(&caps, bd->props.brightness);
3204 // Change brightness based on AUX property
3205 if (caps.aux_support)
3206 return set_backlight_via_aux(link, brightness);
3208 rc = dc_link_set_backlight_level(dm->backlight_link, brightness, 0);
3213 static int amdgpu_dm_backlight_get_brightness(struct backlight_device *bd)
3215 struct amdgpu_display_manager *dm = bl_get_data(bd);
3216 int ret = dc_link_get_backlight_level(dm->backlight_link);
3218 if (ret == DC_ERROR_UNEXPECTED)
3219 return bd->props.brightness;
3220 return convert_brightness_to_user(&dm->backlight_caps, ret);
3223 static const struct backlight_ops amdgpu_dm_backlight_ops = {
3224 .options = BL_CORE_SUSPENDRESUME,
3225 .get_brightness = amdgpu_dm_backlight_get_brightness,
3226 .update_status = amdgpu_dm_backlight_update_status,
3230 amdgpu_dm_register_backlight_device(struct amdgpu_display_manager *dm)
3233 struct backlight_properties props = { 0 };
3235 amdgpu_dm_update_backlight_caps(dm);
3237 props.max_brightness = AMDGPU_MAX_BL_LEVEL;
3238 props.brightness = AMDGPU_MAX_BL_LEVEL;
3239 props.type = BACKLIGHT_RAW;
3241 snprintf(bl_name, sizeof(bl_name), "amdgpu_bl%d",
3242 adev_to_drm(dm->adev)->primary->index);
3244 dm->backlight_dev = backlight_device_register(bl_name,
3245 adev_to_drm(dm->adev)->dev,
3247 &amdgpu_dm_backlight_ops,
3250 if (IS_ERR(dm->backlight_dev))
3251 DRM_ERROR("DM: Backlight registration failed!\n");
3253 DRM_DEBUG_DRIVER("DM: Registered Backlight device: %s\n", bl_name);
3258 static int initialize_plane(struct amdgpu_display_manager *dm,
3259 struct amdgpu_mode_info *mode_info, int plane_id,
3260 enum drm_plane_type plane_type,
3261 const struct dc_plane_cap *plane_cap)
3263 struct drm_plane *plane;
3264 unsigned long possible_crtcs;
3267 plane = kzalloc(sizeof(struct drm_plane), GFP_KERNEL);
3269 DRM_ERROR("KMS: Failed to allocate plane\n");
3272 plane->type = plane_type;
3275 * HACK: IGT tests expect that the primary plane for a CRTC
3276 * can only have one possible CRTC. Only expose support for
3277 * any CRTC for planes that are not going to be used as a
3278 * primary plane for a CRTC - like overlay or underlay planes.
3280 possible_crtcs = 1 << plane_id;
3281 if (plane_id >= dm->dc->caps.max_streams)
3282 possible_crtcs = 0xff;
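/*
 * For example: plane_id 1 yields possible_crtcs = 0x2 (CRTC 1 only),
 * while an overlay plane with plane_id >= max_streams gets 0xff and
 * may be placed on any CRTC.
 */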
3284 ret = amdgpu_dm_plane_init(dm, plane, possible_crtcs, plane_cap);
3287 DRM_ERROR("KMS: Failed to initialize plane\n");
3293 mode_info->planes[plane_id] = plane;
3299 static void register_backlight_device(struct amdgpu_display_manager *dm,
3300 struct dc_link *link)
3302 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
3303 defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
3305 if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
3306 link->type != dc_connection_none) {
3308 * Even if registration fails, we should continue with
3309 * DM initialization because not having backlight control
3310 * is better than a black screen.
3312 amdgpu_dm_register_backlight_device(dm);
3314 if (dm->backlight_dev)
3315 dm->backlight_link = link;
3322 * In this architecture, the association
3323 * connector -> encoder -> crtc
3324 * is not really required. The crtc and connector will hold the
3325 * display_index as an abstraction to use with the DAL component.
3327 * Returns 0 on success
3329 static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
3331 struct amdgpu_display_manager *dm = &adev->dm;
3333 struct amdgpu_dm_connector *aconnector = NULL;
3334 struct amdgpu_encoder *aencoder = NULL;
3335 struct amdgpu_mode_info *mode_info = &adev->mode_info;
3337 int32_t primary_planes;
3338 enum dc_connection_type new_connection_type = dc_connection_none;
3339 const struct dc_plane_cap *plane;
3341 dm->display_indexes_num = dm->dc->caps.max_streams;
3342 /* Update the actually used number of crtcs */
3343 adev->mode_info.num_crtc = adev->dm.display_indexes_num;
3345 link_cnt = dm->dc->caps.max_links;
3346 if (amdgpu_dm_mode_config_init(dm->adev)) {
3347 DRM_ERROR("DM: Failed to initialize mode config\n");
3351 /* There is one primary plane per CRTC */
3352 primary_planes = dm->dc->caps.max_streams;
3353 ASSERT(primary_planes <= AMDGPU_MAX_PLANES);
3356 * Initialize primary planes, implicit planes for legacy IOCTLS.
3357 * Order is reversed to match iteration order in atomic check.
3359 for (i = (primary_planes - 1); i >= 0; i--) {
3360 plane = &dm->dc->caps.planes[i];
3362 if (initialize_plane(dm, mode_info, i,
3363 DRM_PLANE_TYPE_PRIMARY, plane)) {
3364 DRM_ERROR("KMS: Failed to initialize primary plane\n");
3370 * Initialize overlay planes, index starting after primary planes.
3371 * These planes have a higher DRM index than the primary planes since
3372 * they should be considered as having a higher z-order.
3373 * Order is reversed to match iteration order in atomic check.
3375 * Only support DCN for now, and only expose one so we don't encourage
3376 * userspace to use up all the pipes.
3378 for (i = 0; i < dm->dc->caps.max_planes; ++i) {
3379 struct dc_plane_cap *plane = &dm->dc->caps.planes[i];
3381 if (plane->type != DC_PLANE_TYPE_DCN_UNIVERSAL)
3384 if (!plane->blends_with_above || !plane->blends_with_below)
3387 if (!plane->pixel_format_support.argb8888)
3390 if (initialize_plane(dm, NULL, primary_planes + i,
3391 DRM_PLANE_TYPE_OVERLAY, plane)) {
3392 DRM_ERROR("KMS: Failed to initialize overlay plane\n");
3396 /* Only create one overlay plane. */
3400 for (i = 0; i < dm->dc->caps.max_streams; i++)
3401 if (amdgpu_dm_crtc_init(dm, mode_info->planes[i], i)) {
3402 DRM_ERROR("KMS: Failed to initialize crtc\n");
3406 /* Loop over all connectors on the board */
3407 for (i = 0; i < link_cnt; i++) {
3408 struct dc_link *link = NULL;
3410 if (i > AMDGPU_DM_MAX_DISPLAY_INDEX) {
3412 "KMS: Cannot support more than %d display indexes\n",
3413 AMDGPU_DM_MAX_DISPLAY_INDEX);
3417 aconnector = kzalloc(sizeof(*aconnector), GFP_KERNEL);
3421 aencoder = kzalloc(sizeof(*aencoder), GFP_KERNEL);
3425 if (amdgpu_dm_encoder_init(dm->ddev, aencoder, i)) {
3426 DRM_ERROR("KMS: Failed to initialize encoder\n");
3430 if (amdgpu_dm_connector_init(dm, aconnector, i, aencoder)) {
3431 DRM_ERROR("KMS: Failed to initialize connector\n");
3435 link = dc_get_link_at_index(dm->dc, i);
3437 if (!dc_link_detect_sink(link, &new_connection_type))
3438 DRM_ERROR("KMS: Failed to detect connector\n");
3440 if (aconnector->base.force && new_connection_type == dc_connection_none) {
3441 emulated_link_detect(link);
3442 amdgpu_dm_update_connector_after_detect(aconnector);
3444 } else if (dc_link_detect(link, DETECT_REASON_BOOT)) {
3445 amdgpu_dm_update_connector_after_detect(aconnector);
3446 register_backlight_device(dm, link);
3447 if (amdgpu_dc_feature_mask & DC_PSR_MASK)
3448 amdgpu_dm_set_psr_caps(link);
3454 /* Software is initialized. Now we can register interrupt handlers. */
3455 switch (adev->asic_type) {
3456 #if defined(CONFIG_DRM_AMD_DC_SI)
3461 if (dce60_register_irq_handlers(dm->adev)) {
3462 DRM_ERROR("DM: Failed to initialize IRQ\n");
3476 case CHIP_POLARIS11:
3477 case CHIP_POLARIS10:
3478 case CHIP_POLARIS12:
3483 if (dce110_register_irq_handlers(dm->adev)) {
3484 DRM_ERROR("DM: Failed to initialize IRQ\n");
3488 #if defined(CONFIG_DRM_AMD_DC_DCN)
3494 case CHIP_SIENNA_CICHLID:
3495 case CHIP_NAVY_FLOUNDER:
3496 case CHIP_DIMGREY_CAVEFISH:
3498 if (dcn10_register_irq_handlers(dm->adev)) {
3499 DRM_ERROR("DM: Failed to initialize IRQ\n");
3505 DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
3517 static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm)
3519 drm_mode_config_cleanup(dm->ddev);
3520 drm_atomic_private_obj_fini(&dm->atomic_obj);
3524 /******************************************************************************
3525 * amdgpu_display_funcs functions
3526 *****************************************************************************/
3529 * dm_bandwidth_update - program display watermarks
3531 * @adev: amdgpu_device pointer
3533 * Calculate and program the display watermarks and line buffer allocation.
3535 static void dm_bandwidth_update(struct amdgpu_device *adev)
3537 /* TODO: implement later */
3540 static const struct amdgpu_display_funcs dm_display_funcs = {
3541 .bandwidth_update = dm_bandwidth_update, /* called unconditionally */
3542 .vblank_get_counter = dm_vblank_get_counter,/* called unconditionally */
3543 .backlight_set_level = NULL, /* never called for DC */
3544 .backlight_get_level = NULL, /* never called for DC */
3545 .hpd_sense = NULL,/* called unconditionally */
3546 .hpd_set_polarity = NULL, /* called unconditionally */
3547 .hpd_get_gpio_reg = NULL, /* VBIOS parsing. DAL does it. */
3548 .page_flip_get_scanoutpos =
3549 dm_crtc_get_scanoutpos,/* called unconditionally */
3550 .add_encoder = NULL, /* VBIOS parsing. DAL does it. */
3551 .add_connector = NULL, /* VBIOS parsing. DAL does it. */
3554 #if defined(CONFIG_DEBUG_KERNEL_DC)
3556 static ssize_t s3_debug_store(struct device *device,
3557 struct device_attribute *attr,
3563 struct drm_device *drm_dev = dev_get_drvdata(device);
3564 struct amdgpu_device *adev = drm_to_adev(drm_dev);
3566 ret = kstrtoint(buf, 0, &s3_state);
3571 drm_kms_helper_hotplug_event(adev_to_drm(adev));
3576 return ret == 0 ? count : 0;
3579 DEVICE_ATTR_WO(s3_debug);
3583 static int dm_early_init(void *handle)
3585 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
3587 switch (adev->asic_type) {
3588 #if defined(CONFIG_DRM_AMD_DC_SI)
3592 adev->mode_info.num_crtc = 6;
3593 adev->mode_info.num_hpd = 6;
3594 adev->mode_info.num_dig = 6;
3597 adev->mode_info.num_crtc = 2;
3598 adev->mode_info.num_hpd = 2;
3599 adev->mode_info.num_dig = 2;
3604 adev->mode_info.num_crtc = 6;
3605 adev->mode_info.num_hpd = 6;
3606 adev->mode_info.num_dig = 6;
3609 adev->mode_info.num_crtc = 4;
3610 adev->mode_info.num_hpd = 6;
3611 adev->mode_info.num_dig = 7;
3615 adev->mode_info.num_crtc = 2;
3616 adev->mode_info.num_hpd = 6;
3617 adev->mode_info.num_dig = 6;
3621 adev->mode_info.num_crtc = 6;
3622 adev->mode_info.num_hpd = 6;
3623 adev->mode_info.num_dig = 7;
3626 adev->mode_info.num_crtc = 3;
3627 adev->mode_info.num_hpd = 6;
3628 adev->mode_info.num_dig = 9;
3631 adev->mode_info.num_crtc = 2;
3632 adev->mode_info.num_hpd = 6;
3633 adev->mode_info.num_dig = 9;
3635 case CHIP_POLARIS11:
3636 case CHIP_POLARIS12:
3637 adev->mode_info.num_crtc = 5;
3638 adev->mode_info.num_hpd = 5;
3639 adev->mode_info.num_dig = 5;
3641 case CHIP_POLARIS10:
3643 adev->mode_info.num_crtc = 6;
3644 adev->mode_info.num_hpd = 6;
3645 adev->mode_info.num_dig = 6;
3650 adev->mode_info.num_crtc = 6;
3651 adev->mode_info.num_hpd = 6;
3652 adev->mode_info.num_dig = 6;
3654 #if defined(CONFIG_DRM_AMD_DC_DCN)
3658 adev->mode_info.num_crtc = 4;
3659 adev->mode_info.num_hpd = 4;
3660 adev->mode_info.num_dig = 4;
3664 case CHIP_SIENNA_CICHLID:
3665 case CHIP_NAVY_FLOUNDER:
3666 adev->mode_info.num_crtc = 6;
3667 adev->mode_info.num_hpd = 6;
3668 adev->mode_info.num_dig = 6;
3671 case CHIP_DIMGREY_CAVEFISH:
3672 adev->mode_info.num_crtc = 5;
3673 adev->mode_info.num_hpd = 5;
3674 adev->mode_info.num_dig = 5;
3678 DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
3682 amdgpu_dm_set_irq_funcs(adev);
3684 if (adev->mode_info.funcs == NULL)
3685 adev->mode_info.funcs = &dm_display_funcs;
3688 * Note: Do NOT change adev->audio_endpt_rreg and
3689 * adev->audio_endpt_wreg because they are initialised in
3690 * amdgpu_device_init()
3692 #if defined(CONFIG_DEBUG_KERNEL_DC)
3694 adev_to_drm(adev)->dev,
3695 &dev_attr_s3_debug);
3701 static bool modeset_required(struct drm_crtc_state *crtc_state,
3702 struct dc_stream_state *new_stream,
3703 struct dc_stream_state *old_stream)
3705 return crtc_state->active && drm_atomic_crtc_needs_modeset(crtc_state);
3708 static bool modereset_required(struct drm_crtc_state *crtc_state)
3710 return !crtc_state->active && drm_atomic_crtc_needs_modeset(crtc_state);
3713 static void amdgpu_dm_encoder_destroy(struct drm_encoder *encoder)
3715 drm_encoder_cleanup(encoder);
3719 static const struct drm_encoder_funcs amdgpu_dm_encoder_funcs = {
3720 .destroy = amdgpu_dm_encoder_destroy,
3724 static void get_min_max_dc_plane_scaling(struct drm_device *dev,
3725 struct drm_framebuffer *fb,
3726 int *min_downscale, int *max_upscale)
3728 struct amdgpu_device *adev = drm_to_adev(dev);
3729 struct dc *dc = adev->dm.dc;
3730 /* Caps for all supported planes are the same on DCE and DCN 1 - 3 */
3731 struct dc_plane_cap *plane_cap = &dc->caps.planes[0];
3733 switch (fb->format->format) {
3734 case DRM_FORMAT_P010:
3735 case DRM_FORMAT_NV12:
3736 case DRM_FORMAT_NV21:
3737 *max_upscale = plane_cap->max_upscale_factor.nv12;
3738 *min_downscale = plane_cap->max_downscale_factor.nv12;
3741 case DRM_FORMAT_XRGB16161616F:
3742 case DRM_FORMAT_ARGB16161616F:
3743 case DRM_FORMAT_XBGR16161616F:
3744 case DRM_FORMAT_ABGR16161616F:
3745 *max_upscale = plane_cap->max_upscale_factor.fp16;
3746 *min_downscale = plane_cap->max_downscale_factor.fp16;
3750 *max_upscale = plane_cap->max_upscale_factor.argb8888;
3751 *min_downscale = plane_cap->max_downscale_factor.argb8888;
3756 * A factor of 1 in the plane_cap means scaling is not allowed, i.e. use a
3757 * scaling factor of 1.0 == 1000 units.
3759 if (*max_upscale == 1)
3760 *max_upscale = 1000;
3762 if (*min_downscale == 1)
3763 *min_downscale = 1000;
3767 static int fill_dc_scaling_info(const struct drm_plane_state *state,
3768 struct dc_scaling_info *scaling_info)
3770 int scale_w, scale_h, min_downscale, max_upscale;
3772 memset(scaling_info, 0, sizeof(*scaling_info));
3774 /* Source is fixed 16.16 but we ignore mantissa for now... */
3775 scaling_info->src_rect.x = state->src_x >> 16;
3776 scaling_info->src_rect.y = state->src_y >> 16;
3778 scaling_info->src_rect.width = state->src_w >> 16;
3779 if (scaling_info->src_rect.width == 0)
3782 scaling_info->src_rect.height = state->src_h >> 16;
3783 if (scaling_info->src_rect.height == 0)
3786 scaling_info->dst_rect.x = state->crtc_x;
3787 scaling_info->dst_rect.y = state->crtc_y;
3789 if (state->crtc_w == 0)
3792 scaling_info->dst_rect.width = state->crtc_w;
3794 if (state->crtc_h == 0)
3797 scaling_info->dst_rect.height = state->crtc_h;
3799 /* DRM doesn't specify clipping on destination output. */
3800 scaling_info->clip_rect = scaling_info->dst_rect;
3802 /* Validate scaling per-format with DC plane caps */
3803 if (state->plane && state->plane->dev && state->fb) {
3804 get_min_max_dc_plane_scaling(state->plane->dev, state->fb,
3805 &min_downscale, &max_upscale);
3807 min_downscale = 250;
3808 max_upscale = 16000;
3811 scale_w = scaling_info->dst_rect.width * 1000 /
3812 scaling_info->src_rect.width;
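/*
 * Scale factors are expressed in units of 0.001; e.g. upscaling a
 * 1920-wide source to a 3840-wide destination gives
 * scale_w = 3840 * 1000 / 1920 = 2000, i.e. a 2.0x factor.
 */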
3814 if (scale_w < min_downscale || scale_w > max_upscale)
3817 scale_h = scaling_info->dst_rect.height * 1000 /
3818 scaling_info->src_rect.height;
3820 if (scale_h < min_downscale || scale_h > max_upscale)
3824 * The "scaling_quality" can be ignored for now; with quality = 0, DC
3825 * assumes reasonable defaults based on the format.
3832 fill_gfx8_tiling_info_from_flags(union dc_tiling_info *tiling_info,
3833 uint64_t tiling_flags)
3835 /* Fill GFX8 params */
3836 if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == DC_ARRAY_2D_TILED_THIN1) {
3837 unsigned int bankw, bankh, mtaspect, tile_split, num_banks;
3839 bankw = AMDGPU_TILING_GET(tiling_flags, BANK_WIDTH);
3840 bankh = AMDGPU_TILING_GET(tiling_flags, BANK_HEIGHT);
3841 mtaspect = AMDGPU_TILING_GET(tiling_flags, MACRO_TILE_ASPECT);
3842 tile_split = AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT);
3843 num_banks = AMDGPU_TILING_GET(tiling_flags, NUM_BANKS);
3845 /* XXX fix me for VI */
3846 tiling_info->gfx8.num_banks = num_banks;
3847 tiling_info->gfx8.array_mode =
3848 DC_ARRAY_2D_TILED_THIN1;
3849 tiling_info->gfx8.tile_split = tile_split;
3850 tiling_info->gfx8.bank_width = bankw;
3851 tiling_info->gfx8.bank_height = bankh;
3852 tiling_info->gfx8.tile_aspect = mtaspect;
3853 tiling_info->gfx8.tile_mode =
3854 DC_ADDR_SURF_MICRO_TILING_DISPLAY;
3855 } else if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE)
3856 == DC_ARRAY_1D_TILED_THIN1) {
3857 tiling_info->gfx8.array_mode = DC_ARRAY_1D_TILED_THIN1;
3860 tiling_info->gfx8.pipe_config =
3861 AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG);
3865 fill_gfx9_tiling_info_from_device(const struct amdgpu_device *adev,
3866 union dc_tiling_info *tiling_info)
3868 tiling_info->gfx9.num_pipes =
3869 adev->gfx.config.gb_addr_config_fields.num_pipes;
3870 tiling_info->gfx9.num_banks =
3871 adev->gfx.config.gb_addr_config_fields.num_banks;
3872 tiling_info->gfx9.pipe_interleave =
3873 adev->gfx.config.gb_addr_config_fields.pipe_interleave_size;
3874 tiling_info->gfx9.num_shader_engines =
3875 adev->gfx.config.gb_addr_config_fields.num_se;
3876 tiling_info->gfx9.max_compressed_frags =
3877 adev->gfx.config.gb_addr_config_fields.max_compress_frags;
3878 tiling_info->gfx9.num_rb_per_se =
3879 adev->gfx.config.gb_addr_config_fields.num_rb_per_se;
3880 tiling_info->gfx9.shaderEnable = 1;
3881 if (adev->asic_type == CHIP_SIENNA_CICHLID ||
3882 adev->asic_type == CHIP_NAVY_FLOUNDER ||
3883 adev->asic_type == CHIP_DIMGREY_CAVEFISH ||
3884 adev->asic_type == CHIP_VANGOGH)
3885 tiling_info->gfx9.num_pkrs = adev->gfx.config.gb_addr_config_fields.num_pkrs;
3889 validate_dcc(struct amdgpu_device *adev,
3890 const enum surface_pixel_format format,
3891 const enum dc_rotation_angle rotation,
3892 const union dc_tiling_info *tiling_info,
3893 const struct dc_plane_dcc_param *dcc,
3894 const struct dc_plane_address *address,
3895 const struct plane_size *plane_size)
3897 struct dc *dc = adev->dm.dc;
3898 struct dc_dcc_surface_param input;
3899 struct dc_surface_dcc_cap output;
3901 memset(&input, 0, sizeof(input));
3902 memset(&output, 0, sizeof(output));
3907 if (format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN ||
3908 !dc->cap_funcs.get_dcc_compression_cap)
3911 input.format = format;
3912 input.surface_size.width = plane_size->surface_size.width;
3913 input.surface_size.height = plane_size->surface_size.height;
3914 input.swizzle_mode = tiling_info->gfx9.swizzle;
3916 if (rotation == ROTATION_ANGLE_0 || rotation == ROTATION_ANGLE_180)
3917 input.scan = SCAN_DIRECTION_HORIZONTAL;
3918 else if (rotation == ROTATION_ANGLE_90 || rotation == ROTATION_ANGLE_270)
3919 input.scan = SCAN_DIRECTION_VERTICAL;
3921 if (!dc->cap_funcs.get_dcc_compression_cap(dc, &input, &output))
3924 if (!output.capable)
3927 if (dcc->independent_64b_blks == 0 &&
3928 output.grph.rgb.independent_64b_blks != 0)
3935 modifier_has_dcc(uint64_t modifier)
3937 return IS_AMD_FMT_MOD(modifier) && AMD_FMT_MOD_GET(DCC, modifier);
3941 modifier_gfx9_swizzle_mode(uint64_t modifier)
3943 if (modifier == DRM_FORMAT_MOD_LINEAR)
3946 return AMD_FMT_MOD_GET(TILE, modifier);
3949 static const struct drm_format_info *
3950 amd_get_format_info(const struct drm_mode_fb_cmd2 *cmd)
3952 return amdgpu_lookup_format_info(cmd->pixel_format, cmd->modifier[0]);
3956 fill_gfx9_tiling_info_from_modifier(const struct amdgpu_device *adev,
3957 union dc_tiling_info *tiling_info,
3960 unsigned int mod_bank_xor_bits = AMD_FMT_MOD_GET(BANK_XOR_BITS, modifier);
3961 unsigned int mod_pipe_xor_bits = AMD_FMT_MOD_GET(PIPE_XOR_BITS, modifier);
3962 unsigned int pkrs_log2 = AMD_FMT_MOD_GET(PACKERS, modifier);
3963 unsigned int pipes_log2 = min(4u, mod_pipe_xor_bits);
3965 fill_gfx9_tiling_info_from_device(adev, tiling_info);
3967 if (!IS_AMD_FMT_MOD(modifier))
3970 tiling_info->gfx9.num_pipes = 1u << pipes_log2;
3971 tiling_info->gfx9.num_shader_engines = 1u << (mod_pipe_xor_bits - pipes_log2);
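/*
 * For example, mod_pipe_xor_bits = 6 gives pipes_log2 = min(4, 6) = 4,
 * so num_pipes = 16 and num_shader_engines = 1 << (6 - 4) = 4.
 */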
3973 if (adev->family >= AMDGPU_FAMILY_NV) {
3974 tiling_info->gfx9.num_pkrs = 1u << pkrs_log2;
3976 tiling_info->gfx9.num_banks = 1u << mod_bank_xor_bits;
3978 /* for DCC we know it isn't rb aligned, so rb_per_se doesn't matter. */
3982 enum dm_micro_swizzle {
3983 MICRO_SWIZZLE_Z = 0,
3984 MICRO_SWIZZLE_S = 1,
3985 MICRO_SWIZZLE_D = 2,
3989 static bool dm_plane_format_mod_supported(struct drm_plane *plane,
3993 struct amdgpu_device *adev = drm_to_adev(plane->dev);
3994 const struct drm_format_info *info = drm_format_info(format);
3996 enum dm_micro_swizzle microtile = modifier_gfx9_swizzle_mode(modifier) & 3;
4002 * We always have to allow this modifier, because core DRM still
4003 * checks LINEAR support if userspace does not provide modifiers.
4005 if (modifier == DRM_FORMAT_MOD_LINEAR)
4009 * The arbitrary tiling support for multiplane formats has not been hooked up.
4012 if (info->num_planes > 1)
4016 * For D swizzle the canonical modifier depends on the bpp, so check it here.
4019 if (AMD_FMT_MOD_GET(TILE_VERSION, modifier) == AMD_FMT_MOD_TILE_VER_GFX9 &&
4020 adev->family >= AMDGPU_FAMILY_NV) {
4021 if (microtile == MICRO_SWIZZLE_D && info->cpp[0] == 4)
4025 if (adev->family >= AMDGPU_FAMILY_RV && microtile == MICRO_SWIZZLE_D &&
4029 if (modifier_has_dcc(modifier)) {
4030 /* Per radeonsi comments 16/64 bpp are more complicated. */
4031 if (info->cpp[0] != 4)
4039 add_modifier(uint64_t **mods, uint64_t *size, uint64_t *cap, uint64_t mod)
4044 if (*cap - *size < 1) {
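/* Out of space: double the capacity so repeated appends stay cheap. */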
4045 uint64_t new_cap = *cap * 2;
4046 uint64_t *new_mods = kmalloc(new_cap * sizeof(uint64_t), GFP_KERNEL);
4054 memcpy(new_mods, *mods, sizeof(uint64_t) * *size);
4060 (*mods)[*size] = mod;
4065 add_gfx9_modifiers(const struct amdgpu_device *adev,
4066 uint64_t **mods, uint64_t *size, uint64_t *capacity)
4068 int pipes = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
4069 int pipe_xor_bits = min(8, pipes +
4070 ilog2(adev->gfx.config.gb_addr_config_fields.num_se));
4071 int bank_xor_bits = min(8 - pipe_xor_bits,
4072 ilog2(adev->gfx.config.gb_addr_config_fields.num_banks));
4073 int rb = ilog2(adev->gfx.config.gb_addr_config_fields.num_se) +
4074 ilog2(adev->gfx.config.gb_addr_config_fields.num_rb_per_se);
4077 if (adev->family == AMDGPU_FAMILY_RV) {
4078 /* Raven2 and later */
4079 bool has_constant_encode = adev->asic_type > CHIP_RAVEN || adev->external_rev_id >= 0x81;
4082 * No _D DCC swizzles yet because we only allow 32bpp, which
4083 * doesn't support _D on DCN
4086 if (has_constant_encode) {
4087 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4088 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4089 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4090 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4091 AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
4092 AMD_FMT_MOD_SET(DCC, 1) |
4093 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4094 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
4095 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1));
4098 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4099 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4100 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4101 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4102 AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
4103 AMD_FMT_MOD_SET(DCC, 1) |
4104 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4105 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
4106 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 0));
4108 if (has_constant_encode) {
4109 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4110 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4111 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4112 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4113 AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
4114 AMD_FMT_MOD_SET(DCC, 1) |
4115 AMD_FMT_MOD_SET(DCC_RETILE, 1) |
4116 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4117 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
4119 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
4120 AMD_FMT_MOD_SET(RB, rb) |
4121 AMD_FMT_MOD_SET(PIPE, pipes));
4124 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4125 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4126 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4127 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4128 AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
4129 AMD_FMT_MOD_SET(DCC, 1) |
4130 AMD_FMT_MOD_SET(DCC_RETILE, 1) |
4131 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4132 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
4133 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 0) |
4134 AMD_FMT_MOD_SET(RB, rb) |
4135 AMD_FMT_MOD_SET(PIPE, pipes));
4139 * Only supported for 64bpp on Raven, will be filtered on format in
4140 * dm_plane_format_mod_supported.
4142 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4143 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D_X) |
4144 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4145 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4146 AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits));
4148 if (adev->family == AMDGPU_FAMILY_RV) {
4149 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4150 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4151 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4152 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4153 AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits));
4157 * Only supported for 64bpp on Raven, will be filtered on format in
4158 * dm_plane_format_mod_supported.
4160 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4161 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
4162 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4164 if (adev->family == AMDGPU_FAMILY_RV) {
4165 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4166 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
4167 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4172 add_gfx10_1_modifiers(const struct amdgpu_device *adev,
4173 uint64_t **mods, uint64_t *size, uint64_t *capacity)
4175 int pipe_xor_bits = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
4177 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4178 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4179 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
4180 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4181 AMD_FMT_MOD_SET(DCC, 1) |
4182 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
4183 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4184 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
4186 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4187 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4188 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
4189 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4190 AMD_FMT_MOD_SET(DCC, 1) |
4191 AMD_FMT_MOD_SET(DCC_RETILE, 1) |
4192 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
4193 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4194 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
4196 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4197 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4198 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
4199 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits));
4201 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4202 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4203 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
4204 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits));
4207 /* Only supported for 64bpp, will be filtered in dm_plane_format_mod_supported */
4208 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4209 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
4210 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4212 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4213 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
4214 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4218 add_gfx10_3_modifiers(const struct amdgpu_device *adev,
4219 uint64_t **mods, uint64_t *size, uint64_t *capacity)
4221 int pipe_xor_bits = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
4222 int pkrs = ilog2(adev->gfx.config.gb_addr_config_fields.num_pkrs);
4224 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4225 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4226 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
4227 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4228 AMD_FMT_MOD_SET(PACKERS, pkrs) |
4229 AMD_FMT_MOD_SET(DCC, 1) |
4230 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
4231 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4232 AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
4233 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_128B));
4235 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4236 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4237 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
4238 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4239 AMD_FMT_MOD_SET(PACKERS, pkrs) |
4240 AMD_FMT_MOD_SET(DCC, 1) |
4241 AMD_FMT_MOD_SET(DCC_RETILE, 1) |
4242 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
4243 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4244 AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
4245 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_128B));
4247 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4248 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4249 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
4250 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4251 AMD_FMT_MOD_SET(PACKERS, pkrs));
4253 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4254 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4255 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
4256 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4257 AMD_FMT_MOD_SET(PACKERS, pkrs));
4259 /* Only supported for 64bpp, will be filtered in dm_plane_format_mod_supported */
4260 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4261 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
4262 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4264 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4265 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
4266 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
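/*
 * Builds the per-plane modifier list advertised to userspace. Cursor planes
 * only get LINEAR; everything else gets the per-family sets above followed
 * by LINEAR, and the list is terminated with DRM_FORMAT_MOD_INVALID.
 *
 * A minimal usage sketch (illustrative only; the caller, plane type and
 * error handling are assumed):
 *
 *	uint64_t *mods = NULL;
 *
 *	if (!get_plane_modifiers(adev, DRM_PLANE_TYPE_PRIMARY, &mods)) {
 *		// walk mods until DRM_FORMAT_MOD_INVALID
 *		kfree(mods);
 *	}
 */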
4270 get_plane_modifiers(const struct amdgpu_device *adev, unsigned int plane_type, uint64_t **mods)
4272 uint64_t size = 0, capacity = 128;
4275 /* We have not hooked up any pre-GFX9 modifiers. */
4276 if (adev->family < AMDGPU_FAMILY_AI)
4279 *mods = kmalloc(capacity * sizeof(uint64_t), GFP_KERNEL);
4281 if (plane_type == DRM_PLANE_TYPE_CURSOR) {
4282 add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_LINEAR);
4283 add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_INVALID);
4284 return *mods ? 0 : -ENOMEM;
4287 switch (adev->family) {
4288 case AMDGPU_FAMILY_AI:
4289 case AMDGPU_FAMILY_RV:
4290 add_gfx9_modifiers(adev, mods, &size, &capacity);
4292 case AMDGPU_FAMILY_NV:
4293 case AMDGPU_FAMILY_VGH:
4294 if (adev->asic_type >= CHIP_SIENNA_CICHLID)
4295 add_gfx10_3_modifiers(adev, mods, &size, &capacity);
4297 add_gfx10_1_modifiers(adev, mods, &size, &capacity);
4301 add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_LINEAR);
4303 /* INVALID marks the end of the list. */
4304 add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_INVALID);
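/*
 * Derives GFX9+ tiling/DCC state purely from the framebuffer modifier.
 * When the modifier carries DCC, the metadata surface is taken from fb
 * plane 1 (offsets[1]/pitches[1]) and the result is cross-checked with
 * validate_dcc() before it is accepted.
 */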
4313 fill_gfx9_plane_attributes_from_modifiers(struct amdgpu_device *adev,
4314 const struct amdgpu_framebuffer *afb,
4315 const enum surface_pixel_format format,
4316 const enum dc_rotation_angle rotation,
4317 const struct plane_size *plane_size,
4318 union dc_tiling_info *tiling_info,
4319 struct dc_plane_dcc_param *dcc,
4320 struct dc_plane_address *address,
4321 const bool force_disable_dcc)
4323 const uint64_t modifier = afb->base.modifier;
4326 fill_gfx9_tiling_info_from_modifier(adev, tiling_info, modifier);
4327 tiling_info->gfx9.swizzle = modifier_gfx9_swizzle_mode(modifier);
4329 if (modifier_has_dcc(modifier) && !force_disable_dcc) {
4330 uint64_t dcc_address = afb->address + afb->base.offsets[1];
4333 dcc->meta_pitch = afb->base.pitches[1];
4334 dcc->independent_64b_blks = AMD_FMT_MOD_GET(DCC_INDEPENDENT_64B, modifier);
4336 address->grph.meta_addr.low_part = lower_32_bits(dcc_address);
4337 address->grph.meta_addr.high_part = upper_32_bits(dcc_address);
4340 ret = validate_dcc(adev, format, rotation, tiling_info, dcc, address, plane_size);
4348 fill_plane_buffer_attributes(struct amdgpu_device *adev,
4349 const struct amdgpu_framebuffer *afb,
4350 const enum surface_pixel_format format,
4351 const enum dc_rotation_angle rotation,
4352 const uint64_t tiling_flags,
4353 union dc_tiling_info *tiling_info,
4354 struct plane_size *plane_size,
4355 struct dc_plane_dcc_param *dcc,
4356 struct dc_plane_address *address,
4358 bool force_disable_dcc)
4360 const struct drm_framebuffer *fb = &afb->base;
4363 memset(tiling_info, 0, sizeof(*tiling_info));
4364 memset(plane_size, 0, sizeof(*plane_size));
4365 memset(dcc, 0, sizeof(*dcc));
4366 memset(address, 0, sizeof(*address));
4368 address->tmz_surface = tmz_surface;
4370 if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) {
4371 uint64_t addr = afb->address + fb->offsets[0];
4373 plane_size->surface_size.x = 0;
4374 plane_size->surface_size.y = 0;
4375 plane_size->surface_size.width = fb->width;
4376 plane_size->surface_size.height = fb->height;
4377 plane_size->surface_pitch =
4378 fb->pitches[0] / fb->format->cpp[0];
4380 address->type = PLN_ADDR_TYPE_GRAPHICS;
4381 address->grph.addr.low_part = lower_32_bits(addr);
4382 address->grph.addr.high_part = upper_32_bits(addr);
4383 } else if (format < SURFACE_PIXEL_FORMAT_INVALID) {
4384 uint64_t luma_addr = afb->address + fb->offsets[0];
4385 uint64_t chroma_addr = afb->address + fb->offsets[1];
4387 plane_size->surface_size.x = 0;
4388 plane_size->surface_size.y = 0;
4389 plane_size->surface_size.width = fb->width;
4390 plane_size->surface_size.height = fb->height;
4391 plane_size->surface_pitch =
4392 fb->pitches[0] / fb->format->cpp[0];
4394 plane_size->chroma_size.x = 0;
4395 plane_size->chroma_size.y = 0;
4396 /* TODO: set these based on surface format */
4397 plane_size->chroma_size.width = fb->width / 2;
4398 plane_size->chroma_size.height = fb->height / 2;
4400 plane_size->chroma_pitch =
4401 fb->pitches[1] / fb->format->cpp[1];
4403 address->type = PLN_ADDR_TYPE_VIDEO_PROGRESSIVE;
4404 address->video_progressive.luma_addr.low_part =
4405 lower_32_bits(luma_addr);
4406 address->video_progressive.luma_addr.high_part =
4407 upper_32_bits(luma_addr);
4408 address->video_progressive.chroma_addr.low_part =
4409 lower_32_bits(chroma_addr);
4410 address->video_progressive.chroma_addr.high_part =
4411 upper_32_bits(chroma_addr);
4414 if (adev->family >= AMDGPU_FAMILY_AI) {
4415 ret = fill_gfx9_plane_attributes_from_modifiers(adev, afb, format,
4416 rotation, plane_size,
4423 fill_gfx8_tiling_info_from_flags(tiling_info, tiling_flags);
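/*
 * Maps DRM plane blending state onto DC's three blending knobs. Per-pixel
 * alpha is only honoured for overlay planes using a premultiplied-alpha
 * format, and the 16-bit DRM plane alpha is truncated to DC's 8-bit global
 * alpha.
 *
 * E.g. (values assumed): an ARGB8888 overlay with pixel_blend_mode
 * DRM_MODE_BLEND_PREMULTI and plane alpha 0x8080 yields per_pixel_alpha =
 * true, global_alpha = true, global_alpha_value = 0x80.
 */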
4430 fill_blending_from_plane_state(const struct drm_plane_state *plane_state,
4431 bool *per_pixel_alpha, bool *global_alpha,
4432 int *global_alpha_value)
4434 *per_pixel_alpha = false;
4435 *global_alpha = false;
4436 *global_alpha_value = 0xff;
4438 if (plane_state->plane->type != DRM_PLANE_TYPE_OVERLAY)
4441 if (plane_state->pixel_blend_mode == DRM_MODE_BLEND_PREMULTI) {
4442 static const uint32_t alpha_formats[] = {
4443 DRM_FORMAT_ARGB8888,
4444 DRM_FORMAT_RGBA8888,
4445 DRM_FORMAT_ABGR8888,
4447 uint32_t format = plane_state->fb->format->format;
4450 for (i = 0; i < ARRAY_SIZE(alpha_formats); ++i) {
4451 if (format == alpha_formats[i]) {
4452 *per_pixel_alpha = true;
4458 if (plane_state->alpha < 0xffff) {
4459 *global_alpha = true;
4460 *global_alpha_value = plane_state->alpha >> 8;
4465 fill_plane_color_attributes(const struct drm_plane_state *plane_state,
4466 const enum surface_pixel_format format,
4467 enum dc_color_space *color_space)
4471 *color_space = COLOR_SPACE_SRGB;
4473 /* DRM color properties only affect non-RGB formats. */
4474 if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN)
4477 full_range = (plane_state->color_range == DRM_COLOR_YCBCR_FULL_RANGE);
4479 switch (plane_state->color_encoding) {
4480 case DRM_COLOR_YCBCR_BT601:
4482 *color_space = COLOR_SPACE_YCBCR601;
4484 *color_space = COLOR_SPACE_YCBCR601_LIMITED;
4487 case DRM_COLOR_YCBCR_BT709:
4489 *color_space = COLOR_SPACE_YCBCR709;
4491 *color_space = COLOR_SPACE_YCBCR709_LIMITED;
4494 case DRM_COLOR_YCBCR_BT2020:
4496 *color_space = COLOR_SPACE_2020_YCBCR;
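/*
 * Fills a dc_plane_info from DRM plane state: the fourcc is translated to a
 * DC surface format below, the DRM rotation bits to a DC rotation angle,
 * and buffer/colour/blending attributes are filled in via the helpers
 * above. Unsupported fourccs are rejected with an error.
 */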
4509 fill_dc_plane_info_and_addr(struct amdgpu_device *adev,
4510 const struct drm_plane_state *plane_state,
4511 const uint64_t tiling_flags,
4512 struct dc_plane_info *plane_info,
4513 struct dc_plane_address *address,
4515 bool force_disable_dcc)
4517 const struct drm_framebuffer *fb = plane_state->fb;
4518 const struct amdgpu_framebuffer *afb =
4519 to_amdgpu_framebuffer(plane_state->fb);
4520 struct drm_format_name_buf format_name;
4523 memset(plane_info, 0, sizeof(*plane_info));
4525 switch (fb->format->format) {
4527 plane_info->format =
4528 SURFACE_PIXEL_FORMAT_GRPH_PALETA_256_COLORS;
4530 case DRM_FORMAT_RGB565:
4531 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_RGB565;
4533 case DRM_FORMAT_XRGB8888:
4534 case DRM_FORMAT_ARGB8888:
4535 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB8888;
4537 case DRM_FORMAT_XRGB2101010:
4538 case DRM_FORMAT_ARGB2101010:
4539 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010;
4541 case DRM_FORMAT_XBGR2101010:
4542 case DRM_FORMAT_ABGR2101010:
4543 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010;
4545 case DRM_FORMAT_XBGR8888:
4546 case DRM_FORMAT_ABGR8888:
4547 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR8888;
4549 case DRM_FORMAT_NV21:
4550 plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr;
4552 case DRM_FORMAT_NV12:
4553 plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb;
4555 case DRM_FORMAT_P010:
4556 plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb;
4558 case DRM_FORMAT_XRGB16161616F:
4559 case DRM_FORMAT_ARGB16161616F:
4560 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F;
4562 case DRM_FORMAT_XBGR16161616F:
4563 case DRM_FORMAT_ABGR16161616F:
4564 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F;
4568 "Unsupported screen format %s\n",
4569 drm_get_format_name(fb->format->format, &format_name));
4573 switch (plane_state->rotation & DRM_MODE_ROTATE_MASK) {
4574 case DRM_MODE_ROTATE_0:
4575 plane_info->rotation = ROTATION_ANGLE_0;
4577 case DRM_MODE_ROTATE_90:
4578 plane_info->rotation = ROTATION_ANGLE_90;
4580 case DRM_MODE_ROTATE_180:
4581 plane_info->rotation = ROTATION_ANGLE_180;
4583 case DRM_MODE_ROTATE_270:
4584 plane_info->rotation = ROTATION_ANGLE_270;
4587 plane_info->rotation = ROTATION_ANGLE_0;
4591 plane_info->visible = true;
4592 plane_info->stereo_format = PLANE_STEREO_FORMAT_NONE;
4594 plane_info->layer_index = 0;
4596 ret = fill_plane_color_attributes(plane_state, plane_info->format,
4597 &plane_info->color_space);
4601 ret = fill_plane_buffer_attributes(adev, afb, plane_info->format,
4602 plane_info->rotation, tiling_flags,
4603 &plane_info->tiling_info,
4604 &plane_info->plane_size,
4605 &plane_info->dcc, address, tmz_surface,
4610 fill_blending_from_plane_state(
4611 plane_state, &plane_info->per_pixel_alpha,
4612 &plane_info->global_alpha, &plane_info->global_alpha_value);
4617 static int fill_dc_plane_attributes(struct amdgpu_device *adev,
4618 struct dc_plane_state *dc_plane_state,
4619 struct drm_plane_state *plane_state,
4620 struct drm_crtc_state *crtc_state)
4622 struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
4623 struct amdgpu_framebuffer *afb = (struct amdgpu_framebuffer *)plane_state->fb;
4624 struct dc_scaling_info scaling_info;
4625 struct dc_plane_info plane_info;
4627 bool force_disable_dcc = false;
4629 ret = fill_dc_scaling_info(plane_state, &scaling_info);
4633 dc_plane_state->src_rect = scaling_info.src_rect;
4634 dc_plane_state->dst_rect = scaling_info.dst_rect;
4635 dc_plane_state->clip_rect = scaling_info.clip_rect;
4636 dc_plane_state->scaling_quality = scaling_info.scaling_quality;
4638 force_disable_dcc = adev->asic_type == CHIP_RAVEN && adev->in_suspend;
4639 ret = fill_dc_plane_info_and_addr(adev, plane_state,
4642 &dc_plane_state->address,
4648 dc_plane_state->format = plane_info.format;
4649 dc_plane_state->color_space = plane_info.color_space;
4651 dc_plane_state->plane_size = plane_info.plane_size;
4652 dc_plane_state->rotation = plane_info.rotation;
4653 dc_plane_state->horizontal_mirror = plane_info.horizontal_mirror;
4654 dc_plane_state->stereo_format = plane_info.stereo_format;
4655 dc_plane_state->tiling_info = plane_info.tiling_info;
4656 dc_plane_state->visible = plane_info.visible;
4657 dc_plane_state->per_pixel_alpha = plane_info.per_pixel_alpha;
4658 dc_plane_state->global_alpha = plane_info.global_alpha;
4659 dc_plane_state->global_alpha_value = plane_info.global_alpha_value;
4660 dc_plane_state->dcc = plane_info.dcc;
4661 dc_plane_state->layer_index = plane_info.layer_index; /* currently always 0 */
4664 * Always set input transfer function, since plane state is refreshed
4667 ret = amdgpu_dm_update_plane_color_mgmt(dm_crtc_state, dc_plane_state);
4674 static void update_stream_scaling_settings(const struct drm_display_mode *mode,
4675 const struct dm_connector_state *dm_state,
4676 struct dc_stream_state *stream)
4678 enum amdgpu_rmx_type rmx_type;
4680 struct rect src = { 0 }; /* viewport in composition space */
4681 struct rect dst = { 0 }; /* stream addressable area */
4683 /* no mode. nothing to be done */
4687 /* Full screen scaling by default */
4688 src.width = mode->hdisplay;
4689 src.height = mode->vdisplay;
4690 dst.width = stream->timing.h_addressable;
4691 dst.height = stream->timing.v_addressable;
4694 rmx_type = dm_state->scaling;
4695 if (rmx_type == RMX_ASPECT || rmx_type == RMX_OFF) {
4696 if (src.width * dst.height <
4697 src.height * dst.width) {
4698 /* height needs less upscaling/more downscaling */
4699 dst.width = src.width *
4700 dst.height / src.height;
4702 /* width needs less upscaling/more downscaling */
4703 dst.height = src.height *
4704 dst.width / src.width;
4706 } else if (rmx_type == RMX_CENTER) {
4710 dst.x = (stream->timing.h_addressable - dst.width) / 2;
4711 dst.y = (stream->timing.v_addressable - dst.height) / 2;
4713 if (dm_state->underscan_enable) {
4714 dst.x += dm_state->underscan_hborder / 2;
4715 dst.y += dm_state->underscan_vborder / 2;
4716 dst.width -= dm_state->underscan_hborder;
4717 dst.height -= dm_state->underscan_vborder;
4724 DRM_DEBUG_DRIVER("Destination Rectangle x:%d y:%d width:%d height:%d\n",
4725 dst.x, dst.y, dst.width, dst.height);
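/*
 * Worked example for the RMX_ASPECT path above (numbers assumed): a
 * 1280x1024 source on a 1920x1080 stream gives src.w * dst.h (1382400) <
 * src.h * dst.w (1966080), so dst.width = 1280 * 1080 / 1024 = 1350, and
 * the image is centered with dst.x = (1920 - 1350) / 2 = 285.
 */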
4729 static enum dc_color_depth
4730 convert_color_depth_from_display_info(const struct drm_connector *connector,
4731 bool is_y420, int requested_bpc)
4738 /* Cap display bpc based on HDMI 2.0 HF-VSDB */
4739 if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_48)
4741 else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_36)
4743 else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_30)
4746 bpc = (uint8_t)connector->display_info.bpc;
4747 /* Assume 8 bpc by default if no bpc is specified. */
4748 bpc = bpc ? bpc : 8;
4751 if (requested_bpc > 0) {
4753 * Cap display bpc based on the user requested value.
4755 * The value for state->max_bpc may not be correctly updated
4756 * depending on when the connector gets added to the state
4757 * or if this was called outside of atomic check, so it
4758 * can't be used directly.
4760 bpc = min_t(u8, bpc, requested_bpc);
4762 /* Round down to the nearest even number. */
4763 bpc = bpc - (bpc & 1);
4769 * Temporary workaround: DRM doesn't parse color depth for
4770 * EDID revisions before 1.4.
4771 * TODO: Fix EDID parsing
4773 return COLOR_DEPTH_888;
4775 return COLOR_DEPTH_666;
4777 return COLOR_DEPTH_888;
4779 return COLOR_DEPTH_101010;
4781 return COLOR_DEPTH_121212;
4783 return COLOR_DEPTH_141414;
4785 return COLOR_DEPTH_161616;
4787 return COLOR_DEPTH_UNDEFINED;
4791 static enum dc_aspect_ratio
4792 get_aspect_ratio(const struct drm_display_mode *mode_in)
4794 /* 1-1 mapping, since both enums follow the HDMI spec. */
4795 return (enum dc_aspect_ratio) mode_in->picture_aspect_ratio;
4798 static enum dc_color_space
4799 get_output_color_space(const struct dc_crtc_timing *dc_crtc_timing)
4801 enum dc_color_space color_space = COLOR_SPACE_SRGB;
4803 switch (dc_crtc_timing->pixel_encoding) {
4804 case PIXEL_ENCODING_YCBCR422:
4805 case PIXEL_ENCODING_YCBCR444:
4806 case PIXEL_ENCODING_YCBCR420:
4809 * 27030 kHz is the separation point between HDTV and SDTV;
4810 * per the HDMI spec, we use YCbCr709 and YCbCr601
4813 if (dc_crtc_timing->pix_clk_100hz > 270300) {
4814 if (dc_crtc_timing->flags.Y_ONLY)
4816 COLOR_SPACE_YCBCR709_LIMITED;
4818 color_space = COLOR_SPACE_YCBCR709;
4820 if (dc_crtc_timing->flags.Y_ONLY)
4822 COLOR_SPACE_YCBCR601_LIMITED;
4824 color_space = COLOR_SPACE_YCBCR601;
4829 case PIXEL_ENCODING_RGB:
4830 color_space = COLOR_SPACE_SRGB;
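/*
 * Example for the YCbCr branch above (timings assumed): 480p at 27.000 MHz
 * has pix_clk_100hz == 270000, which is not above the 270300 threshold, so
 * it gets COLOR_SPACE_YCBCR601; 720p at 74.25 MHz (742500) exceeds it and
 * gets COLOR_SPACE_YCBCR709.
 */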
4841 static bool adjust_colour_depth_from_display_info(
4842 struct dc_crtc_timing *timing_out,
4843 const struct drm_display_info *info)
4845 enum dc_color_depth depth = timing_out->display_color_depth;
4848 normalized_clk = timing_out->pix_clk_100hz / 10;
4849 /* YCbCr 4:2:0 requires additional adjustment of 1/2 */
4850 if (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420)
4851 normalized_clk /= 2;
4852 /* Adjust the pixel clock per the HDMI spec, based on colour depth */
4854 case COLOR_DEPTH_888:
4856 case COLOR_DEPTH_101010:
4857 normalized_clk = (normalized_clk * 30) / 24;
4859 case COLOR_DEPTH_121212:
4860 normalized_clk = (normalized_clk * 36) / 24;
4862 case COLOR_DEPTH_161616:
4863 normalized_clk = (normalized_clk * 48) / 24;
4866 /* The above depths are the only ones valid for HDMI. */
4869 if (normalized_clk <= info->max_tmds_clock) {
4870 timing_out->display_color_depth = depth;
4873 } while (--depth > COLOR_DEPTH_666);
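/*
 * Worked example for the loop above (HDMI 2.0 sink assumed, max_tmds_clock
 * = 600000 kHz): 4K60 RGB at 594 MHz normalizes to 594000; 12 bpc scales
 * it to 891000 and 10 bpc to 742500, both rejected, while 8 bpc leaves
 * 594000 <= 600000, so display_color_depth falls back to COLOR_DEPTH_888.
 */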
4877 static void fill_stream_properties_from_drm_display_mode(
4878 struct dc_stream_state *stream,
4879 const struct drm_display_mode *mode_in,
4880 const struct drm_connector *connector,
4881 const struct drm_connector_state *connector_state,
4882 const struct dc_stream_state *old_stream,
4885 struct dc_crtc_timing *timing_out = &stream->timing;
4886 const struct drm_display_info *info = &connector->display_info;
4887 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
4888 struct hdmi_vendor_infoframe hv_frame;
4889 struct hdmi_avi_infoframe avi_frame;
4891 memset(&hv_frame, 0, sizeof(hv_frame));
4892 memset(&avi_frame, 0, sizeof(avi_frame));
4894 timing_out->h_border_left = 0;
4895 timing_out->h_border_right = 0;
4896 timing_out->v_border_top = 0;
4897 timing_out->v_border_bottom = 0;
4898 /* TODO: un-hardcode */
4899 if (drm_mode_is_420_only(info, mode_in)
4900 && stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
4901 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
4902 else if (drm_mode_is_420_also(info, mode_in)
4903 && aconnector->force_yuv420_output)
4904 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
4905 else if ((connector->display_info.color_formats & DRM_COLOR_FORMAT_YCRCB444)
4906 && stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
4907 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR444;
4909 timing_out->pixel_encoding = PIXEL_ENCODING_RGB;
4911 timing_out->timing_3d_format = TIMING_3D_FORMAT_NONE;
4912 timing_out->display_color_depth = convert_color_depth_from_display_info(
4914 (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420),
4916 timing_out->scan_type = SCANNING_TYPE_NODATA;
4917 timing_out->hdmi_vic = 0;
4920 timing_out->vic = old_stream->timing.vic;
4921 timing_out->flags.HSYNC_POSITIVE_POLARITY = old_stream->timing.flags.HSYNC_POSITIVE_POLARITY;
4922 timing_out->flags.VSYNC_POSITIVE_POLARITY = old_stream->timing.flags.VSYNC_POSITIVE_POLARITY;
4924 timing_out->vic = drm_match_cea_mode(mode_in);
4925 if (mode_in->flags & DRM_MODE_FLAG_PHSYNC)
4926 timing_out->flags.HSYNC_POSITIVE_POLARITY = 1;
4927 if (mode_in->flags & DRM_MODE_FLAG_PVSYNC)
4928 timing_out->flags.VSYNC_POSITIVE_POLARITY = 1;
4931 if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
4932 drm_hdmi_avi_infoframe_from_display_mode(&avi_frame, (struct drm_connector *)connector, mode_in);
4933 timing_out->vic = avi_frame.video_code;
4934 drm_hdmi_vendor_infoframe_from_display_mode(&hv_frame, (struct drm_connector *)connector, mode_in);
4935 timing_out->hdmi_vic = hv_frame.vic;
4938 timing_out->h_addressable = mode_in->crtc_hdisplay;
4939 timing_out->h_total = mode_in->crtc_htotal;
4940 timing_out->h_sync_width =
4941 mode_in->crtc_hsync_end - mode_in->crtc_hsync_start;
4942 timing_out->h_front_porch =
4943 mode_in->crtc_hsync_start - mode_in->crtc_hdisplay;
4944 timing_out->v_total = mode_in->crtc_vtotal;
4945 timing_out->v_addressable = mode_in->crtc_vdisplay;
4946 timing_out->v_front_porch =
4947 mode_in->crtc_vsync_start - mode_in->crtc_vdisplay;
4948 timing_out->v_sync_width =
4949 mode_in->crtc_vsync_end - mode_in->crtc_vsync_start;
4950 timing_out->pix_clk_100hz = mode_in->crtc_clock * 10;
4951 timing_out->aspect_ratio = get_aspect_ratio(mode_in);
4953 stream->output_color_space = get_output_color_space(timing_out);
4955 stream->out_transfer_func->type = TF_TYPE_PREDEFINED;
4956 stream->out_transfer_func->tf = TRANSFER_FUNCTION_SRGB;
4957 if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
4958 if (!adjust_colour_depth_from_display_info(timing_out, info) &&
4959 drm_mode_is_420_also(info, mode_in) &&
4960 timing_out->pixel_encoding != PIXEL_ENCODING_YCBCR420) {
4961 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
4962 adjust_colour_depth_from_display_info(timing_out, info);
4967 static void fill_audio_info(struct audio_info *audio_info,
4968 const struct drm_connector *drm_connector,
4969 const struct dc_sink *dc_sink)
4972 int cea_revision = 0;
4973 const struct dc_edid_caps *edid_caps = &dc_sink->edid_caps;
4975 audio_info->manufacture_id = edid_caps->manufacturer_id;
4976 audio_info->product_id = edid_caps->product_id;
4978 cea_revision = drm_connector->display_info.cea_rev;
4980 strscpy(audio_info->display_name,
4981 edid_caps->display_name,
4982 AUDIO_INFO_DISPLAY_NAME_SIZE_IN_CHARS);
4984 if (cea_revision >= 3) {
4985 audio_info->mode_count = edid_caps->audio_mode_count;
4987 for (i = 0; i < audio_info->mode_count; ++i) {
4988 audio_info->modes[i].format_code =
4989 (enum audio_format_code)
4990 (edid_caps->audio_modes[i].format_code);
4991 audio_info->modes[i].channel_count =
4992 edid_caps->audio_modes[i].channel_count;
4993 audio_info->modes[i].sample_rates.all =
4994 edid_caps->audio_modes[i].sample_rate;
4995 audio_info->modes[i].sample_size =
4996 edid_caps->audio_modes[i].sample_size;
5000 audio_info->flags.all = edid_caps->speaker_flags;
5002 /* TODO: We only check progressive mode; check interlace mode too */
5003 if (drm_connector->latency_present[0]) {
5004 audio_info->video_latency = drm_connector->video_latency[0];
5005 audio_info->audio_latency = drm_connector->audio_latency[0];
5008 /* TODO: For DP, video and audio latency should be calculated from DPCD caps */
5013 copy_crtc_timing_for_drm_display_mode(const struct drm_display_mode *src_mode,
5014 struct drm_display_mode *dst_mode)
5016 dst_mode->crtc_hdisplay = src_mode->crtc_hdisplay;
5017 dst_mode->crtc_vdisplay = src_mode->crtc_vdisplay;
5018 dst_mode->crtc_clock = src_mode->crtc_clock;
5019 dst_mode->crtc_hblank_start = src_mode->crtc_hblank_start;
5020 dst_mode->crtc_hblank_end = src_mode->crtc_hblank_end;
5021 dst_mode->crtc_hsync_start = src_mode->crtc_hsync_start;
5022 dst_mode->crtc_hsync_end = src_mode->crtc_hsync_end;
5023 dst_mode->crtc_htotal = src_mode->crtc_htotal;
5024 dst_mode->crtc_hskew = src_mode->crtc_hskew;
5025 dst_mode->crtc_vblank_start = src_mode->crtc_vblank_start;
5026 dst_mode->crtc_vblank_end = src_mode->crtc_vblank_end;
5027 dst_mode->crtc_vsync_start = src_mode->crtc_vsync_start;
5028 dst_mode->crtc_vsync_end = src_mode->crtc_vsync_end;
5029 dst_mode->crtc_vtotal = src_mode->crtc_vtotal;
5033 decide_crtc_timing_for_drm_display_mode(struct drm_display_mode *drm_mode,
5034 const struct drm_display_mode *native_mode,
5037 if (scale_enabled) {
5038 copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
5039 } else if (native_mode->clock == drm_mode->clock &&
5040 native_mode->htotal == drm_mode->htotal &&
5041 native_mode->vtotal == drm_mode->vtotal) {
5042 copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
5044 /* no scaling and no amdgpu-inserted mode; nothing to patch */
5048 static struct dc_sink *
5049 create_fake_sink(struct amdgpu_dm_connector *aconnector)
5051 struct dc_sink_init_data sink_init_data = { 0 };
5052 struct dc_sink *sink = NULL;
5053 sink_init_data.link = aconnector->dc_link;
5054 sink_init_data.sink_signal = aconnector->dc_link->connector_signal;
5056 sink = dc_sink_create(&sink_init_data);
5058 DRM_ERROR("Failed to create sink!\n");
5061 sink->sink_signal = SIGNAL_TYPE_VIRTUAL;
5066 static void set_multisync_trigger_params(
5067 struct dc_stream_state *stream)
5069 if (stream->triggered_crtc_reset.enabled) {
5070 stream->triggered_crtc_reset.event = CRTC_EVENT_VSYNC_RISING;
5071 stream->triggered_crtc_reset.delay = TRIGGER_DELAY_NEXT_LINE;
5075 static void set_master_stream(struct dc_stream_state *stream_set[],
5078 int j, highest_rfr = 0, master_stream = 0;
5080 for (j = 0; j < stream_count; j++) {
5081 if (stream_set[j] && stream_set[j]->triggered_crtc_reset.enabled) {
5082 int refresh_rate = 0;
5084 refresh_rate = (stream_set[j]->timing.pix_clk_100hz*100)/
5085 (stream_set[j]->timing.h_total*stream_set[j]->timing.v_total);
5086 if (refresh_rate > highest_rfr) {
5087 highest_rfr = refresh_rate;
5092 for (j = 0; j < stream_count; j++) {
5094 stream_set[j]->triggered_crtc_reset.event_source = stream_set[master_stream];
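/*
 * The integer refresh rate above is pix_clk / (h_total * v_total); e.g.
 * (timings assumed) 1080p60 with pix_clk_100hz = 1485000, h_total = 2200
 * and v_total = 1125 gives (1485000 * 100) / (2200 * 1125) = 60, so that
 * stream would win the master election over lower-rate streams.
 */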
5098 static void dm_enable_per_frame_crtc_master_sync(struct dc_state *context)
5102 if (context->stream_count < 2)
5104 for (i = 0; i < context->stream_count ; i++) {
5105 if (!context->streams[i])
5108 * TODO: add a function to read AMD VSDB bits and set
5109 * crtc_sync_master.multi_sync_enabled flag
5110 * For now it's set to false
5112 set_multisync_trigger_params(context->streams[i]);
5114 set_master_stream(context->streams, context->stream_count);
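/*
 * Builds a dc_stream_state for the given connector/mode: picks a real or
 * fake sink, fills the timing from the mode (optionally preserving the old
 * stream's VIC and sync polarities when only scaling changed), applies DSC
 * policy for DP sinks, and attaches scaling, audio and PSR/VSC info. The
 * returned stream holds a reference; callers drop it with
 * dc_stream_release().
 */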
5117 static struct dc_stream_state *
5118 create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
5119 const struct drm_display_mode *drm_mode,
5120 const struct dm_connector_state *dm_state,
5121 const struct dc_stream_state *old_stream,
5124 struct drm_display_mode *preferred_mode = NULL;
5125 struct drm_connector *drm_connector;
5126 const struct drm_connector_state *con_state =
5127 dm_state ? &dm_state->base : NULL;
5128 struct dc_stream_state *stream = NULL;
5129 struct drm_display_mode mode = *drm_mode;
5130 bool native_mode_found = false;
5131 bool scale = dm_state ? (dm_state->scaling != RMX_OFF) : false;
5133 int preferred_refresh = 0;
5134 #if defined(CONFIG_DRM_AMD_DC_DCN)
5135 struct dsc_dec_dpcd_caps dsc_caps;
5136 uint32_t link_bandwidth_kbps;
5138 struct dc_sink *sink = NULL;
5139 if (aconnector == NULL) {
5140 DRM_ERROR("aconnector is NULL!\n");
5144 drm_connector = &aconnector->base;
5146 if (!aconnector->dc_sink) {
5147 sink = create_fake_sink(aconnector);
5151 sink = aconnector->dc_sink;
5152 dc_sink_retain(sink);
5155 stream = dc_create_stream_for_sink(sink);
5157 if (stream == NULL) {
5158 DRM_ERROR("Failed to create stream for sink!\n");
5162 stream->dm_stream_context = aconnector;
5164 stream->timing.flags.LTE_340MCSC_SCRAMBLE =
5165 drm_connector->display_info.hdmi.scdc.scrambling.low_rates;
5167 list_for_each_entry(preferred_mode, &aconnector->base.modes, head) {
5168 /* Search for preferred mode */
5169 if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED) {
5170 native_mode_found = true;
5174 if (!native_mode_found)
5175 preferred_mode = list_first_entry_or_null(
5176 &aconnector->base.modes,
5177 struct drm_display_mode,
5180 mode_refresh = drm_mode_vrefresh(&mode);
5182 if (preferred_mode == NULL) {
5184 * This may not be an error; the use case is when we have no
5185 * usermode calls to reset and set mode upon hotplug. In this
5186 * case, we call set mode ourselves to restore the previous mode,
5187 * and the mode list may not be filled in time.
5189 DRM_DEBUG_DRIVER("No preferred mode found\n");
5191 decide_crtc_timing_for_drm_display_mode(
5192 &mode, preferred_mode,
5193 dm_state ? (dm_state->scaling != RMX_OFF) : false);
5194 preferred_refresh = drm_mode_vrefresh(preferred_mode);
5198 drm_mode_set_crtcinfo(&mode, 0);
5201 * If scaling is enabled and the refresh rate didn't change,
5202 * we copy the VIC and polarities of the old timings
5204 if (!scale || mode_refresh != preferred_refresh)
5205 fill_stream_properties_from_drm_display_mode(stream,
5206 &mode, &aconnector->base, con_state, NULL, requested_bpc);
5208 fill_stream_properties_from_drm_display_mode(stream,
5209 &mode, &aconnector->base, con_state, old_stream, requested_bpc);
5211 stream->timing.flags.DSC = 0;
5213 if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT) {
5214 #if defined(CONFIG_DRM_AMD_DC_DCN)
5215 dc_dsc_parse_dsc_dpcd(aconnector->dc_link->ctx->dc,
5216 aconnector->dc_link->dpcd_caps.dsc_caps.dsc_basic_caps.raw,
5217 aconnector->dc_link->dpcd_caps.dsc_caps.dsc_branch_decoder_caps.raw,
5219 link_bandwidth_kbps = dc_link_bandwidth_kbps(aconnector->dc_link,
5220 dc_link_get_link_cap(aconnector->dc_link));
5222 if (aconnector->dsc_settings.dsc_force_enable != DSC_CLK_FORCE_DISABLE && dsc_caps.is_dsc_supported) {
5223 /* Set DSC policy according to dsc_clock_en */
5224 dc_dsc_policy_set_enable_dsc_when_not_needed(
5225 aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE);
5227 if (dc_dsc_compute_config(aconnector->dc_link->ctx->dc->res_pool->dscs[0],
5229 aconnector->dc_link->ctx->dc->debug.dsc_min_slice_height_override,
5231 link_bandwidth_kbps,
5233 &stream->timing.dsc_cfg))
5234 stream->timing.flags.DSC = 1;
5235 /* Overwrite the stream flag if DSC is enabled through debugfs */
5236 if (aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE)
5237 stream->timing.flags.DSC = 1;
5239 if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_h)
5240 stream->timing.dsc_cfg.num_slices_h = aconnector->dsc_settings.dsc_num_slices_h;
5242 if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_v)
5243 stream->timing.dsc_cfg.num_slices_v = aconnector->dsc_settings.dsc_num_slices_v;
5245 if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_bits_per_pixel)
5246 stream->timing.dsc_cfg.bits_per_pixel = aconnector->dsc_settings.dsc_bits_per_pixel;
5251 update_stream_scaling_settings(&mode, dm_state, stream);
5254 &stream->audio_info,
5258 update_stream_signal(stream, sink);
5260 if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
5261 mod_build_hf_vsif_infopacket(stream, &stream->vsp_infopacket);
5263 if (stream->link->psr_settings.psr_feature_enabled) {
5265 // should decide whether the stream supports VSC SDP colorimetry
5266 // before building the VSC info packet
5268 stream->use_vsc_sdp_for_colorimetry = false;
5269 if (aconnector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
5270 stream->use_vsc_sdp_for_colorimetry =
5271 aconnector->dc_sink->is_vsc_sdp_colorimetry_supported;
5273 if (stream->link->dpcd_caps.dprx_feature.bits.VSC_SDP_COLORIMETRY_SUPPORTED)
5274 stream->use_vsc_sdp_for_colorimetry = true;
5276 mod_build_vsc_infopacket(stream, &stream->vsc_infopacket);
5279 dc_sink_release(sink);
5284 static void amdgpu_dm_crtc_destroy(struct drm_crtc *crtc)
5286 drm_crtc_cleanup(crtc);
5290 static void dm_crtc_destroy_state(struct drm_crtc *crtc,
5291 struct drm_crtc_state *state)
5293 struct dm_crtc_state *cur = to_dm_crtc_state(state);
5295 /* TODO: Destroy dc_stream objects once the stream object is flattened */
5297 dc_stream_release(cur->stream);
5300 __drm_atomic_helper_crtc_destroy_state(state);
5306 static void dm_crtc_reset_state(struct drm_crtc *crtc)
5308 struct dm_crtc_state *state;
5311 dm_crtc_destroy_state(crtc, crtc->state);
5313 state = kzalloc(sizeof(*state), GFP_KERNEL);
5314 if (WARN_ON(!state))
5317 __drm_atomic_helper_crtc_reset(crtc, &state->base);
5320 static struct drm_crtc_state *
5321 dm_crtc_duplicate_state(struct drm_crtc *crtc)
5323 struct dm_crtc_state *state, *cur;
5325 cur = to_dm_crtc_state(crtc->state);
5327 if (WARN_ON(!crtc->state))
5330 state = kzalloc(sizeof(*state), GFP_KERNEL);
5334 __drm_atomic_helper_crtc_duplicate_state(crtc, &state->base);
5337 state->stream = cur->stream;
5338 dc_stream_retain(state->stream);
5341 state->active_planes = cur->active_planes;
5342 state->vrr_infopacket = cur->vrr_infopacket;
5343 state->abm_level = cur->abm_level;
5344 state->vrr_supported = cur->vrr_supported;
5345 state->freesync_config = cur->freesync_config;
5346 state->crc_src = cur->crc_src;
5347 state->cm_has_degamma = cur->cm_has_degamma;
5348 state->cm_is_degamma_srgb = cur->cm_is_degamma_srgb;
5350 /* TODO: Duplicate dc_stream once the stream object is flattened */
5352 return &state->base;
5355 static inline int dm_set_vupdate_irq(struct drm_crtc *crtc, bool enable)
5357 enum dc_irq_source irq_source;
5358 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
5359 struct amdgpu_device *adev = drm_to_adev(crtc->dev);
5362 irq_source = IRQ_TYPE_VUPDATE + acrtc->otg_inst;
5364 rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
5366 DRM_DEBUG_DRIVER("crtc %d - vupdate irq %sabling: r=%d\n",
5367 acrtc->crtc_id, enable ? "en" : "dis", rc);
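/*
 * Enabling vblank also pulls in the vupdate interrupt when VRR is active,
 * and the global active_vblank_irq_count below is what gates DCN idle
 * optimizations (MALL): they are only allowed once no CRTC has its vblank
 * interrupt enabled.
 */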
5371 static inline int dm_set_vblank(struct drm_crtc *crtc, bool enable)
5373 enum dc_irq_source irq_source;
5374 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
5375 struct amdgpu_device *adev = drm_to_adev(crtc->dev);
5376 struct dm_crtc_state *acrtc_state = to_dm_crtc_state(crtc->state);
5377 struct amdgpu_display_manager *dm = &adev->dm;
5381 /* vblank irq on -> Only need vupdate irq in vrr mode */
5382 if (amdgpu_dm_vrr_active(acrtc_state))
5383 rc = dm_set_vupdate_irq(crtc, true);
5385 /* vblank irq off -> vupdate irq off */
5386 rc = dm_set_vupdate_irq(crtc, false);
5392 irq_source = IRQ_TYPE_VBLANK + acrtc->otg_inst;
5394 if (!dc_interrupt_set(adev->dm.dc, irq_source, enable))
5397 if (amdgpu_in_reset(adev))
5400 mutex_lock(&dm->dc_lock);
5403 dm->active_vblank_irq_count++;
5405 dm->active_vblank_irq_count--;
5407 #if defined(CONFIG_DRM_AMD_DC_DCN)
5408 dc_allow_idle_optimizations(
5409 adev->dm.dc, dm->active_vblank_irq_count == 0);
5411 DRM_DEBUG_DRIVER("Allow idle optimizations (MALL): %d\n", dm->active_vblank_irq_count == 0);
5414 mutex_unlock(&dm->dc_lock);
5419 static int dm_enable_vblank(struct drm_crtc *crtc)
5421 return dm_set_vblank(crtc, true);
5424 static void dm_disable_vblank(struct drm_crtc *crtc)
5426 dm_set_vblank(crtc, false);
5429 /* Only the options currently available for the driver are implemented */
5430 static const struct drm_crtc_funcs amdgpu_dm_crtc_funcs = {
5431 .reset = dm_crtc_reset_state,
5432 .destroy = amdgpu_dm_crtc_destroy,
5433 .set_config = drm_atomic_helper_set_config,
5434 .page_flip = drm_atomic_helper_page_flip,
5435 .atomic_duplicate_state = dm_crtc_duplicate_state,
5436 .atomic_destroy_state = dm_crtc_destroy_state,
5437 .set_crc_source = amdgpu_dm_crtc_set_crc_source,
5438 .verify_crc_source = amdgpu_dm_crtc_verify_crc_source,
5439 .get_crc_sources = amdgpu_dm_crtc_get_crc_sources,
5440 .get_vblank_counter = amdgpu_get_vblank_counter_kms,
5441 .enable_vblank = dm_enable_vblank,
5442 .disable_vblank = dm_disable_vblank,
5443 .get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
5446 static enum drm_connector_status
5447 amdgpu_dm_connector_detect(struct drm_connector *connector, bool force)
5450 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
5454 * 1. This interface is NOT called in context of HPD irq.
5455 * 2. This interface *is called* in the context of a user-mode ioctl,
5456 * which makes it a bad place for *any* MST-related activity.
5459 if (aconnector->base.force == DRM_FORCE_UNSPECIFIED &&
5460 !aconnector->fake_enable)
5461 connected = (aconnector->dc_sink != NULL);
5463 connected = (aconnector->base.force == DRM_FORCE_ON);
5465 update_subconnector_property(aconnector);
5467 return (connected ? connector_status_connected :
5468 connector_status_disconnected);
5471 int amdgpu_dm_connector_atomic_set_property(struct drm_connector *connector,
5472 struct drm_connector_state *connector_state,
5473 struct drm_property *property,
5476 struct drm_device *dev = connector->dev;
5477 struct amdgpu_device *adev = drm_to_adev(dev);
5478 struct dm_connector_state *dm_old_state =
5479 to_dm_connector_state(connector->state);
5480 struct dm_connector_state *dm_new_state =
5481 to_dm_connector_state(connector_state);
5485 if (property == dev->mode_config.scaling_mode_property) {
5486 enum amdgpu_rmx_type rmx_type;
5489 case DRM_MODE_SCALE_CENTER:
5490 rmx_type = RMX_CENTER;
5492 case DRM_MODE_SCALE_ASPECT:
5493 rmx_type = RMX_ASPECT;
5495 case DRM_MODE_SCALE_FULLSCREEN:
5496 rmx_type = RMX_FULL;
5498 case DRM_MODE_SCALE_NONE:
5504 if (dm_old_state->scaling == rmx_type)
5507 dm_new_state->scaling = rmx_type;
5509 } else if (property == adev->mode_info.underscan_hborder_property) {
5510 dm_new_state->underscan_hborder = val;
5512 } else if (property == adev->mode_info.underscan_vborder_property) {
5513 dm_new_state->underscan_vborder = val;
5515 } else if (property == adev->mode_info.underscan_property) {
5516 dm_new_state->underscan_enable = val;
5518 } else if (property == adev->mode_info.abm_level_property) {
5519 dm_new_state->abm_level = val;
5526 int amdgpu_dm_connector_atomic_get_property(struct drm_connector *connector,
5527 const struct drm_connector_state *state,
5528 struct drm_property *property,
5531 struct drm_device *dev = connector->dev;
5532 struct amdgpu_device *adev = drm_to_adev(dev);
5533 struct dm_connector_state *dm_state =
5534 to_dm_connector_state(state);
5537 if (property == dev->mode_config.scaling_mode_property) {
5538 switch (dm_state->scaling) {
5540 *val = DRM_MODE_SCALE_CENTER;
5543 *val = DRM_MODE_SCALE_ASPECT;
5546 *val = DRM_MODE_SCALE_FULLSCREEN;
5550 *val = DRM_MODE_SCALE_NONE;
5554 } else if (property == adev->mode_info.underscan_hborder_property) {
5555 *val = dm_state->underscan_hborder;
5557 } else if (property == adev->mode_info.underscan_vborder_property) {
5558 *val = dm_state->underscan_vborder;
5560 } else if (property == adev->mode_info.underscan_property) {
5561 *val = dm_state->underscan_enable;
5563 } else if (property == adev->mode_info.abm_level_property) {
5564 *val = dm_state->abm_level;
5571 static void amdgpu_dm_connector_unregister(struct drm_connector *connector)
5573 struct amdgpu_dm_connector *amdgpu_dm_connector = to_amdgpu_dm_connector(connector);
5575 drm_dp_aux_unregister(&amdgpu_dm_connector->dm_dp_aux.aux);
5578 static void amdgpu_dm_connector_destroy(struct drm_connector *connector)
5580 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
5581 const struct dc_link *link = aconnector->dc_link;
5582 struct amdgpu_device *adev = drm_to_adev(connector->dev);
5583 struct amdgpu_display_manager *dm = &adev->dm;
5586 * Call only if mst_mgr was initialized before, since it's not done
5587 * for all connector types.
5589 if (aconnector->mst_mgr.dev)
5590 drm_dp_mst_topology_mgr_destroy(&aconnector->mst_mgr);
5592 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
5593 defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
5595 if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
5596 link->type != dc_connection_none &&
5597 dm->backlight_dev) {
5598 backlight_device_unregister(dm->backlight_dev);
5599 dm->backlight_dev = NULL;
5603 if (aconnector->dc_em_sink)
5604 dc_sink_release(aconnector->dc_em_sink);
5605 aconnector->dc_em_sink = NULL;
5606 if (aconnector->dc_sink)
5607 dc_sink_release(aconnector->dc_sink);
5608 aconnector->dc_sink = NULL;
5610 drm_dp_cec_unregister_connector(&aconnector->dm_dp_aux.aux);
5611 drm_connector_unregister(connector);
5612 drm_connector_cleanup(connector);
5613 if (aconnector->i2c) {
5614 i2c_del_adapter(&aconnector->i2c->base);
5615 kfree(aconnector->i2c);
5617 kfree(aconnector->dm_dp_aux.aux.name);
5622 void amdgpu_dm_connector_funcs_reset(struct drm_connector *connector)
5624 struct dm_connector_state *state =
5625 to_dm_connector_state(connector->state);
5627 if (connector->state)
5628 __drm_atomic_helper_connector_destroy_state(connector->state);
5632 state = kzalloc(sizeof(*state), GFP_KERNEL);
5635 state->scaling = RMX_OFF;
5636 state->underscan_enable = false;
5637 state->underscan_hborder = 0;
5638 state->underscan_vborder = 0;
5639 state->base.max_requested_bpc = 8;
5640 state->vcpi_slots = 0;
5642 if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
5643 state->abm_level = amdgpu_dm_abm_level;
5645 __drm_atomic_helper_connector_reset(connector, &state->base);
5649 struct drm_connector_state *
5650 amdgpu_dm_connector_atomic_duplicate_state(struct drm_connector *connector)
5652 struct dm_connector_state *state =
5653 to_dm_connector_state(connector->state);
5655 struct dm_connector_state *new_state =
5656 kmemdup(state, sizeof(*state), GFP_KERNEL);
5661 __drm_atomic_helper_connector_duplicate_state(connector, &new_state->base);
5663 new_state->freesync_capable = state->freesync_capable;
5664 new_state->abm_level = state->abm_level;
5665 new_state->scaling = state->scaling;
5666 new_state->underscan_enable = state->underscan_enable;
5667 new_state->underscan_hborder = state->underscan_hborder;
5668 new_state->underscan_vborder = state->underscan_vborder;
5669 new_state->vcpi_slots = state->vcpi_slots;
5670 new_state->pbn = state->pbn;
5671 return &new_state->base;
5675 amdgpu_dm_connector_late_register(struct drm_connector *connector)
5677 struct amdgpu_dm_connector *amdgpu_dm_connector =
5678 to_amdgpu_dm_connector(connector);
5681 if ((connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort) ||
5682 (connector->connector_type == DRM_MODE_CONNECTOR_eDP)) {
5683 amdgpu_dm_connector->dm_dp_aux.aux.dev = connector->kdev;
5684 r = drm_dp_aux_register(&amdgpu_dm_connector->dm_dp_aux.aux);
5689 #if defined(CONFIG_DEBUG_FS)
5690 connector_debugfs_init(amdgpu_dm_connector);
5696 static const struct drm_connector_funcs amdgpu_dm_connector_funcs = {
5697 .reset = amdgpu_dm_connector_funcs_reset,
5698 .detect = amdgpu_dm_connector_detect,
5699 .fill_modes = drm_helper_probe_single_connector_modes,
5700 .destroy = amdgpu_dm_connector_destroy,
5701 .atomic_duplicate_state = amdgpu_dm_connector_atomic_duplicate_state,
5702 .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
5703 .atomic_set_property = amdgpu_dm_connector_atomic_set_property,
5704 .atomic_get_property = amdgpu_dm_connector_atomic_get_property,
5705 .late_register = amdgpu_dm_connector_late_register,
5706 .early_unregister = amdgpu_dm_connector_unregister
5709 static int get_modes(struct drm_connector *connector)
5711 return amdgpu_dm_connector_get_modes(connector);
5714 static void create_eml_sink(struct amdgpu_dm_connector *aconnector)
5716 struct dc_sink_init_data init_params = {
5717 .link = aconnector->dc_link,
5718 .sink_signal = SIGNAL_TYPE_VIRTUAL
5722 if (!aconnector->base.edid_blob_ptr) {
5723 DRM_ERROR("No EDID firmware found on connector: %s, forcing to OFF!\n",
5724 aconnector->base.name);
5726 aconnector->base.force = DRM_FORCE_OFF;
5727 aconnector->base.override_edid = false;
5731 edid = (struct edid *) aconnector->base.edid_blob_ptr->data;
5733 aconnector->edid = edid;
5735 aconnector->dc_em_sink = dc_link_add_remote_sink(
5736 aconnector->dc_link,
5738 (edid->extensions + 1) * EDID_LENGTH,
5741 if (aconnector->base.force == DRM_FORCE_ON) {
5742 aconnector->dc_sink = aconnector->dc_link->local_sink ?
5743 aconnector->dc_link->local_sink :
5744 aconnector->dc_em_sink;
5745 dc_sink_retain(aconnector->dc_sink);
5749 static void handle_edid_mgmt(struct amdgpu_dm_connector *aconnector)
5751 struct dc_link *link = (struct dc_link *)aconnector->dc_link;
5754 * In case of a headless boot with force-on for a DP managed connector,
5755 * these settings have to be != 0 to get an initial modeset
5757 if (link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT) {
5758 link->verified_link_cap.lane_count = LANE_COUNT_FOUR;
5759 link->verified_link_cap.link_rate = LINK_RATE_HIGH2;
5763 aconnector->base.override_edid = true;
5764 create_eml_sink(aconnector);
5767 static struct dc_stream_state *
5768 create_validate_stream_for_sink(struct amdgpu_dm_connector *aconnector,
5769 const struct drm_display_mode *drm_mode,
5770 const struct dm_connector_state *dm_state,
5771 const struct dc_stream_state *old_stream)
5773 struct drm_connector *connector = &aconnector->base;
5774 struct amdgpu_device *adev = drm_to_adev(connector->dev);
5775 struct dc_stream_state *stream;
5776 const struct drm_connector_state *drm_state = dm_state ? &dm_state->base : NULL;
5777 int requested_bpc = drm_state ? drm_state->max_requested_bpc : 8;
5778 enum dc_status dc_result = DC_OK;
5781 stream = create_stream_for_sink(aconnector, drm_mode,
5782 dm_state, old_stream,
5784 if (stream == NULL) {
5785 DRM_ERROR("Failed to create stream for sink!\n");
5789 dc_result = dc_validate_stream(adev->dm.dc, stream);
5791 if (dc_result != DC_OK) {
5792 DRM_DEBUG_KMS("Mode %dx%d (clk %d) failed DC validation with error %d (%s)\n",
5797 dc_status_to_str(dc_result));
5799 dc_stream_release(stream);
5801 requested_bpc -= 2; /* lower bpc to retry validation */
5804 } while (stream == NULL && requested_bpc >= 6);
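/*
 * The retry loop above walks the requested bpc down in steps of 2 until
 * validation passes or bpc drops below 6; e.g. with max_requested_bpc = 10
 * a mode is tried at 10, then 8, then 6 bpc before giving up.
 */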
5809 enum drm_mode_status amdgpu_dm_connector_mode_valid(struct drm_connector *connector,
5810 struct drm_display_mode *mode)
5812 int result = MODE_ERROR;
5813 struct dc_sink *dc_sink;
5814 /* TODO: Unhardcode stream count */
5815 struct dc_stream_state *stream;
5816 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
5818 if ((mode->flags & DRM_MODE_FLAG_INTERLACE) ||
5819 (mode->flags & DRM_MODE_FLAG_DBLSCAN))
5823 * Only run this the first time mode_valid is called, to initialize
5826 if (aconnector->base.force != DRM_FORCE_UNSPECIFIED &&
5827 !aconnector->dc_em_sink)
5828 handle_edid_mgmt(aconnector);
5830 dc_sink = to_amdgpu_dm_connector(connector)->dc_sink;
5832 if (dc_sink == NULL && aconnector->base.force != DRM_FORCE_ON_DIGITAL &&
5833 aconnector->base.force != DRM_FORCE_ON) {
5834 DRM_ERROR("dc_sink is NULL!\n");
5838 stream = create_validate_stream_for_sink(aconnector, mode, NULL, NULL);
5840 dc_stream_release(stream);
5845 /* TODO: error handling */
5849 static int fill_hdr_info_packet(const struct drm_connector_state *state,
5850 struct dc_info_packet *out)
5852 struct hdmi_drm_infoframe frame;
5853 unsigned char buf[30]; /* 26 + 4 */
5857 memset(out, 0, sizeof(*out));
5859 if (!state->hdr_output_metadata)
5862 ret = drm_hdmi_infoframe_set_hdr_metadata(&frame, state);
5866 len = hdmi_drm_infoframe_pack_only(&frame, buf, sizeof(buf));
5870 /* Static metadata is a fixed 26 bytes + 4 byte header. */
5874 /* Prepare the infopacket for DC. */
5875 switch (state->connector->connector_type) {
5876 case DRM_MODE_CONNECTOR_HDMIA:
5877 out->hb0 = 0x87; /* type */
5878 out->hb1 = 0x01; /* version */
5879 out->hb2 = 0x1A; /* length */
5880 out->sb[0] = buf[3]; /* checksum */
5884 case DRM_MODE_CONNECTOR_DisplayPort:
5885 case DRM_MODE_CONNECTOR_eDP:
5886 out->hb0 = 0x00; /* sdp id, zero */
5887 out->hb1 = 0x87; /* type */
5888 out->hb2 = 0x1D; /* payload len - 1 */
5889 out->hb3 = (0x13 << 2); /* sdp version */
5890 out->sb[0] = 0x01; /* version */
5891 out->sb[1] = 0x1A; /* length */
5899 memcpy(&out->sb[i], &buf[4], 26);
5902 print_hex_dump(KERN_DEBUG, "HDR SB:", DUMP_PREFIX_NONE, 16, 1, out->sb,
5903 sizeof(out->sb), false);
5909 is_hdr_metadata_different(const struct drm_connector_state *old_state,
5910 const struct drm_connector_state *new_state)
5912 struct drm_property_blob *old_blob = old_state->hdr_output_metadata;
5913 struct drm_property_blob *new_blob = new_state->hdr_output_metadata;
5915 if (old_blob != new_blob) {
5916 if (old_blob && new_blob &&
5917 old_blob->length == new_blob->length)
5918 return memcmp(old_blob->data, new_blob->data,
5928 amdgpu_dm_connector_atomic_check(struct drm_connector *conn,
5929 struct drm_atomic_state *state)
5931 struct drm_connector_state *new_con_state =
5932 drm_atomic_get_new_connector_state(state, conn);
5933 struct drm_connector_state *old_con_state =
5934 drm_atomic_get_old_connector_state(state, conn);
5935 struct drm_crtc *crtc = new_con_state->crtc;
5936 struct drm_crtc_state *new_crtc_state;
5939 trace_amdgpu_dm_connector_atomic_check(new_con_state);
5944 if (is_hdr_metadata_different(old_con_state, new_con_state)) {
5945 struct dc_info_packet hdr_infopacket;
5947 ret = fill_hdr_info_packet(new_con_state, &hdr_infopacket);
5951 new_crtc_state = drm_atomic_get_crtc_state(state, crtc);
5952 if (IS_ERR(new_crtc_state))
5953 return PTR_ERR(new_crtc_state);
5956 * DC considers the stream backends changed if the
5957 * static metadata changes. Forcing the modeset also
5958 * gives a simple way for userspace to switch from
5959 * 8bpc to 10bpc when setting the metadata to enter
5962 * Changing the static metadata after it's been
5963 * set is permissible, however. So only force a
5964 * modeset if we're entering or exiting HDR.
5966 new_crtc_state->mode_changed =
5967 !old_con_state->hdr_output_metadata ||
5968 !new_con_state->hdr_output_metadata;
5974 static const struct drm_connector_helper_funcs
5975 amdgpu_dm_connector_helper_funcs = {
5977 * If hotplugging a second, bigger display in fbcon mode, bigger-resolution
5978 * modes will be filtered out by drm_mode_validate_size(), and those modes
5979 * will be missing after the user starts lightdm. So we need to renew the
5980 * mode list in the get_modes callback, not just return the mode count.
5982 .get_modes = get_modes,
5983 .mode_valid = amdgpu_dm_connector_mode_valid,
5984 .atomic_check = amdgpu_dm_connector_atomic_check,
5987 static void dm_crtc_helper_disable(struct drm_crtc *crtc)
5991 static int count_crtc_active_planes(struct drm_crtc_state *new_crtc_state)
5993 struct drm_atomic_state *state = new_crtc_state->state;
5994 struct drm_plane *plane;
5997 drm_for_each_plane_mask(plane, state->dev, new_crtc_state->plane_mask) {
5998 struct drm_plane_state *new_plane_state;
6000 /* Cursor planes are "fake". */
6001 if (plane->type == DRM_PLANE_TYPE_CURSOR)
6004 new_plane_state = drm_atomic_get_new_plane_state(state, plane);
6006 if (!new_plane_state) {
6008 * The plane is enabled on the CRTC and hasn't changed
6009 * state. This means that it previously passed
6010 * validation and is therefore enabled.
6016 /* We need a framebuffer to be considered enabled. */
6017 num_active += (new_plane_state->fb != NULL);
6023 static void dm_update_crtc_active_planes(struct drm_crtc *crtc,
6024 struct drm_crtc_state *new_crtc_state)
6026 struct dm_crtc_state *dm_new_crtc_state =
6027 to_dm_crtc_state(new_crtc_state);
6029 dm_new_crtc_state->active_planes = 0;
6031 if (!dm_new_crtc_state->stream)
6034 dm_new_crtc_state->active_planes =
6035 count_crtc_active_planes(new_crtc_state);
6038 static int dm_crtc_helper_atomic_check(struct drm_crtc *crtc,
6039 struct drm_atomic_state *state)
6041 struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state,
6043 struct amdgpu_device *adev = drm_to_adev(crtc->dev);
6044 struct dc *dc = adev->dm.dc;
6045 struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
6048 trace_amdgpu_dm_crtc_atomic_check(crtc_state);
6050 dm_update_crtc_active_planes(crtc, crtc_state);
6052 if (unlikely(!dm_crtc_state->stream &&
6053 modeset_required(crtc_state, NULL, dm_crtc_state->stream))) {
6059 * We require the primary plane to be enabled whenever the CRTC is, otherwise
6060 * drm_mode_cursor_universal may end up trying to enable the cursor plane while all other
6061 * planes are disabled, which is not supported by the hardware. And there is legacy
6062 * userspace which stops using the HW cursor altogether in response to the resulting EINVAL.
6064 if (crtc_state->enable &&
6065 !(crtc_state->plane_mask & drm_plane_mask(crtc->primary))) {
6066 DRM_DEBUG_ATOMIC("Can't enable a CRTC without enabling the primary plane\n");
6070 /* In some use cases, like reset, no stream is attached */
6071 if (!dm_crtc_state->stream)
6074 if (dc_validate_stream(dc, dm_crtc_state->stream) == DC_OK)
6077 DRM_DEBUG_ATOMIC("Failed DC stream validation\n");
6081 static bool dm_crtc_helper_mode_fixup(struct drm_crtc *crtc,
6082 const struct drm_display_mode *mode,
6083 struct drm_display_mode *adjusted_mode)
6088 static const struct drm_crtc_helper_funcs amdgpu_dm_crtc_helper_funcs = {
6089 .disable = dm_crtc_helper_disable,
6090 .atomic_check = dm_crtc_helper_atomic_check,
6091 .mode_fixup = dm_crtc_helper_mode_fixup,
6092 .get_scanout_position = amdgpu_crtc_get_scanout_position,
6095 static void dm_encoder_helper_disable(struct drm_encoder *encoder)
6100 static int convert_dc_color_depth_into_bpc(enum dc_color_depth display_color_depth)
6102 switch (display_color_depth) {
6103 case COLOR_DEPTH_666:
6105 case COLOR_DEPTH_888:
6107 case COLOR_DEPTH_101010:
6109 case COLOR_DEPTH_121212:
6111 case COLOR_DEPTH_141414:
6113 case COLOR_DEPTH_161616:
6121 static int dm_encoder_helper_atomic_check(struct drm_encoder *encoder,
6122 struct drm_crtc_state *crtc_state,
6123 struct drm_connector_state *conn_state)
6125 struct drm_atomic_state *state = crtc_state->state;
6126 struct drm_connector *connector = conn_state->connector;
6127 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
6128 struct dm_connector_state *dm_new_connector_state = to_dm_connector_state(conn_state);
6129 const struct drm_display_mode *adjusted_mode = &crtc_state->adjusted_mode;
6130 struct drm_dp_mst_topology_mgr *mst_mgr;
6131 struct drm_dp_mst_port *mst_port;
6132 enum dc_color_depth color_depth;
6134 bool is_y420 = false;
6136 if (!aconnector->port || !aconnector->dc_sink)
6139 mst_port = aconnector->port;
6140 mst_mgr = &aconnector->mst_port->mst_mgr;
6142 if (!crtc_state->connectors_changed && !crtc_state->mode_changed)
6145 if (!state->duplicated) {
6146 int max_bpc = conn_state->max_requested_bpc;
6147 is_y420 = drm_mode_is_420_also(&connector->display_info, adjusted_mode) &&
6148 aconnector->force_yuv420_output;
6149 color_depth = convert_color_depth_from_display_info(connector,
6152 bpp = convert_dc_color_depth_into_bpc(color_depth) * 3;
6153 clock = adjusted_mode->clock;
6154 dm_new_connector_state->pbn = drm_dp_calc_pbn_mode(clock, bpp, false);
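/* PBN is the DP MST bandwidth unit (54/64 MBps granularity, with the
 * spec's 1.006 margin folded in by drm_dp_calc_pbn_mode()); the slot
 * count is then derived from it against the link's PBN-per-slot divider
 * below.
 */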
6156 dm_new_connector_state->vcpi_slots = drm_dp_atomic_find_vcpi_slots(state,
6159 dm_new_connector_state->pbn,
6160 dm_mst_get_pbn_divider(aconnector->dc_link));
6161 if (dm_new_connector_state->vcpi_slots < 0) {
6162 DRM_DEBUG_ATOMIC("failed finding vcpi slots: %d\n", (int)dm_new_connector_state->vcpi_slots);
6163 return dm_new_connector_state->vcpi_slots;
6168 const struct drm_encoder_helper_funcs amdgpu_dm_encoder_helper_funcs = {
6169 .disable = dm_encoder_helper_disable,
6170 .atomic_check = dm_encoder_helper_atomic_check
#if defined(CONFIG_DRM_AMD_DC_DCN)
static int dm_update_mst_vcpi_slots_for_dsc(struct drm_atomic_state *state,
					    struct dc_state *dc_state)
{
	struct dc_stream_state *stream = NULL;
	struct drm_connector *connector;
	struct drm_connector_state *new_con_state, *old_con_state;
	struct amdgpu_dm_connector *aconnector;
	struct dm_connector_state *dm_conn_state;
	int i, j, clock, bpp;
	int vcpi, pbn_div, pbn = 0;

	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {

		aconnector = to_amdgpu_dm_connector(connector);

		if (!aconnector->port)
			continue;

		if (!new_con_state || !new_con_state->crtc)
			continue;

		dm_conn_state = to_dm_connector_state(new_con_state);

		for (j = 0; j < dc_state->stream_count; j++) {
			stream = dc_state->streams[j];
			if (!stream)
				continue;

			if ((struct amdgpu_dm_connector *)stream->dm_stream_context == aconnector)
				break;

			stream = NULL;
		}

		if (!stream)
			continue;

		if (stream->timing.flags.DSC != 1) {
			drm_dp_mst_atomic_enable_dsc(state,
						     aconnector->port,
						     dm_conn_state->pbn,
						     0,
						     false);
			continue;
		}

		pbn_div = dm_mst_get_pbn_divider(stream->link);
		bpp = stream->timing.dsc_cfg.bits_per_pixel;
		clock = stream->timing.pix_clk_100hz / 10;
		pbn = drm_dp_calc_pbn_mode(clock, bpp, true);
		vcpi = drm_dp_mst_atomic_enable_dsc(state,
						    aconnector->port,
						    pbn, pbn_div,
						    true);
		if (vcpi < 0)
			return vcpi;

		dm_conn_state->pbn = pbn;
		dm_conn_state->vcpi_slots = vcpi;
	}
	return 0;
}
#endif
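/*
 * Unit note for the DSC path above: DC stores dsc_cfg.bits_per_pixel in
 * 1/16th-of-a-bit increments (e.g. a 12 bpp DSC target is stored as 192),
 * and drm_dp_calc_pbn_mode(..., true) divides that factor back out when
 * converting to PBN.
 */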
static void dm_drm_plane_reset(struct drm_plane *plane)
{
	struct dm_plane_state *amdgpu_state = NULL;

	if (plane->state)
		plane->funcs->atomic_destroy_state(plane, plane->state);

	amdgpu_state = kzalloc(sizeof(*amdgpu_state), GFP_KERNEL);
	WARN_ON(amdgpu_state == NULL);

	if (amdgpu_state)
		__drm_atomic_helper_plane_reset(plane, &amdgpu_state->base);
}
static struct drm_plane_state *
dm_drm_plane_duplicate_state(struct drm_plane *plane)
{
	struct dm_plane_state *dm_plane_state, *old_dm_plane_state;

	old_dm_plane_state = to_dm_plane_state(plane->state);
	dm_plane_state = kzalloc(sizeof(*dm_plane_state), GFP_KERNEL);
	if (!dm_plane_state)
		return NULL;

	__drm_atomic_helper_plane_duplicate_state(plane, &dm_plane_state->base);

	if (old_dm_plane_state->dc_state) {
		dm_plane_state->dc_state = old_dm_plane_state->dc_state;
		dc_plane_state_retain(dm_plane_state->dc_state);
	}

	return &dm_plane_state->base;
}
static void dm_drm_plane_destroy_state(struct drm_plane *plane,
				       struct drm_plane_state *state)
{
	struct dm_plane_state *dm_plane_state = to_dm_plane_state(state);

	if (dm_plane_state->dc_state)
		dc_plane_state_release(dm_plane_state->dc_state);

	drm_atomic_helper_plane_destroy_state(plane, state);
}
static const struct drm_plane_funcs dm_plane_funcs = {
	.update_plane = drm_atomic_helper_update_plane,
	.disable_plane = drm_atomic_helper_disable_plane,
	.destroy = drm_primary_helper_destroy,
	.reset = dm_drm_plane_reset,
	.atomic_duplicate_state = dm_drm_plane_duplicate_state,
	.atomic_destroy_state = dm_drm_plane_destroy_state,
	.format_mod_supported = dm_plane_format_mod_supported,
};
static int dm_plane_helper_prepare_fb(struct drm_plane *plane,
				      struct drm_plane_state *new_state)
{
	struct amdgpu_framebuffer *afb;
	struct drm_gem_object *obj;
	struct amdgpu_device *adev;
	struct amdgpu_bo *rbo;
	struct dm_plane_state *dm_plane_state_new, *dm_plane_state_old;
	struct list_head list;
	struct ttm_validate_buffer tv;
	struct ww_acquire_ctx ticket;
	uint32_t domain;
	int r;

	if (!new_state->fb) {
		DRM_DEBUG_DRIVER("No FB bound\n");
		return 0;
	}

	afb = to_amdgpu_framebuffer(new_state->fb);
	obj = new_state->fb->obj[0];
	rbo = gem_to_amdgpu_bo(obj);
	adev = amdgpu_ttm_adev(rbo->tbo.bdev);
	INIT_LIST_HEAD(&list);

	tv.bo = &rbo->tbo;
	tv.num_shared = 1;
	list_add(&tv.head, &list);

	r = ttm_eu_reserve_buffers(&ticket, &list, false, NULL);
	if (r != 0) {
		dev_err(adev->dev, "fail to reserve bo (%d)\n", r);
		return r;
	}

	if (plane->type != DRM_PLANE_TYPE_CURSOR)
		domain = amdgpu_display_supported_domains(adev, rbo->flags);
	else
		domain = AMDGPU_GEM_DOMAIN_VRAM;

	r = amdgpu_bo_pin(rbo, domain);
	if (unlikely(r != 0)) {
		if (r != -ERESTARTSYS)
			DRM_ERROR("Failed to pin framebuffer with error %d\n", r);
		ttm_eu_backoff_reservation(&ticket, &list);
		return r;
	}

	r = amdgpu_ttm_alloc_gart(&rbo->tbo);
	if (unlikely(r != 0)) {
		amdgpu_bo_unpin(rbo);
		ttm_eu_backoff_reservation(&ticket, &list);
		DRM_ERROR("%p bind failed\n", rbo);
		return r;
	}

	ttm_eu_backoff_reservation(&ticket, &list);

	afb->address = amdgpu_bo_gpu_offset(rbo);

	amdgpu_bo_ref(rbo);

	/*
	 * We don't do surface updates on planes that have been newly created,
	 * but we also don't have the afb->address during atomic check.
	 *
	 * Fill in buffer attributes depending on the address here, but only on
	 * newly created planes since they're not being used by DC yet and this
	 * won't modify global state.
	 */
	dm_plane_state_old = to_dm_plane_state(plane->state);
	dm_plane_state_new = to_dm_plane_state(new_state);

	if (dm_plane_state_new->dc_state &&
	    dm_plane_state_old->dc_state != dm_plane_state_new->dc_state) {
		struct dc_plane_state *plane_state =
			dm_plane_state_new->dc_state;
		bool force_disable_dcc = !plane_state->dcc.enable;

		fill_plane_buffer_attributes(
			adev, afb, plane_state->format, plane_state->rotation,
			afb->tiling_flags,
			&plane_state->tiling_info, &plane_state->plane_size,
			&plane_state->dcc, &plane_state->address,
			afb->tmz_surface, force_disable_dcc);
	}

	return 0;
}
static void dm_plane_helper_cleanup_fb(struct drm_plane *plane,
				       struct drm_plane_state *old_state)
{
	struct amdgpu_bo *rbo;
	int r;

	if (!old_state->fb)
		return;

	rbo = gem_to_amdgpu_bo(old_state->fb->obj[0]);
	r = amdgpu_bo_reserve(rbo, false);
	if (unlikely(r)) {
		DRM_ERROR("failed to reserve rbo before unpin\n");
		return;
	}

	amdgpu_bo_unpin(rbo);
	amdgpu_bo_unreserve(rbo);
	amdgpu_bo_unref(&rbo);
}
static int dm_plane_helper_check_state(struct drm_plane_state *state,
				       struct drm_crtc_state *new_crtc_state)
{
	struct drm_framebuffer *fb = state->fb;
	int min_downscale, max_upscale;
	int min_scale = 0;
	int max_scale = INT_MAX;

	/* Plane enabled? Validate viewport and get scaling factors from plane caps. */
	if (fb && state->crtc) {
		/* Validate viewport to cover the case when only the position changes */
		if (state->plane->type != DRM_PLANE_TYPE_CURSOR) {
			int viewport_width = state->crtc_w;
			int viewport_height = state->crtc_h;

			if (state->crtc_x < 0)
				viewport_width += state->crtc_x;
			else if (state->crtc_x + state->crtc_w > new_crtc_state->mode.crtc_hdisplay)
				viewport_width = new_crtc_state->mode.crtc_hdisplay - state->crtc_x;

			if (state->crtc_y < 0)
				viewport_height += state->crtc_y;
			else if (state->crtc_y + state->crtc_h > new_crtc_state->mode.crtc_vdisplay)
				viewport_height = new_crtc_state->mode.crtc_vdisplay - state->crtc_y;

			/* If completely outside of screen, viewport_width and/or viewport_height will be negative,
			 * which is still OK to satisfy the condition below, thereby also covering these cases
			 * (when plane is completely outside of screen).
			 * x2 for width is because of pipe-split.
			 */
			if (viewport_width < MIN_VIEWPORT_SIZE*2 || viewport_height < MIN_VIEWPORT_SIZE)
				return -EINVAL;
		}

		/* Get min/max allowed scaling factors from plane caps. */
		get_min_max_dc_plane_scaling(state->crtc->dev, fb,
					     &min_downscale, &max_upscale);
		/*
		 * Convert to drm convention: 16.16 fixed point, instead of dc's
		 * 1.0 == 1000. Also drm scaling is src/dst instead of dc's
		 * dst/src, so min_scale = 1.0 / max_upscale, etc.
		 */
		min_scale = (1000 << 16) / max_upscale;
		max_scale = (1000 << 16) / min_downscale;
	}

	return drm_atomic_helper_check_plane_state(
		state, new_crtc_state, min_scale, max_scale, true, true);
}
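/*
 * Worked example of the 16.16 conversion above (hypothetical caps): if a
 * plane reports max_upscale = 16000 (16x in DC's 1.0 == 1000 convention),
 * then min_scale = (1000 << 16) / 16000 = 4096, i.e. src/dst = 1/16 in
 * DRM's 16.16 fixed point, where a 1:1 ratio is 1 << 16 = 65536.
 */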
static int dm_plane_atomic_check(struct drm_plane *plane,
				 struct drm_plane_state *state)
{
	struct amdgpu_device *adev = drm_to_adev(plane->dev);
	struct dc *dc = adev->dm.dc;
	struct dm_plane_state *dm_plane_state;
	struct dc_scaling_info scaling_info;
	struct drm_crtc_state *new_crtc_state;
	int ret;

	trace_amdgpu_dm_plane_atomic_check(state);

	dm_plane_state = to_dm_plane_state(state);

	if (!dm_plane_state->dc_state)
		return 0;

	new_crtc_state =
		drm_atomic_get_new_crtc_state(state->state, state->crtc);
	if (!new_crtc_state)
		return -EINVAL;

	ret = dm_plane_helper_check_state(state, new_crtc_state);
	if (ret)
		return ret;

	ret = fill_dc_scaling_info(state, &scaling_info);
	if (ret)
		return ret;

	if (dc_validate_plane(dc, dm_plane_state->dc_state) == DC_OK)
		return 0;

	return -EINVAL;
}
static int dm_plane_atomic_async_check(struct drm_plane *plane,
				       struct drm_plane_state *new_plane_state)
{
	/* Only support async updates on cursor planes. */
	if (plane->type != DRM_PLANE_TYPE_CURSOR)
		return -EINVAL;

	return 0;
}
static void dm_plane_atomic_async_update(struct drm_plane *plane,
					 struct drm_plane_state *new_state)
{
	struct drm_plane_state *old_state =
		drm_atomic_get_old_plane_state(new_state->state, plane);

	trace_amdgpu_dm_atomic_update_cursor(new_state);

	swap(plane->state->fb, new_state->fb);

	plane->state->src_x = new_state->src_x;
	plane->state->src_y = new_state->src_y;
	plane->state->src_w = new_state->src_w;
	plane->state->src_h = new_state->src_h;
	plane->state->crtc_x = new_state->crtc_x;
	plane->state->crtc_y = new_state->crtc_y;
	plane->state->crtc_w = new_state->crtc_w;
	plane->state->crtc_h = new_state->crtc_h;

	handle_cursor_update(plane, old_state);
}
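/*
 * Design note (a reading of the swap() above, not separately documented):
 * the async path copies only the fb pointer and the src/crtc rectangles
 * into the committed plane state, so cursor moves bypass a full atomic
 * commit; the swapped-out fb ends up in the transient state and is
 * released when that state is destroyed.
 */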
static const struct drm_plane_helper_funcs dm_plane_helper_funcs = {
	.prepare_fb = dm_plane_helper_prepare_fb,
	.cleanup_fb = dm_plane_helper_cleanup_fb,
	.atomic_check = dm_plane_atomic_check,
	.atomic_async_check = dm_plane_atomic_async_check,
	.atomic_async_update = dm_plane_atomic_async_update
};
/*
 * TODO: these are currently initialized to rgb formats only.
 * For future use cases we should either initialize them dynamically based on
 * plane capabilities, or initialize this array to all formats, so internal drm
 * check will succeed, and let DC implement proper check
 */
static const uint32_t rgb_formats[] = {
	DRM_FORMAT_XRGB8888,
	DRM_FORMAT_ARGB8888,
	DRM_FORMAT_RGBA8888,
	DRM_FORMAT_XRGB2101010,
	DRM_FORMAT_XBGR2101010,
	DRM_FORMAT_ARGB2101010,
	DRM_FORMAT_ABGR2101010,
	DRM_FORMAT_XBGR8888,
	DRM_FORMAT_ABGR8888,
};
static const uint32_t overlay_formats[] = {
	DRM_FORMAT_XRGB8888,
	DRM_FORMAT_ARGB8888,
	DRM_FORMAT_RGBA8888,
	DRM_FORMAT_XBGR8888,
	DRM_FORMAT_ABGR8888,
};
static const u32 cursor_formats[] = {
	DRM_FORMAT_ARGB8888
};
static int get_plane_formats(const struct drm_plane *plane,
			     const struct dc_plane_cap *plane_cap,
			     uint32_t *formats, int max_formats)
{
	int i, num_formats = 0;

	/*
	 * TODO: Query support for each group of formats directly from
	 * DC plane caps. This will require adding more formats to the
	 * caps list.
	 */

	switch (plane->type) {
	case DRM_PLANE_TYPE_PRIMARY:
		for (i = 0; i < ARRAY_SIZE(rgb_formats); ++i) {
			if (num_formats >= max_formats)
				break;

			formats[num_formats++] = rgb_formats[i];
		}

		if (plane_cap && plane_cap->pixel_format_support.nv12)
			formats[num_formats++] = DRM_FORMAT_NV12;
		if (plane_cap && plane_cap->pixel_format_support.p010)
			formats[num_formats++] = DRM_FORMAT_P010;
		if (plane_cap && plane_cap->pixel_format_support.fp16) {
			formats[num_formats++] = DRM_FORMAT_XRGB16161616F;
			formats[num_formats++] = DRM_FORMAT_ARGB16161616F;
			formats[num_formats++] = DRM_FORMAT_XBGR16161616F;
			formats[num_formats++] = DRM_FORMAT_ABGR16161616F;
		}
		break;

	case DRM_PLANE_TYPE_OVERLAY:
		for (i = 0; i < ARRAY_SIZE(overlay_formats); ++i) {
			if (num_formats >= max_formats)
				break;

			formats[num_formats++] = overlay_formats[i];
		}
		break;

	case DRM_PLANE_TYPE_CURSOR:
		for (i = 0; i < ARRAY_SIZE(cursor_formats); ++i) {
			if (num_formats >= max_formats)
				break;

			formats[num_formats++] = cursor_formats[i];
		}
		break;
	}

	return num_formats;
}
static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
				struct drm_plane *plane,
				unsigned long possible_crtcs,
				const struct dc_plane_cap *plane_cap)
{
	uint32_t formats[32];
	int num_formats;
	int res;
	unsigned int supported_rotations;
	uint64_t *modifiers = NULL;

	num_formats = get_plane_formats(plane, plane_cap, formats,
					ARRAY_SIZE(formats));

	res = get_plane_modifiers(dm->adev, plane->type, &modifiers);
	if (res)
		return res;

	res = drm_universal_plane_init(adev_to_drm(dm->adev), plane, possible_crtcs,
				       &dm_plane_funcs, formats, num_formats,
				       modifiers, plane->type, NULL);
	kfree(modifiers);
	if (res)
		return res;

	if (plane->type == DRM_PLANE_TYPE_OVERLAY &&
	    plane_cap && plane_cap->per_pixel_alpha) {
		unsigned int blend_caps = BIT(DRM_MODE_BLEND_PIXEL_NONE) |
					  BIT(DRM_MODE_BLEND_PREMULTI);

		drm_plane_create_alpha_property(plane);
		drm_plane_create_blend_mode_property(plane, blend_caps);
	}

	if (plane->type == DRM_PLANE_TYPE_PRIMARY &&
	    plane_cap &&
	    (plane_cap->pixel_format_support.nv12 ||
	     plane_cap->pixel_format_support.p010)) {
		/* This only affects YUV formats. */
		drm_plane_create_color_properties(
			plane,
			BIT(DRM_COLOR_YCBCR_BT601) |
			BIT(DRM_COLOR_YCBCR_BT709) |
			BIT(DRM_COLOR_YCBCR_BT2020),
			BIT(DRM_COLOR_YCBCR_LIMITED_RANGE) |
			BIT(DRM_COLOR_YCBCR_FULL_RANGE),
			DRM_COLOR_YCBCR_BT709, DRM_COLOR_YCBCR_LIMITED_RANGE);
	}

	supported_rotations =
		DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_90 |
		DRM_MODE_ROTATE_180 | DRM_MODE_ROTATE_270;

	if (dm->adev->asic_type >= CHIP_BONAIRE &&
	    plane->type != DRM_PLANE_TYPE_CURSOR)
		drm_plane_create_rotation_property(plane, DRM_MODE_ROTATE_0,
						   supported_rotations);

	drm_plane_helper_add(plane, &dm_plane_helper_funcs);

	/* Create (reset) the plane state */
	if (plane->funcs->reset)
		plane->funcs->reset(plane);

	return 0;
}
static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
			       struct drm_plane *plane,
			       uint32_t crtc_index)
{
	struct amdgpu_crtc *acrtc = NULL;
	struct drm_plane *cursor_plane;

	int res = -ENOMEM;

	cursor_plane = kzalloc(sizeof(*cursor_plane), GFP_KERNEL);
	if (!cursor_plane)
		goto fail;

	cursor_plane->type = DRM_PLANE_TYPE_CURSOR;
	res = amdgpu_dm_plane_init(dm, cursor_plane, 0, NULL);

	acrtc = kzalloc(sizeof(struct amdgpu_crtc), GFP_KERNEL);
	if (!acrtc)
		goto fail;

	res = drm_crtc_init_with_planes(
			dm->ddev,
			&acrtc->base,
			plane,
			cursor_plane,
			&amdgpu_dm_crtc_funcs, NULL);

	if (res)
		goto fail;

	drm_crtc_helper_add(&acrtc->base, &amdgpu_dm_crtc_helper_funcs);

	/* Create (reset) the plane state */
	if (acrtc->base.funcs->reset)
		acrtc->base.funcs->reset(&acrtc->base);

	acrtc->max_cursor_width = dm->adev->dm.dc->caps.max_cursor_size;
	acrtc->max_cursor_height = dm->adev->dm.dc->caps.max_cursor_size;

	acrtc->crtc_id = crtc_index;
	acrtc->base.enabled = false;
	acrtc->otg_inst = -1;

	dm->adev->mode_info.crtcs[crtc_index] = acrtc;
	drm_crtc_enable_color_mgmt(&acrtc->base, MAX_COLOR_LUT_ENTRIES,
				   true, MAX_COLOR_LUT_ENTRIES);
	drm_mode_crtc_set_gamma_size(&acrtc->base, MAX_COLOR_LEGACY_LUT_ENTRIES);

	return 0;

fail:
	kfree(acrtc);
	kfree(cursor_plane);

	return res;
}
static int to_drm_connector_type(enum signal_type st)
{
	switch (st) {
	case SIGNAL_TYPE_HDMI_TYPE_A:
		return DRM_MODE_CONNECTOR_HDMIA;
	case SIGNAL_TYPE_EDP:
		return DRM_MODE_CONNECTOR_eDP;
	case SIGNAL_TYPE_LVDS:
		return DRM_MODE_CONNECTOR_LVDS;
	case SIGNAL_TYPE_RGB:
		return DRM_MODE_CONNECTOR_VGA;
	case SIGNAL_TYPE_DISPLAY_PORT:
	case SIGNAL_TYPE_DISPLAY_PORT_MST:
		return DRM_MODE_CONNECTOR_DisplayPort;
	case SIGNAL_TYPE_DVI_DUAL_LINK:
	case SIGNAL_TYPE_DVI_SINGLE_LINK:
		return DRM_MODE_CONNECTOR_DVID;
	case SIGNAL_TYPE_VIRTUAL:
		return DRM_MODE_CONNECTOR_VIRTUAL;
	default:
		return DRM_MODE_CONNECTOR_Unknown;
	}
}
static struct drm_encoder *amdgpu_dm_connector_to_encoder(struct drm_connector *connector)
{
	struct drm_encoder *encoder;

	/* There is only one encoder per connector */
	drm_connector_for_each_possible_encoder(connector, encoder)
		return encoder;

	return NULL;
}
static void amdgpu_dm_get_native_mode(struct drm_connector *connector)
{
	struct drm_encoder *encoder;
	struct amdgpu_encoder *amdgpu_encoder;

	encoder = amdgpu_dm_connector_to_encoder(connector);

	if (encoder == NULL)
		return;

	amdgpu_encoder = to_amdgpu_encoder(encoder);

	amdgpu_encoder->native_mode.clock = 0;

	if (!list_empty(&connector->probed_modes)) {
		struct drm_display_mode *preferred_mode = NULL;

		list_for_each_entry(preferred_mode,
				    &connector->probed_modes,
				    head) {
			if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED)
				amdgpu_encoder->native_mode = *preferred_mode;

			break;
		}
	}
}
static struct drm_display_mode *
amdgpu_dm_create_common_mode(struct drm_encoder *encoder,
			     char *name,
			     int hdisplay, int vdisplay)
{
	struct drm_device *dev = encoder->dev;
	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
	struct drm_display_mode *mode = NULL;
	struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;

	mode = drm_mode_duplicate(dev, native_mode);

	if (mode == NULL)
		return NULL;

	mode->hdisplay = hdisplay;
	mode->vdisplay = vdisplay;
	mode->type &= ~DRM_MODE_TYPE_PREFERRED;
	strscpy(mode->name, name, DRM_DISPLAY_MODE_LEN);

	return mode;
}
static void amdgpu_dm_connector_add_common_modes(struct drm_encoder *encoder,
						 struct drm_connector *connector)
{
	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
	struct drm_display_mode *mode = NULL;
	struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
	struct amdgpu_dm_connector *amdgpu_dm_connector =
				to_amdgpu_dm_connector(connector);
	int i;
	int n;
	struct mode_size {
		char name[DRM_DISPLAY_MODE_LEN];
		int w;
		int h;
	} common_modes[] = {
		{  "640x480",  640,  480},
		{  "800x600",  800,  600},
		{ "1024x768", 1024,  768},
		{ "1280x720", 1280,  720},
		{ "1280x800", 1280,  800},
		{"1280x1024", 1280, 1024},
		{ "1440x900", 1440,  900},
		{"1680x1050", 1680, 1050},
		{"1600x1200", 1600, 1200},
		{"1920x1080", 1920, 1080},
		{"1920x1200", 1920, 1200}
	};

	n = ARRAY_SIZE(common_modes);

	for (i = 0; i < n; i++) {
		struct drm_display_mode *curmode = NULL;
		bool mode_existed = false;

		if (common_modes[i].w > native_mode->hdisplay ||
		    common_modes[i].h > native_mode->vdisplay ||
		    (common_modes[i].w == native_mode->hdisplay &&
		     common_modes[i].h == native_mode->vdisplay))
			continue;

		list_for_each_entry(curmode, &connector->probed_modes, head) {
			if (common_modes[i].w == curmode->hdisplay &&
			    common_modes[i].h == curmode->vdisplay) {
				mode_existed = true;
				break;
			}
		}

		if (mode_existed)
			continue;

		mode = amdgpu_dm_create_common_mode(encoder,
				common_modes[i].name, common_modes[i].w,
				common_modes[i].h);
		drm_mode_probed_add(connector, mode);
		amdgpu_dm_connector->num_modes++;
	}
}
static void amdgpu_dm_connector_ddc_get_modes(struct drm_connector *connector,
					      struct edid *edid)
{
	struct amdgpu_dm_connector *amdgpu_dm_connector =
			to_amdgpu_dm_connector(connector);

	if (edid) {
		/* empty probed_modes */
		INIT_LIST_HEAD(&connector->probed_modes);
		amdgpu_dm_connector->num_modes =
				drm_add_edid_modes(connector, edid);

		/* sorting the probed modes before calling function
		 * amdgpu_dm_get_native_mode() since EDID can have
		 * more than one preferred mode. The modes that are
		 * later in the probed mode list could be of higher
		 * and preferred resolution. For example, 3840x2160
		 * resolution in base EDID preferred timing and 4096x2160
		 * preferred resolution in DID extension block later.
		 */
		drm_mode_sort(&connector->probed_modes);
		amdgpu_dm_get_native_mode(connector);
	} else {
		amdgpu_dm_connector->num_modes = 0;
	}
}
static int amdgpu_dm_connector_get_modes(struct drm_connector *connector)
{
	struct amdgpu_dm_connector *amdgpu_dm_connector =
			to_amdgpu_dm_connector(connector);
	struct drm_encoder *encoder;
	struct edid *edid = amdgpu_dm_connector->edid;

	encoder = amdgpu_dm_connector_to_encoder(connector);

	if (!drm_edid_is_valid(edid)) {
		amdgpu_dm_connector->num_modes =
				drm_add_modes_noedid(connector, 640, 480);
	} else {
		amdgpu_dm_connector_ddc_get_modes(connector, edid);
		amdgpu_dm_connector_add_common_modes(encoder, connector);
	}
	amdgpu_dm_fbc_init(connector);

	return amdgpu_dm_connector->num_modes;
}
void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
				     struct amdgpu_dm_connector *aconnector,
				     int connector_type,
				     struct dc_link *link,
				     int link_index)
{
	struct amdgpu_device *adev = drm_to_adev(dm->ddev);

	/*
	 * Some of the properties below require access to state, like bpc.
	 * Allocate some default initial connector state with our reset helper.
	 */
	if (aconnector->base.funcs->reset)
		aconnector->base.funcs->reset(&aconnector->base);

	aconnector->connector_id = link_index;
	aconnector->dc_link = link;
	aconnector->base.interlace_allowed = false;
	aconnector->base.doublescan_allowed = false;
	aconnector->base.stereo_allowed = false;
	aconnector->base.dpms = DRM_MODE_DPMS_OFF;
	aconnector->hpd.hpd = AMDGPU_HPD_NONE; /* not used */
	aconnector->audio_inst = -1;
	mutex_init(&aconnector->hpd_lock);
	/*
	 * Configure HPD hot-plug support: connector->polled defaults to 0,
	 * which means HPD hot plug is not supported.
	 */
	switch (connector_type) {
	case DRM_MODE_CONNECTOR_HDMIA:
		aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
		aconnector->base.ycbcr_420_allowed =
			link->link_enc->features.hdmi_ycbcr420_supported ? true : false;
		break;
	case DRM_MODE_CONNECTOR_DisplayPort:
		aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
		aconnector->base.ycbcr_420_allowed =
			link->link_enc->features.dp_ycbcr420_supported ? true : false;
		break;
	case DRM_MODE_CONNECTOR_DVID:
		aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
		break;
	default:
		break;
	}
	drm_object_attach_property(&aconnector->base.base,
				   dm->ddev->mode_config.scaling_mode_property,
				   DRM_MODE_SCALE_NONE);

	drm_object_attach_property(&aconnector->base.base,
				   adev->mode_info.underscan_property,
				   UNDERSCAN_OFF);
	drm_object_attach_property(&aconnector->base.base,
				   adev->mode_info.underscan_hborder_property,
				   0);
	drm_object_attach_property(&aconnector->base.base,
				   adev->mode_info.underscan_vborder_property,
				   0);

	if (!aconnector->mst_port)
		drm_connector_attach_max_bpc_property(&aconnector->base, 8, 16);

	/* This defaults to the max in the range, but we want 8bpc for non-edp. */
	aconnector->base.state->max_bpc = (connector_type == DRM_MODE_CONNECTOR_eDP) ? 16 : 8;
	aconnector->base.state->max_requested_bpc = aconnector->base.state->max_bpc;

	if (connector_type == DRM_MODE_CONNECTOR_eDP &&
	    (dc_is_dmcu_initialized(adev->dm.dc) || adev->dm.dc->ctx->dmub_srv)) {
		drm_object_attach_property(&aconnector->base.base,
					   adev->mode_info.abm_level_property, 0);
	}

	if (connector_type == DRM_MODE_CONNECTOR_HDMIA ||
	    connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
	    connector_type == DRM_MODE_CONNECTOR_eDP) {
		drm_object_attach_property(
			&aconnector->base.base,
			dm->ddev->mode_config.hdr_output_metadata_property, 0);

		if (!aconnector->mst_port)
			drm_connector_attach_vrr_capable_property(&aconnector->base);

#ifdef CONFIG_DRM_AMD_DC_HDCP
		if (adev->dm.hdcp_workqueue)
			drm_connector_attach_content_protection_property(&aconnector->base, true);
#endif
	}
}
static int amdgpu_dm_i2c_xfer(struct i2c_adapter *i2c_adap,
			      struct i2c_msg *msgs, int num)
{
	struct amdgpu_i2c_adapter *i2c = i2c_get_adapdata(i2c_adap);
	struct ddc_service *ddc_service = i2c->ddc_service;
	struct i2c_command cmd;
	int i;
	int result = -EIO;

	cmd.payloads = kcalloc(num, sizeof(struct i2c_payload), GFP_KERNEL);

	if (!cmd.payloads)
		return result;

	cmd.number_of_payloads = num;
	cmd.engine = I2C_COMMAND_ENGINE_DEFAULT;
	cmd.speed = 100;

	for (i = 0; i < num; i++) {
		cmd.payloads[i].write = !(msgs[i].flags & I2C_M_RD);
		cmd.payloads[i].address = msgs[i].addr;
		cmd.payloads[i].length = msgs[i].len;
		cmd.payloads[i].data = msgs[i].buf;
	}

	if (dc_submit_i2c(
			ddc_service->ctx->dc,
			ddc_service->ddc_pin->hw_info.ddc_channel,
			&cmd))
		result = num;

	kfree(cmd.payloads);
	return result;
}
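/*
 * Illustration (hypothetical transfer): an EDID read typically arrives here
 * as two i2c_msg entries - a one-byte write of the EDID offset to address
 * 0x50, then a 128-byte read - and each msg becomes one i2c_payload, with
 * payload->write derived from the I2C_M_RD flag as in the loop above.
 */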
static u32 amdgpu_dm_i2c_func(struct i2c_adapter *adap)
{
	return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
}

static const struct i2c_algorithm amdgpu_dm_i2c_algo = {
	.master_xfer = amdgpu_dm_i2c_xfer,
	.functionality = amdgpu_dm_i2c_func,
};
static struct amdgpu_i2c_adapter *
create_i2c(struct ddc_service *ddc_service,
	   int link_index,
	   int *res)
{
	struct amdgpu_device *adev = ddc_service->ctx->driver_context;
	struct amdgpu_i2c_adapter *i2c;

	i2c = kzalloc(sizeof(struct amdgpu_i2c_adapter), GFP_KERNEL);
	if (!i2c)
		return NULL;
	i2c->base.owner = THIS_MODULE;
	i2c->base.class = I2C_CLASS_DDC;
	i2c->base.dev.parent = &adev->pdev->dev;
	i2c->base.algo = &amdgpu_dm_i2c_algo;
	snprintf(i2c->base.name, sizeof(i2c->base.name), "AMDGPU DM i2c hw bus %d", link_index);
	i2c_set_adapdata(&i2c->base, i2c);
	i2c->ddc_service = ddc_service;
	i2c->ddc_service->ddc_pin->hw_info.ddc_channel = link_index;

	return i2c;
}
/*
 * Note: this function assumes that dc_link_detect() was called for the
 * dc_link which will be represented by this aconnector.
 */
static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
				    struct amdgpu_dm_connector *aconnector,
				    uint32_t link_index,
				    struct amdgpu_encoder *aencoder)
{
	int res = 0;
	int connector_type;
	struct dc *dc = dm->dc;
	struct dc_link *link = dc_get_link_at_index(dc, link_index);
	struct amdgpu_i2c_adapter *i2c;

	link->priv = aconnector;

	DRM_DEBUG_DRIVER("%s()\n", __func__);

	i2c = create_i2c(link->ddc, link->link_index, &res);
	if (!i2c) {
		DRM_ERROR("Failed to create i2c adapter data\n");
		return -ENOMEM;
	}

	aconnector->i2c = i2c;
	res = i2c_add_adapter(&i2c->base);

	if (res) {
		DRM_ERROR("Failed to register hw i2c %d\n", link->link_index);
		goto out_free;
	}

	connector_type = to_drm_connector_type(link->connector_signal);

	res = drm_connector_init_with_ddc(
			dm->ddev,
			&aconnector->base,
			&amdgpu_dm_connector_funcs,
			connector_type,
			&i2c->base);

	if (res) {
		DRM_ERROR("connector_init failed\n");
		aconnector->connector_id = -1;
		goto out_free;
	}

	drm_connector_helper_add(
			&aconnector->base,
			&amdgpu_dm_connector_helper_funcs);

	amdgpu_dm_connector_init_helper(
		dm,
		aconnector,
		connector_type,
		link,
		link_index);

	drm_connector_attach_encoder(
		&aconnector->base, &aencoder->base);

	if (connector_type == DRM_MODE_CONNECTOR_DisplayPort
		|| connector_type == DRM_MODE_CONNECTOR_eDP)
		amdgpu_dm_initialize_dp_connector(dm, aconnector, link->link_index);

out_free:
	if (res) {
		kfree(i2c);
		aconnector->i2c = NULL;
	}
	return res;
}
int amdgpu_dm_get_encoder_crtc_mask(struct amdgpu_device *adev)
{
	/* Map num_crtc to a bitmask of usable CRTCs: 1 -> 0x1, 2 -> 0x3, ... */
	switch (adev->mode_info.num_crtc) {
	case 0:
		return 0x0;
	case 1:
		return 0x1;
	case 2:
		return 0x3;
	case 3:
		return 0x7;
	case 4:
		return 0xf;
	case 5:
		return 0x1f;
	case 6:
	default:
		return 0x3f;
	}
}
static int amdgpu_dm_encoder_init(struct drm_device *dev,
				  struct amdgpu_encoder *aencoder,
				  uint32_t link_index)
{
	struct amdgpu_device *adev = drm_to_adev(dev);

	int res = drm_encoder_init(dev,
				   &aencoder->base,
				   &amdgpu_dm_encoder_funcs,
				   DRM_MODE_ENCODER_TMDS,
				   NULL);

	aencoder->base.possible_crtcs = amdgpu_dm_get_encoder_crtc_mask(adev);

	if (!res)
		aencoder->encoder_id = link_index;
	else
		aencoder->encoder_id = -1;

	drm_encoder_helper_add(&aencoder->base, &amdgpu_dm_encoder_helper_funcs);

	return res;
}
static void manage_dm_interrupts(struct amdgpu_device *adev,
				 struct amdgpu_crtc *acrtc,
				 bool enable)
{
	/*
	 * We have no guarantee that the frontend index maps to the same
	 * backend index - some even map to more than one.
	 *
	 * TODO: Use a different interrupt or check DC itself for the mapping.
	 */
	int irq_type =
		amdgpu_display_crtc_idx_to_irq_type(
			adev,
			acrtc->crtc_id);

	if (enable) {
		drm_crtc_vblank_on(&acrtc->base);
		amdgpu_irq_get(
			adev,
			&adev->pageflip_irq,
			irq_type);
	} else {
		amdgpu_irq_put(
			adev,
			&adev->pageflip_irq,
			irq_type);
		drm_crtc_vblank_off(&acrtc->base);
	}
}
static void dm_update_pflip_irq_state(struct amdgpu_device *adev,
				      struct amdgpu_crtc *acrtc)
{
	int irq_type =
		amdgpu_display_crtc_idx_to_irq_type(adev, acrtc->crtc_id);

	/*
	 * This reads the current state for the IRQ and force reapplies
	 * the setting to hardware.
	 */
	amdgpu_irq_update(adev, &adev->pageflip_irq, irq_type);
}
static bool
is_scaling_state_different(const struct dm_connector_state *dm_state,
			   const struct dm_connector_state *old_dm_state)
{
	if (dm_state->scaling != old_dm_state->scaling)
		return true;
	if (!dm_state->underscan_enable && old_dm_state->underscan_enable) {
		if (old_dm_state->underscan_hborder != 0 && old_dm_state->underscan_vborder != 0)
			return true;
	} else if (dm_state->underscan_enable && !old_dm_state->underscan_enable) {
		if (dm_state->underscan_hborder != 0 && dm_state->underscan_vborder != 0)
			return true;
	} else if (dm_state->underscan_hborder != old_dm_state->underscan_hborder ||
		   dm_state->underscan_vborder != old_dm_state->underscan_vborder)
		return true;

	return false;
}
#ifdef CONFIG_DRM_AMD_DC_HDCP
static bool is_content_protection_different(struct drm_connector_state *state,
					    const struct drm_connector_state *old_state,
					    const struct drm_connector *connector, struct hdcp_workqueue *hdcp_w)
{
	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
	struct dm_connector_state *dm_con_state = to_dm_connector_state(connector->state);

	/* Handle: Type0/1 change */
	if (old_state->hdcp_content_type != state->hdcp_content_type &&
	    state->content_protection != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
		state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
		return true;
	}

	/* CP is being re-enabled, ignore this.
	 *
	 * Handles: ENABLED -> DESIRED
	 */
	if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED &&
	    state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) {
		state->content_protection = DRM_MODE_CONTENT_PROTECTION_ENABLED;
		return false;
	}

	/* S3 resume case, since old state will always be 0 (UNDESIRED) and the restored state will be ENABLED
	 *
	 * Handles: UNDESIRED -> ENABLED
	 */
	if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_UNDESIRED &&
	    state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
		state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;

	/* Check if something is connected/enabled, otherwise we start hdcp but nothing is connected/enabled
	 * hot-plug, headless s3, dpms
	 *
	 * Handles: DESIRED -> DESIRED (Special case)
	 */
	if (dm_con_state->update_hdcp && state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED &&
	    connector->dpms == DRM_MODE_DPMS_ON && aconnector->dc_sink != NULL) {
		dm_con_state->update_hdcp = false;
		return true;
	}

	/*
	 * Handles: UNDESIRED -> UNDESIRED
	 *          DESIRED -> DESIRED
	 *          ENABLED -> ENABLED
	 */
	if (old_state->content_protection == state->content_protection)
		return false;

	/*
	 * Handles: UNDESIRED -> DESIRED
	 *          DESIRED -> UNDESIRED
	 *          ENABLED -> UNDESIRED
	 */
	if (state->content_protection != DRM_MODE_CONTENT_PROTECTION_ENABLED)
		return true;

	/*
	 * Handles: DESIRED -> ENABLED
	 */
	return false;
}
#endif
static void remove_stream(struct amdgpu_device *adev,
			  struct amdgpu_crtc *acrtc,
			  struct dc_stream_state *stream)
{
	/* this is the update mode case */

	acrtc->otg_inst = -1;
	acrtc->enabled = false;
}
static int get_cursor_position(struct drm_plane *plane, struct drm_crtc *crtc,
			       struct dc_cursor_position *position)
{
	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
	int x, y;
	int xorigin = 0, yorigin = 0;

	position->enable = false;
	position->x = 0;
	position->y = 0;

	if (!crtc || !plane->state->fb)
		return 0;

	if ((plane->state->crtc_w > amdgpu_crtc->max_cursor_width) ||
	    (plane->state->crtc_h > amdgpu_crtc->max_cursor_height)) {
		DRM_ERROR("%s: bad cursor width or height %d x %d\n",
			  __func__,
			  plane->state->crtc_w,
			  plane->state->crtc_h);
		return -EINVAL;
	}

	x = plane->state->crtc_x;
	y = plane->state->crtc_y;

	if (x <= -amdgpu_crtc->max_cursor_width ||
	    y <= -amdgpu_crtc->max_cursor_height)
		return 0;

	if (x < 0) {
		xorigin = min(-x, amdgpu_crtc->max_cursor_width - 1);
		x = 0;
	}
	if (y < 0) {
		yorigin = min(-y, amdgpu_crtc->max_cursor_height - 1);
		y = 0;
	}
	position->enable = true;
	position->translate_by_source = true;
	position->x = x;
	position->y = y;
	position->x_hotspot = xorigin;
	position->y_hotspot = yorigin;

	return 0;
}
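/*
 * Clamping example for the code above (hypothetical values): a 64x64 cursor
 * at crtc_x = -10 stays enabled; x is clamped to 0 and x_hotspot becomes 10,
 * so the visible part of the cursor still lines up with the framebuffer
 * content.
 */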
static void handle_cursor_update(struct drm_plane *plane,
				 struct drm_plane_state *old_plane_state)
{
	struct amdgpu_device *adev = drm_to_adev(plane->dev);
	struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(plane->state->fb);
	struct drm_crtc *crtc = afb ? plane->state->crtc : old_plane_state->crtc;
	struct dm_crtc_state *crtc_state = crtc ? to_dm_crtc_state(crtc->state) : NULL;
	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
	uint64_t address = afb ? afb->address : 0;
	struct dc_cursor_position position;
	struct dc_cursor_attributes attributes;
	int ret;

	if (!plane->state->fb && !old_plane_state->fb)
		return;

	DRM_DEBUG_DRIVER("%s: crtc_id=%d with size %d to %d\n",
			 __func__,
			 amdgpu_crtc->crtc_id,
			 plane->state->crtc_w,
			 plane->state->crtc_h);

	ret = get_cursor_position(plane, crtc, &position);
	if (ret)
		return;

	if (!position.enable) {
		/* turn off cursor */
		if (crtc_state && crtc_state->stream) {
			mutex_lock(&adev->dm.dc_lock);
			dc_stream_set_cursor_position(crtc_state->stream,
						      &position);
			mutex_unlock(&adev->dm.dc_lock);
		}
		return;
	}

	amdgpu_crtc->cursor_width = plane->state->crtc_w;
	amdgpu_crtc->cursor_height = plane->state->crtc_h;

	memset(&attributes, 0, sizeof(attributes));
	attributes.address.high_part = upper_32_bits(address);
	attributes.address.low_part  = lower_32_bits(address);
	attributes.width             = plane->state->crtc_w;
	attributes.height            = plane->state->crtc_h;
	attributes.color_format      = CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA;
	attributes.rotation_angle    = 0;
	attributes.attribute_flags.value = 0;

	attributes.pitch = afb->base.pitches[0] / afb->base.format->cpp[0];

	if (crtc_state->stream) {
		mutex_lock(&adev->dm.dc_lock);
		if (!dc_stream_set_cursor_attributes(crtc_state->stream,
						     &attributes))
			DRM_ERROR("DC failed to set cursor attributes\n");

		if (!dc_stream_set_cursor_position(crtc_state->stream,
						   &position))
			DRM_ERROR("DC failed to set cursor position\n");
		mutex_unlock(&adev->dm.dc_lock);
	}
}
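/*
 * Pitch example (illustrative): a 64-pixel-wide ARGB8888 cursor fb has
 * pitches[0] = 64 * 4 = 256 bytes and cpp[0] = 4, so attributes.pitch above
 * works out to 256 / 4 = 64 - a pitch in pixels, which is what DC expects.
 */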
static void prepare_flip_isr(struct amdgpu_crtc *acrtc)
{
	assert_spin_locked(&acrtc->base.dev->event_lock);
	WARN_ON(acrtc->event);

	acrtc->event = acrtc->base.state->event;

	/* Set the flip status */
	acrtc->pflip_status = AMDGPU_FLIP_SUBMITTED;

	/* Mark this event as consumed */
	acrtc->base.state->event = NULL;

	DRM_DEBUG_DRIVER("crtc:%d, pflip_stat:AMDGPU_FLIP_SUBMITTED\n",
			 acrtc->crtc_id);
}
static void update_freesync_state_on_stream(
	struct amdgpu_display_manager *dm,
	struct dm_crtc_state *new_crtc_state,
	struct dc_stream_state *new_stream,
	struct dc_plane_state *surface,
	u32 flip_timestamp_in_us)
{
	struct mod_vrr_params vrr_params;
	struct dc_info_packet vrr_infopacket = {0};
	struct amdgpu_device *adev = dm->adev;
	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_crtc_state->base.crtc);
	unsigned long flags;

	if (!new_stream)
		return;

	/*
	 * TODO: Determine why min/max totals and vrefresh can be 0 here.
	 * For now it's sufficient to just guard against these conditions.
	 */

	if (!new_stream->timing.h_total || !new_stream->timing.v_total)
		return;

	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
	vrr_params = acrtc->dm_irq_params.vrr_params;

	if (surface) {
		mod_freesync_handle_preflip(
			dm->freesync_module,
			surface,
			new_stream,
			flip_timestamp_in_us,
			&vrr_params);

		if (adev->family < AMDGPU_FAMILY_AI &&
		    amdgpu_dm_vrr_active(new_crtc_state)) {
			mod_freesync_handle_v_update(dm->freesync_module,
						     new_stream, &vrr_params);

			/* Need to call this before the frame ends. */
			dc_stream_adjust_vmin_vmax(dm->dc,
						   new_crtc_state->stream,
						   &vrr_params.adjust);
		}
	}

	mod_freesync_build_vrr_infopacket(
		dm->freesync_module,
		new_stream,
		&vrr_params,
		PACKET_TYPE_VRR,
		TRANSFER_FUNC_UNKNOWN,
		&vrr_infopacket);

	new_crtc_state->freesync_timing_changed |=
		(memcmp(&acrtc->dm_irq_params.vrr_params.adjust,
			&vrr_params.adjust,
			sizeof(vrr_params.adjust)) != 0);

	new_crtc_state->freesync_vrr_info_changed |=
		(memcmp(&new_crtc_state->vrr_infopacket,
			&vrr_infopacket,
			sizeof(vrr_infopacket)) != 0);

	acrtc->dm_irq_params.vrr_params = vrr_params;
	new_crtc_state->vrr_infopacket = vrr_infopacket;

	new_stream->adjust = acrtc->dm_irq_params.vrr_params.adjust;
	new_stream->vrr_infopacket = vrr_infopacket;

	if (new_crtc_state->freesync_vrr_info_changed)
		DRM_DEBUG_KMS("VRR packet update: crtc=%u enabled=%d state=%d",
			      new_crtc_state->base.crtc->base.id,
			      (int)new_crtc_state->base.vrr_enabled,
			      (int)vrr_params.state);

	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
}
static void update_stream_irq_parameters(
	struct amdgpu_display_manager *dm,
	struct dm_crtc_state *new_crtc_state)
{
	struct dc_stream_state *new_stream = new_crtc_state->stream;
	struct mod_vrr_params vrr_params;
	struct mod_freesync_config config = new_crtc_state->freesync_config;
	struct amdgpu_device *adev = dm->adev;
	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_crtc_state->base.crtc);
	unsigned long flags;

	if (!new_stream)
		return;

	/*
	 * TODO: Determine why min/max totals and vrefresh can be 0 here.
	 * For now it's sufficient to just guard against these conditions.
	 */
	if (!new_stream->timing.h_total || !new_stream->timing.v_total)
		return;

	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
	vrr_params = acrtc->dm_irq_params.vrr_params;

	if (new_crtc_state->vrr_supported &&
	    config.min_refresh_in_uhz &&
	    config.max_refresh_in_uhz) {
		config.state = new_crtc_state->base.vrr_enabled ?
			VRR_STATE_ACTIVE_VARIABLE :
			VRR_STATE_INACTIVE;
	} else {
		config.state = VRR_STATE_UNSUPPORTED;
	}

	mod_freesync_build_vrr_params(dm->freesync_module,
				      new_stream,
				      &config, &vrr_params);

	new_crtc_state->freesync_timing_changed |=
		(memcmp(&acrtc->dm_irq_params.vrr_params.adjust,
			&vrr_params.adjust, sizeof(vrr_params.adjust)) != 0);

	new_crtc_state->freesync_config = config;
	/* Copy state for access from DM IRQ handler */
	acrtc->dm_irq_params.freesync_config = config;
	acrtc->dm_irq_params.active_planes = new_crtc_state->active_planes;
	acrtc->dm_irq_params.vrr_params = vrr_params;
	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
}
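/*
 * Units reminder: the freesync config carries refresh rates in uHz, so a
 * 48-144 Hz VRR panel would have min_refresh_in_uhz = 48000000 and
 * max_refresh_in_uhz = 144000000 (illustrative values).
 */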
static void amdgpu_dm_handle_vrr_transition(struct dm_crtc_state *old_state,
					    struct dm_crtc_state *new_state)
{
	bool old_vrr_active = amdgpu_dm_vrr_active(old_state);
	bool new_vrr_active = amdgpu_dm_vrr_active(new_state);

	if (!old_vrr_active && new_vrr_active) {
		/* Transition VRR inactive -> active:
		 * While VRR is active, we must not disable vblank irq, as a
		 * reenable after disable would compute bogus vblank/pflip
		 * timestamps if it likely happened inside display front-porch.
		 *
		 * We also need vupdate irq for the actual core vblank handling
		 * at end of vblank.
		 */
		dm_set_vupdate_irq(new_state->base.crtc, true);
		drm_crtc_vblank_get(new_state->base.crtc);
		DRM_DEBUG_DRIVER("%s: crtc=%u VRR off->on: Get vblank ref\n",
				 __func__, new_state->base.crtc->base.id);
	} else if (old_vrr_active && !new_vrr_active) {
		/* Transition VRR active -> inactive:
		 * Allow vblank irq disable again for fixed refresh rate.
		 */
		dm_set_vupdate_irq(new_state->base.crtc, false);
		drm_crtc_vblank_put(new_state->base.crtc);
		DRM_DEBUG_DRIVER("%s: crtc=%u VRR on->off: Drop vblank ref\n",
				 __func__, new_state->base.crtc->base.id);
	}
}
static void amdgpu_dm_commit_cursors(struct drm_atomic_state *state)
{
	struct drm_plane *plane;
	struct drm_plane_state *old_plane_state, *new_plane_state;
	int i;

	/*
	 * TODO: Make this per-stream so we don't issue redundant updates for
	 * commits with multiple streams.
	 */
	for_each_oldnew_plane_in_state(state, plane, old_plane_state,
				       new_plane_state, i)
		if (plane->type == DRM_PLANE_TYPE_CURSOR)
			handle_cursor_update(plane, old_plane_state);
}
static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
				    struct dc_state *dc_state,
				    struct drm_device *dev,
				    struct amdgpu_display_manager *dm,
				    struct drm_crtc *pcrtc,
				    bool wait_for_vblank)
{
	int i;
	uint64_t timestamp_ns;
	struct drm_plane *plane;
	struct drm_plane_state *old_plane_state, *new_plane_state;
	struct amdgpu_crtc *acrtc_attach = to_amdgpu_crtc(pcrtc);
	struct drm_crtc_state *new_pcrtc_state =
			drm_atomic_get_new_crtc_state(state, pcrtc);
	struct dm_crtc_state *acrtc_state = to_dm_crtc_state(new_pcrtc_state);
	struct dm_crtc_state *dm_old_crtc_state =
			to_dm_crtc_state(drm_atomic_get_old_crtc_state(state, pcrtc));
	int planes_count = 0, vpos, hpos;
	long r;
	unsigned long flags;
	struct amdgpu_bo *abo;
	uint32_t target_vblank, last_flip_vblank;
	bool vrr_active = amdgpu_dm_vrr_active(acrtc_state);
	bool pflip_present = false;
	struct {
		struct dc_surface_update surface_updates[MAX_SURFACES];
		struct dc_plane_info plane_infos[MAX_SURFACES];
		struct dc_scaling_info scaling_infos[MAX_SURFACES];
		struct dc_flip_addrs flip_addrs[MAX_SURFACES];
		struct dc_stream_update stream_update;
	} *bundle;

	bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);

	if (!bundle) {
		dm_error("Failed to allocate update bundle\n");
		goto cleanup;
	}

	/*
	 * Disable the cursor first if we're disabling all the planes.
	 * It'll remain on the screen after the planes are re-enabled
	 * if we don't.
	 */
	if (acrtc_state->active_planes == 0)
		amdgpu_dm_commit_cursors(state);
	/* update planes when needed */
	for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
		struct drm_crtc *crtc = new_plane_state->crtc;
		struct drm_crtc_state *new_crtc_state;
		struct drm_framebuffer *fb = new_plane_state->fb;
		struct amdgpu_framebuffer *afb = (struct amdgpu_framebuffer *)fb;
		bool plane_needs_flip;
		struct dc_plane_state *dc_plane;
		struct dm_plane_state *dm_new_plane_state = to_dm_plane_state(new_plane_state);

		/* Cursor plane is handled after stream updates */
		if (plane->type == DRM_PLANE_TYPE_CURSOR)
			continue;

		if (!fb || !crtc || pcrtc != crtc)
			continue;

		new_crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
		if (!new_crtc_state->active)
			continue;

		dc_plane = dm_new_plane_state->dc_state;

		bundle->surface_updates[planes_count].surface = dc_plane;
		if (new_pcrtc_state->color_mgmt_changed) {
			bundle->surface_updates[planes_count].gamma = dc_plane->gamma_correction;
			bundle->surface_updates[planes_count].in_transfer_func = dc_plane->in_transfer_func;
			bundle->surface_updates[planes_count].gamut_remap_matrix = &dc_plane->gamut_remap_matrix;
		}

		fill_dc_scaling_info(new_plane_state,
				     &bundle->scaling_infos[planes_count]);

		bundle->surface_updates[planes_count].scaling_info =
			&bundle->scaling_infos[planes_count];

		plane_needs_flip = old_plane_state->fb && new_plane_state->fb;

		pflip_present = pflip_present || plane_needs_flip;

		if (!plane_needs_flip) {
			planes_count += 1;
			continue;
		}

		abo = gem_to_amdgpu_bo(fb->obj[0]);

		/*
		 * Wait for all fences on this FB. Do limited wait to avoid
		 * deadlock during GPU reset when this fence will not signal
		 * but we hold reservation lock for the BO.
		 */
		r = dma_resv_wait_timeout_rcu(abo->tbo.base.resv, true,
					      false,
					      msecs_to_jiffies(5000));
		if (unlikely(r <= 0))
			DRM_ERROR("Waiting for fences timed out!");

		fill_dc_plane_info_and_addr(
			dm->adev, new_plane_state,
			afb->tiling_flags,
			&bundle->plane_infos[planes_count],
			&bundle->flip_addrs[planes_count].address,
			afb->tmz_surface, false);

		DRM_DEBUG_DRIVER("plane: id=%d dcc_en=%d\n",
				 new_plane_state->plane->index,
				 bundle->plane_infos[planes_count].dcc.enable);

		bundle->surface_updates[planes_count].plane_info =
			&bundle->plane_infos[planes_count];

		/*
		 * Only allow immediate flips for fast updates that don't
		 * change FB pitch, DCC state, rotation or mirroring.
		 */
		bundle->flip_addrs[planes_count].flip_immediate =
			crtc->state->async_flip &&
			acrtc_state->update_type == UPDATE_TYPE_FAST;

		timestamp_ns = ktime_get_ns();
		bundle->flip_addrs[planes_count].flip_timestamp_in_us = div_u64(timestamp_ns, 1000);
		bundle->surface_updates[planes_count].flip_addr = &bundle->flip_addrs[planes_count];
		bundle->surface_updates[planes_count].surface = dc_plane;

		if (!bundle->surface_updates[planes_count].surface) {
			DRM_ERROR("No surface for CRTC: id=%d\n",
				  acrtc_attach->crtc_id);
			continue;
		}

		if (plane == pcrtc->primary)
			update_freesync_state_on_stream(
				dm,
				acrtc_state,
				acrtc_state->stream,
				dc_plane,
				bundle->flip_addrs[planes_count].flip_timestamp_in_us);

		DRM_DEBUG_DRIVER("%s Flipping to hi: 0x%x, low: 0x%x\n",
				 __func__,
				 bundle->flip_addrs[planes_count].address.grph.addr.high_part,
				 bundle->flip_addrs[planes_count].address.grph.addr.low_part);

		planes_count += 1;
	}
	if (pflip_present) {
		if (!vrr_active) {
			/* Use old throttling in non-vrr fixed refresh rate mode
			 * to keep flip scheduling based on target vblank counts
			 * working in a backwards compatible way, e.g., for
			 * clients using the GLX_OML_sync_control extension or
			 * DRI3/Present extension with defined target_msc.
			 */
			last_flip_vblank = amdgpu_get_vblank_counter_kms(pcrtc);
		} else {
			/* For variable refresh rate mode only:
			 * Get vblank of last completed flip to avoid > 1 vrr
			 * flips per video frame by use of throttling, but allow
			 * flip programming anywhere in the possibly large
			 * variable vrr vblank interval for fine-grained flip
			 * timing control and more opportunity to avoid stutter
			 * on late submission of flips.
			 */
			spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
			last_flip_vblank = acrtc_attach->dm_irq_params.last_flip_vblank;
			spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
		}

		target_vblank = last_flip_vblank + wait_for_vblank;

		/*
		 * Wait until we're out of the vertical blank period before the one
		 * targeted by the flip
		 */
		while ((acrtc_attach->enabled &&
			(amdgpu_display_get_crtc_scanoutpos(dm->ddev, acrtc_attach->crtc_id,
							    0, &vpos, &hpos, NULL,
							    NULL, &pcrtc->hwmode)
			 & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK)) ==
			(DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK) &&
			(int)(target_vblank -
			      amdgpu_get_vblank_counter_kms(pcrtc)) > 0)) {
			usleep_range(1000, 1100);
		}
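		/*
		 * Throttle example (hypothetical counters): with
		 * wait_for_vblank == true and last_flip_vblank == 1000,
		 * target_vblank is 1001; the loop above sleeps in ~1 ms steps
		 * while scanout is still inside a vblank and the counter has
		 * not yet reached 1001, so the flip programmed below lands in
		 * the intended frame.
		 */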
		/**
		 * Prepare the flip event for the pageflip interrupt to handle.
		 *
		 * This only works in the case where we've already turned on the
		 * appropriate hardware blocks (eg. HUBP) so in the transition case
		 * from 0 -> n planes we have to skip a hardware generated event
		 * and rely on sending it from software.
		 */
		if (acrtc_attach->base.state->event &&
		    acrtc_state->active_planes > 0) {
			drm_crtc_vblank_get(pcrtc);

			spin_lock_irqsave(&pcrtc->dev->event_lock, flags);

			WARN_ON(acrtc_attach->pflip_status != AMDGPU_FLIP_NONE);
			prepare_flip_isr(acrtc_attach);

			spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
		}

		if (acrtc_state->stream) {
			if (acrtc_state->freesync_vrr_info_changed)
				bundle->stream_update.vrr_infopacket =
					&acrtc_state->stream->vrr_infopacket;
		}
	}
	/* Update the planes if changed or disable if we don't have any. */
	if ((planes_count || acrtc_state->active_planes == 0) &&
	    acrtc_state->stream) {
		bundle->stream_update.stream = acrtc_state->stream;
		if (new_pcrtc_state->mode_changed) {
			bundle->stream_update.src = acrtc_state->stream->src;
			bundle->stream_update.dst = acrtc_state->stream->dst;
		}

		if (new_pcrtc_state->color_mgmt_changed) {
			/*
			 * TODO: This isn't fully correct since we've actually
			 * already modified the stream in place.
			 */
			bundle->stream_update.gamut_remap =
				&acrtc_state->stream->gamut_remap_matrix;
			bundle->stream_update.output_csc_transform =
				&acrtc_state->stream->csc_color_matrix;
			bundle->stream_update.out_transfer_func =
				acrtc_state->stream->out_transfer_func;
		}

		acrtc_state->stream->abm_level = acrtc_state->abm_level;
		if (acrtc_state->abm_level != dm_old_crtc_state->abm_level)
			bundle->stream_update.abm_level = &acrtc_state->abm_level;

		/*
		 * If FreeSync state on the stream has changed then we need to
		 * re-adjust the min/max bounds now that DC doesn't handle this
		 * as part of commit.
		 */
		if (amdgpu_dm_vrr_active(dm_old_crtc_state) !=
		    amdgpu_dm_vrr_active(acrtc_state)) {
			spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
			dc_stream_adjust_vmin_vmax(
				dm->dc, acrtc_state->stream,
				&acrtc_attach->dm_irq_params.vrr_params.adjust);
			spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
		}
		mutex_lock(&dm->dc_lock);
		if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
		    acrtc_state->stream->link->psr_settings.psr_allow_active)
			amdgpu_dm_psr_disable(acrtc_state->stream);
		dc_commit_updates_for_stream(dm->dc,
					     bundle->surface_updates,
					     planes_count,
					     acrtc_state->stream,
					     &bundle->stream_update);

		/**
		 * Enable or disable the interrupts on the backend.
		 *
		 * Most pipes are put into power gating when unused.
		 *
		 * When power gating is enabled on a pipe we lose the
		 * interrupt enablement state when power gating is disabled.
		 *
		 * So we need to update the IRQ control state in hardware
		 * whenever the pipe turns on (since it could be previously
		 * power gated) or off (since some pipes can't be power gated
		 * on some ASICs).
		 */
		if (dm_old_crtc_state->active_planes != acrtc_state->active_planes)
			dm_update_pflip_irq_state(drm_to_adev(dev),
						  acrtc_attach);

		if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
		    acrtc_state->stream->link->psr_settings.psr_version != DC_PSR_VERSION_UNSUPPORTED &&
		    !acrtc_state->stream->link->psr_settings.psr_feature_enabled)
			amdgpu_dm_link_setup_psr(acrtc_state->stream);
		else if ((acrtc_state->update_type == UPDATE_TYPE_FAST) &&
			 acrtc_state->stream->link->psr_settings.psr_feature_enabled &&
			 !acrtc_state->stream->link->psr_settings.psr_allow_active) {
			amdgpu_dm_psr_enable(acrtc_state->stream);
		}

		mutex_unlock(&dm->dc_lock);
	}

	/*
	 * Update cursor state *after* programming all the planes.
	 * This avoids redundant programming in the case where we're going
	 * to be disabling a single plane - those pipes are being disabled.
	 */
	if (acrtc_state->active_planes)
		amdgpu_dm_commit_cursors(state);

cleanup:
	kfree(bundle);
}
static void amdgpu_dm_commit_audio(struct drm_device *dev,
				   struct drm_atomic_state *state)
{
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct amdgpu_dm_connector *aconnector;
	struct drm_connector *connector;
	struct drm_connector_state *old_con_state, *new_con_state;
	struct drm_crtc_state *new_crtc_state;
	struct dm_crtc_state *new_dm_crtc_state;
	const struct dc_stream_status *status;
	int i, inst;

	/* Notify device removals. */
	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
		if (old_con_state->crtc != new_con_state->crtc) {
			/* CRTC changes require notification. */
			goto notify;
		}

		if (!new_con_state->crtc)
			continue;

		new_crtc_state = drm_atomic_get_new_crtc_state(
			state, new_con_state->crtc);

		if (!new_crtc_state)
			continue;

		if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
			continue;

	notify:
		aconnector = to_amdgpu_dm_connector(connector);

		mutex_lock(&adev->dm.audio_lock);
		inst = aconnector->audio_inst;
		aconnector->audio_inst = -1;
		mutex_unlock(&adev->dm.audio_lock);

		amdgpu_dm_audio_eld_notify(adev, inst);
	}

	/* Notify audio device additions. */
	for_each_new_connector_in_state(state, connector, new_con_state, i) {
		if (!new_con_state->crtc)
			continue;

		new_crtc_state = drm_atomic_get_new_crtc_state(
			state, new_con_state->crtc);

		if (!new_crtc_state)
			continue;

		if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
			continue;

		new_dm_crtc_state = to_dm_crtc_state(new_crtc_state);
		if (!new_dm_crtc_state->stream)
			continue;

		status = dc_stream_get_status(new_dm_crtc_state->stream);
		if (!status)
			continue;

		aconnector = to_amdgpu_dm_connector(connector);

		mutex_lock(&adev->dm.audio_lock);
		inst = status->audio_inst;
		aconnector->audio_inst = inst;
		mutex_unlock(&adev->dm.audio_lock);

		amdgpu_dm_audio_eld_notify(adev, inst);
	}
}
/**
 * amdgpu_dm_crtc_copy_transient_flags - copy mirrored flags from DRM to DC
 * @crtc_state: the DRM CRTC state
 * @stream_state: the DC stream state.
 *
 * Copy the mirrored transient state flags from DRM, to DC. It is used to bring
 * a dc_stream_state's flags in sync with a drm_crtc_state's flags.
 */
static void amdgpu_dm_crtc_copy_transient_flags(struct drm_crtc_state *crtc_state,
						struct dc_stream_state *stream_state)
{
	stream_state->mode_changed = drm_atomic_crtc_needs_modeset(crtc_state);
}
/**
 * amdgpu_dm_atomic_commit_tail() - AMDgpu DM's commit tail implementation.
 * @state: The atomic state to commit
 *
 * This will tell DC to commit the constructed DC state from atomic_check,
 * programming the hardware. Any failures here imply a hardware failure, since
 * atomic check should have filtered anything non-kosher.
 */
static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
{
	struct drm_device *dev = state->dev;
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct amdgpu_display_manager *dm = &adev->dm;
	struct dm_atomic_state *dm_state;
	struct dc_state *dc_state = NULL, *dc_state_temp = NULL;
	uint32_t i, j;
	struct drm_crtc *crtc;
	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
	unsigned long flags;
	bool wait_for_vblank = true;
	struct drm_connector *connector;
	struct drm_connector_state *old_con_state, *new_con_state;
	struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
	int crtc_disable_count = 0;
	bool mode_set_reset_required = false;

	trace_amdgpu_dm_atomic_commit_tail_begin(state);

	drm_atomic_helper_update_legacy_modeset_state(dev, state);

	dm_state = dm_atomic_get_new_state(state);
	if (dm_state && dm_state->context) {
		dc_state = dm_state->context;
	} else {
		/* No state changes, retain current state. */
		dc_state_temp = dc_create_state(dm->dc);
		ASSERT(dc_state_temp);
		dc_state = dc_state_temp;
		dc_resource_state_copy_construct_current(dm->dc, dc_state);
	}

	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
				      new_crtc_state, i) {
		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);

		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);

		if (old_crtc_state->active &&
		    (!new_crtc_state->active ||
		     drm_atomic_crtc_needs_modeset(new_crtc_state))) {
			manage_dm_interrupts(adev, acrtc, false);
			dc_stream_release(dm_old_crtc_state->stream);
		}
	}

	drm_atomic_helper_calc_timestamping_constants(state);
	/* update changed items */
	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);

		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);

		DRM_DEBUG_DRIVER(
			"amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
			"planes_changed:%d, mode_changed:%d,active_changed:%d,"
			"connectors_changed:%d\n",
			acrtc->crtc_id,
			new_crtc_state->enable,
			new_crtc_state->active,
			new_crtc_state->planes_changed,
			new_crtc_state->mode_changed,
			new_crtc_state->active_changed,
			new_crtc_state->connectors_changed);

		/* Disable cursor if disabling crtc */
		if (old_crtc_state->active && !new_crtc_state->active) {
			struct dc_cursor_position position;

			memset(&position, 0, sizeof(position));
			mutex_lock(&dm->dc_lock);
			dc_stream_set_cursor_position(dm_old_crtc_state->stream, &position);
			mutex_unlock(&dm->dc_lock);
		}

		/* Copy all transient state flags into dc state */
		if (dm_new_crtc_state->stream) {
			amdgpu_dm_crtc_copy_transient_flags(&dm_new_crtc_state->base,
							    dm_new_crtc_state->stream);
		}

		/* handles headless hotplug case, updating new_state and
		 * aconnector as needed
		 */

		if (modeset_required(new_crtc_state, dm_new_crtc_state->stream, dm_old_crtc_state->stream)) {

			DRM_DEBUG_DRIVER("Atomic commit: SET crtc id %d: [%p]\n", acrtc->crtc_id, acrtc);

			if (!dm_new_crtc_state->stream) {
				/*
				 * this could happen because of issues with
				 * userspace notifications delivery.
				 * In this case userspace tries to set mode on
				 * display which is disconnected in fact.
				 * dc_sink is NULL in this case on aconnector.
				 * We expect reset mode will come soon.
				 *
				 * This can also happen when unplug is done
				 * during resume sequence ended
				 *
				 * In this case, we want to pretend we still
				 * have a sink to keep the pipe running so that
				 * hw state is consistent with the sw state
				 */
				DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
						 __func__, acrtc->base.base.id);
				continue;
			}

			if (dm_old_crtc_state->stream)
				remove_stream(adev, acrtc, dm_old_crtc_state->stream);

			pm_runtime_get_noresume(dev->dev);

			acrtc->enabled = true;
			acrtc->hw_mode = new_crtc_state->mode;
			crtc->hwmode = new_crtc_state->mode;
			mode_set_reset_required = true;
		} else if (modereset_required(new_crtc_state)) {
			DRM_DEBUG_DRIVER("Atomic commit: RESET. crtc id %d:[%p]\n", acrtc->crtc_id, acrtc);
			/* i.e. reset mode */
			if (dm_old_crtc_state->stream)
				remove_stream(adev, acrtc, dm_old_crtc_state->stream);
			mode_set_reset_required = true;
		}
	} /* for_each_crtc_in_state() */
	/* If there was a mode set or reset, disable eDP PSR. */
	if (mode_set_reset_required)
		amdgpu_dm_psr_disable_all(dm);

	dm_enable_per_frame_crtc_master_sync(dc_state);
	mutex_lock(&dm->dc_lock);
	WARN_ON(!dc_commit_state(dm->dc, dc_state));
	mutex_unlock(&dm->dc_lock);
	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);

		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);

		if (dm_new_crtc_state->stream != NULL) {
			const struct dc_stream_status *status =
					dc_stream_get_status(dm_new_crtc_state->stream);

			if (!status)
				status = dc_stream_get_status_from_state(dc_state,
									 dm_new_crtc_state->stream);
			if (!status)
				DC_ERR("got no status for stream %p on acrtc%p\n", dm_new_crtc_state->stream, acrtc);
			else
				acrtc->otg_inst = status->primary_otg_inst;
		}
	}
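	/*
	 * The cached OTG (output timing generator) instance is what lets the
	 * vblank and pageflip IRQ handlers map an interrupt source back to
	 * its CRTC. The status is also looked up in the just-committed
	 * dc_state as a fallback in case the stream is not yet reflected in
	 * DC's current state when this runs.
	 */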
#ifdef CONFIG_DRM_AMD_DC_HDCP
	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
		struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);

		new_crtc_state = NULL;

		if (acrtc)
			new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);

		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);

		if (dm_new_crtc_state && dm_new_crtc_state->stream == NULL &&
		    connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED) {
			hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
			new_con_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
			dm_new_con_state->update_hdcp = true;
			continue;
		}

		if (is_content_protection_different(new_con_state, old_con_state, connector, adev->dm.hdcp_workqueue))
			hdcp_update_display(
				adev->dm.hdcp_workqueue, aconnector->dc_link->link_index, aconnector,
				new_con_state->hdcp_content_type,
				new_con_state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED ? true
													 : false);
	}
#endif
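	/*
	 * Note on the HDCP downgrade above: if a stream is torn down while
	 * content protection is ENABLED, the connector property is moved
	 * back to DESIRED. Userspace reads DESIRED as "re-enable protection
	 * when possible", so HDCP is re-authenticated on the next enable
	 * instead of pretending protection survived the disable.
	 */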
	/* Handle connector state changes */
	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
		struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
		struct dc_surface_update surface_updates[MAX_SURFACES];
		struct dc_stream_update stream_update;
		struct dc_info_packet hdr_packet;
		struct dc_stream_status *status = NULL;
		bool abm_changed, hdr_changed, scaling_changed;

		memset(&surface_updates, 0, sizeof(surface_updates));
		memset(&stream_update, 0, sizeof(stream_update));

		if (acrtc) {
			new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
			old_crtc_state = drm_atomic_get_old_crtc_state(state, &acrtc->base);
		}

		/* Skip any modesets/resets */
		if (!acrtc || drm_atomic_crtc_needs_modeset(new_crtc_state))
			continue;

		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);

		scaling_changed = is_scaling_state_different(dm_new_con_state,
							     dm_old_con_state);

		abm_changed = dm_new_crtc_state->abm_level !=
			      dm_old_crtc_state->abm_level;

		hdr_changed =
			is_hdr_metadata_different(old_con_state, new_con_state);

		if (!scaling_changed && !abm_changed && !hdr_changed)
			continue;

		stream_update.stream = dm_new_crtc_state->stream;
		if (scaling_changed) {
			update_stream_scaling_settings(&dm_new_con_state->base.crtc->mode,
					dm_new_con_state, dm_new_crtc_state->stream);

			stream_update.src = dm_new_crtc_state->stream->src;
			stream_update.dst = dm_new_crtc_state->stream->dst;
		}

		if (abm_changed) {
			dm_new_crtc_state->stream->abm_level = dm_new_crtc_state->abm_level;

			stream_update.abm_level = &dm_new_crtc_state->abm_level;
		}

		if (hdr_changed) {
			fill_hdr_info_packet(new_con_state, &hdr_packet);
			stream_update.hdr_static_metadata = &hdr_packet;
		}

		status = dc_stream_get_status(dm_new_crtc_state->stream);
		WARN_ON(!status);
		WARN_ON(!status->plane_count);

		/*
		 * TODO: DC refuses to perform stream updates without a dc_surface_update.
		 * Here we create an empty update on each plane.
		 * To fix this, DC should permit updating only stream properties.
		 */
		for (j = 0; j < status->plane_count; j++)
			surface_updates[j].surface = status->plane_states[j];

		mutex_lock(&dm->dc_lock);
		dc_commit_updates_for_stream(dm->dc,
					     surface_updates,
					     status->plane_count,
					     dm_new_crtc_state->stream,
					     &stream_update);
		mutex_unlock(&dm->dc_lock);
	}
	/* Count number of newly disabled CRTCs for dropping PM refs later. */
	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
				      new_crtc_state, i) {
		if (old_crtc_state->active && !new_crtc_state->active)
			crtc_disable_count++;

		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);

		/* For freesync config update on crtc state and params for irq */
		update_stream_irq_parameters(dm, dm_new_crtc_state);

		/* Handle vrr on->off / off->on transitions */
		amdgpu_dm_handle_vrr_transition(dm_old_crtc_state,
						dm_new_crtc_state);
	}

	/*
	 * Enable interrupts for CRTCs that are newly enabled or went through
	 * a modeset. It was intentionally deferred until after the front end
	 * state was modified to wait until the OTG was on and so the IRQ
	 * handlers didn't access stale or invalid state.
	 */
	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);

		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);

		if (new_crtc_state->active &&
		    (!old_crtc_state->active ||
		     drm_atomic_crtc_needs_modeset(new_crtc_state))) {
			dc_stream_retain(dm_new_crtc_state->stream);
			acrtc->dm_irq_params.stream = dm_new_crtc_state->stream;
			manage_dm_interrupts(adev, acrtc, true);

#ifdef CONFIG_DEBUG_FS
			/*
			 * The frontend may have changed, so reapply the CRC
			 * capture settings for the stream.
			 */
			dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);

			if (amdgpu_dm_is_valid_crc_source(dm_new_crtc_state->crc_src)) {
				amdgpu_dm_crtc_configure_crc_source(
					crtc, dm_new_crtc_state,
					dm_new_crtc_state->crc_src);
			}
#endif
		}
	}
	for_each_new_crtc_in_state(state, crtc, new_crtc_state, j)
		if (new_crtc_state->async_flip)
			wait_for_vblank = false;
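	/*
	 * A single async flip in the new state disables the vblank wait for
	 * the entire commit: async flips are meant to complete without
	 * stalling, so waiting for flip_done here would defeat their purpose.
	 */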
	/* Update planes when needed per crtc */
	for_each_new_crtc_in_state(state, crtc, new_crtc_state, j) {
		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);

		if (dm_new_crtc_state->stream)
			amdgpu_dm_commit_planes(state, dc_state, dev,
						dm, crtc, wait_for_vblank);
	}

	/* Update audio instances for each connector. */
	amdgpu_dm_commit_audio(dev, state);

	/*
	 * Send a vblank event for each event not handled in flip and
	 * mark the consumed event for drm_atomic_helper_commit_hw_done.
	 */
	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {

		if (new_crtc_state->event)
			drm_send_event_locked(dev, &new_crtc_state->event->base);

		new_crtc_state->event = NULL;
	}
	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);

	/* Signal HW programming completion */
	drm_atomic_helper_commit_hw_done(state);

	if (wait_for_vblank)
		drm_atomic_helper_wait_for_flip_done(dev, state);

	drm_atomic_helper_cleanup_planes(dev, state);

	/* Return the stolen VGA memory back to VRAM */
	if (!adev->mman.keep_stolen_vga_memory)
		amdgpu_bo_free_kernel(&adev->mman.stolen_vga_memory, NULL, NULL);
	amdgpu_bo_free_kernel(&adev->mman.stolen_extended_memory, NULL, NULL);

	/*
	 * Finally, drop a runtime PM reference for each newly disabled CRTC,
	 * so we can put the GPU into runtime suspend if we're not driving any
	 * displays anymore.
	 */
	for (i = 0; i < crtc_disable_count; i++)
		pm_runtime_put_autosuspend(dev->dev);
	pm_runtime_mark_last_busy(dev->dev);

	if (dc_state_temp)
		dc_release_state(dc_state_temp);
}
static int dm_force_atomic_commit(struct drm_connector *connector)
{
	int ret = 0;
	struct drm_device *ddev = connector->dev;
	struct drm_atomic_state *state = drm_atomic_state_alloc(ddev);
	struct amdgpu_crtc *disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
	struct drm_plane *plane = disconnected_acrtc->base.primary;
	struct drm_connector_state *conn_state;
	struct drm_crtc_state *crtc_state;
	struct drm_plane_state *plane_state;

	if (!state)
		return -ENOMEM;

	state->acquire_ctx = ddev->mode_config.acquire_ctx;

	/* Construct an atomic state to restore the previous display setting */

	/*
	 * Attach connectors to drm_atomic_state
	 */
	conn_state = drm_atomic_get_connector_state(state, connector);

	ret = PTR_ERR_OR_ZERO(conn_state);
	if (ret)
		goto out;

	/* Attach crtc to drm_atomic_state */
	crtc_state = drm_atomic_get_crtc_state(state, &disconnected_acrtc->base);

	ret = PTR_ERR_OR_ZERO(crtc_state);
	if (ret)
		goto out;

	/* Force a restore */
	crtc_state->mode_changed = true;

	/* Attach plane to drm_atomic_state */
	plane_state = drm_atomic_get_plane_state(state, plane);

	ret = PTR_ERR_OR_ZERO(plane_state);
	if (ret)
		goto out;

	/* Call commit internally with the state we just constructed */
	ret = drm_atomic_commit(state);

out:
	drm_atomic_state_put(state);
	if (ret)
		DRM_ERROR("Restoring old state failed with %i\n", ret);

	return ret;
}
/*
 * This function handles all cases when a set mode does not come upon
 * hotplug. This includes when a display is unplugged and then plugged back
 * into the same port, and when running without usermode desktop manager
 * support.
 */
void dm_restore_drm_connector_state(struct drm_device *dev,
				    struct drm_connector *connector)
{
	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
	struct amdgpu_crtc *disconnected_acrtc;
	struct dm_crtc_state *acrtc_state;

	if (!aconnector->dc_sink || !connector->state || !connector->encoder)
		return;

	disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
	if (!disconnected_acrtc)
		return;

	acrtc_state = to_dm_crtc_state(disconnected_acrtc->base.state);
	if (!acrtc_state->stream)
		return;

	/*
	 * If the previous sink is not released and different from the current,
	 * we deduce we are in a state where we cannot rely on the usermode
	 * call to turn on the display, so we do it here.
	 */
	if (acrtc_state->stream->sink != aconnector->dc_sink)
		dm_force_atomic_commit(&aconnector->base);
}
/*
 * Grabs all modesetting locks to serialize against any blocking commits,
 * and waits for completion of all non-blocking commits.
 */
static int do_aquire_global_lock(struct drm_device *dev,
				 struct drm_atomic_state *state)
{
	struct drm_crtc *crtc;
	struct drm_crtc_commit *commit;
	long ret;

	/*
	 * Adding all modeset locks to acquire_ctx ensures that when the
	 * framework releases it, the extra locks we are taking here will
	 * also get released.
	 */
	ret = drm_modeset_lock_all_ctx(dev, state->acquire_ctx);
	if (ret)
		return ret;

	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
		spin_lock(&crtc->commit_lock);
		commit = list_first_entry_or_null(&crtc->commit_list,
						  struct drm_crtc_commit, commit_entry);
		if (commit)
			drm_crtc_commit_get(commit);
		spin_unlock(&crtc->commit_lock);

		if (!commit)
			continue;

		/*
		 * Make sure all pending HW programming completed and
		 * page flips done
		 */
		ret = wait_for_completion_interruptible_timeout(&commit->hw_done, 10*HZ);

		if (ret > 0)
			ret = wait_for_completion_interruptible_timeout(
					&commit->flip_done, 10*HZ);

		if (ret == 0)
			DRM_ERROR("[CRTC:%d:%s] hw_done or flip_done timed out\n",
				  crtc->base.id, crtc->name);

		drm_crtc_commit_put(commit);
	}

	return ret < 0 ? ret : 0;
}
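/*
 * Note on the timeouts in do_aquire_global_lock():
 * wait_for_completion_interruptible_timeout() returns 0 on timeout, a
 * negative errno if interrupted and the remaining jiffies otherwise, so each
 * outstanding commit gets up to 10*HZ jiffies (10 seconds) for hw_done and
 * again for flip_done; the final "ret < 0 ? ret : 0" collapses any leftover
 * jiffies count into success.
 */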
static void get_freesync_config_for_crtc(
	struct dm_crtc_state *new_crtc_state,
	struct dm_connector_state *new_con_state)
{
	struct mod_freesync_config config = {0};
	struct amdgpu_dm_connector *aconnector =
			to_amdgpu_dm_connector(new_con_state->base.connector);
	struct drm_display_mode *mode = &new_crtc_state->base.mode;
	int vrefresh = drm_mode_vrefresh(mode);

	new_crtc_state->vrr_supported = new_con_state->freesync_capable &&
					vrefresh >= aconnector->min_vfreq &&
					vrefresh <= aconnector->max_vfreq;

	if (new_crtc_state->vrr_supported) {
		new_crtc_state->stream->ignore_msa_timing_param = true;
		config.state = new_crtc_state->base.vrr_enabled ?
				VRR_STATE_ACTIVE_VARIABLE :
				VRR_STATE_INACTIVE;
		config.min_refresh_in_uhz = aconnector->min_vfreq * 1000000;
		config.max_refresh_in_uhz = aconnector->max_vfreq * 1000000;
		config.vsif_supported = true;
		config.btr = true;
	}

	new_crtc_state->freesync_config = config;
}
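/*
 * Worked example for the unit conversion above: a FreeSync panel with a
 * 48-144 Hz range yields config.min_refresh_in_uhz = 48,000,000 and
 * config.max_refresh_in_uhz = 144,000,000, since the mod_freesync interface
 * takes refresh rates in micro-Hz. A 60 Hz mode falls inside 48..144, so
 * vrr_supported is set for that CRTC.
 */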
static void reset_freesync_config_for_crtc(
	struct dm_crtc_state *new_crtc_state)
{
	new_crtc_state->vrr_supported = false;

	memset(&new_crtc_state->vrr_infopacket, 0,
	       sizeof(new_crtc_state->vrr_infopacket));
}
static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
				struct drm_atomic_state *state,
				struct drm_crtc *crtc,
				struct drm_crtc_state *old_crtc_state,
				struct drm_crtc_state *new_crtc_state,
				bool enable,
				bool *lock_and_validation_needed)
{
	struct dm_atomic_state *dm_state = NULL;
	struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
	struct dc_stream_state *new_stream;
	int ret = 0;

	/*
	 * TODO Move this code into dm_crtc_atomic_check once we get rid of dc_validation_set
	 * update changed items
	 */
	struct amdgpu_crtc *acrtc = NULL;
	struct amdgpu_dm_connector *aconnector = NULL;
	struct drm_connector_state *drm_new_conn_state = NULL, *drm_old_conn_state = NULL;
	struct dm_connector_state *dm_new_conn_state = NULL, *dm_old_conn_state = NULL;

	new_stream = NULL;

	dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
	dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
	acrtc = to_amdgpu_crtc(crtc);
	aconnector = amdgpu_dm_find_first_crtc_matching_connector(state, crtc);

	/* TODO This hack should go away */
	if (aconnector && enable) {
		/* Make sure fake sink is created in plug-in scenario */
		drm_new_conn_state = drm_atomic_get_new_connector_state(state,
									&aconnector->base);
		drm_old_conn_state = drm_atomic_get_old_connector_state(state,
									&aconnector->base);

		if (IS_ERR(drm_new_conn_state)) {
			ret = PTR_ERR_OR_ZERO(drm_new_conn_state);
			goto fail;
		}

		dm_new_conn_state = to_dm_connector_state(drm_new_conn_state);
		dm_old_conn_state = to_dm_connector_state(drm_old_conn_state);

		if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
			goto skip_modeset;

		new_stream = create_validate_stream_for_sink(aconnector,
							     &new_crtc_state->mode,
							     dm_new_conn_state,
							     dm_old_crtc_state->stream);

		/*
		 * We can have no stream on ACTION_SET if a display
		 * was disconnected during S3; in this case it is not an
		 * error, the OS will be updated after detection and
		 * will do the right thing on the next atomic commit.
		 */
		if (!new_stream) {
			DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
					 __func__, acrtc->base.base.id);
			ret = -ENOMEM;
			goto fail;
		}

		/*
		 * TODO: Check VSDB bits to decide whether this should
		 * be enabled or not.
		 */
		new_stream->triggered_crtc_reset.enabled =
			dm->force_timing_sync;

		dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;

		ret = fill_hdr_info_packet(drm_new_conn_state,
					   &new_stream->hdr_static_metadata);
		if (ret)
			goto fail;

		/*
		 * If we already removed the old stream from the context
		 * (and set the new stream to NULL) then we can't reuse
		 * the old stream even if the stream and scaling are unchanged.
		 * We'll hit the BUG_ON and black screen.
		 *
		 * TODO: Refactor this function to allow this check to work
		 * in all conditions.
		 */
		if (dm_new_crtc_state->stream &&
		    dc_is_stream_unchanged(new_stream, dm_old_crtc_state->stream) &&
		    dc_is_stream_scaling_unchanged(new_stream, dm_old_crtc_state->stream)) {
			new_crtc_state->mode_changed = false;
			DRM_DEBUG_DRIVER("Mode change not required, setting mode_changed to %d",
					 new_crtc_state->mode_changed);
		}
	}
	/* mode_changed flag may get updated above, need to check again */
	if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
		goto skip_modeset;

	DRM_DEBUG_DRIVER(
		"amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
		"planes_changed:%d, mode_changed:%d, active_changed:%d, "
		"connectors_changed:%d\n",
		acrtc->crtc_id,
		new_crtc_state->enable,
		new_crtc_state->active,
		new_crtc_state->planes_changed,
		new_crtc_state->mode_changed,
		new_crtc_state->active_changed,
		new_crtc_state->connectors_changed);

	/* Remove stream for any changed/disabled CRTC */
	if (!enable) {

		if (!dm_old_crtc_state->stream)
			goto skip_modeset;

		ret = dm_atomic_get_state(state, &dm_state);
		if (ret)
			goto fail;

		DRM_DEBUG_DRIVER("Disabling DRM crtc: %d\n",
				 crtc->base.id);

		/* i.e. reset mode */
		if (dc_remove_stream_from_ctx(
				dm->dc,
				dm_state->context,
				dm_old_crtc_state->stream) != DC_OK) {
			ret = -EINVAL;
			goto fail;
		}

		dc_stream_release(dm_old_crtc_state->stream);
		dm_new_crtc_state->stream = NULL;

		reset_freesync_config_for_crtc(dm_new_crtc_state);

		*lock_and_validation_needed = true;

	} else { /* Add stream for any updated/enabled CRTC */
		/*
		 * Quick fix to prevent a NULL pointer dereference on new_stream
		 * when added MST connectors are not found in the existing
		 * crtc_state in chained (daisy-chain) mode.
		 * TODO: need to dig out the root cause of this.
		 */
		if (!aconnector || (!aconnector->dc_sink && aconnector->mst_port))
			goto skip_modeset;

		if (modereset_required(new_crtc_state))
			goto skip_modeset;

		if (modeset_required(new_crtc_state, new_stream,
				     dm_old_crtc_state->stream)) {

			WARN_ON(dm_new_crtc_state->stream);

			ret = dm_atomic_get_state(state, &dm_state);
			if (ret)
				goto fail;

			dm_new_crtc_state->stream = new_stream;

			dc_stream_retain(new_stream);

			DRM_DEBUG_DRIVER("Enabling DRM crtc: %d\n",
					 crtc->base.id);

			if (dc_add_stream_to_ctx(
					dm->dc,
					dm_state->context,
					dm_new_crtc_state->stream) != DC_OK) {
				ret = -EINVAL;
				goto fail;
			}

			*lock_and_validation_needed = true;
		}
	}

skip_modeset:
	/* Release extra reference */
	if (new_stream)
		dc_stream_release(new_stream);

	/*
	 * We want to do dc stream updates that do not require a
	 * full modeset below.
	 */
	if (!(enable && aconnector && new_crtc_state->active))
		return 0;
	/*
	 * Given the above conditions, the dc state cannot be NULL because:
	 * 1. We're in the process of enabling CRTCs (just been added
	 *    to the dc context, or already is on the context)
	 * 2. Has a valid connector attached, and
	 * 3. Is currently active and enabled.
	 * => The dc stream state currently exists.
	 */
	BUG_ON(dm_new_crtc_state->stream == NULL);

	/* Scaling or underscan settings */
	if (is_scaling_state_different(dm_old_conn_state, dm_new_conn_state))
		update_stream_scaling_settings(
			&new_crtc_state->mode, dm_new_conn_state, dm_new_crtc_state->stream);

	/* ABM settings */
	dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;

	/*
	 * Color management settings. We also update color properties
	 * when a modeset is needed, to ensure it gets reprogrammed.
	 */
	if (dm_new_crtc_state->base.color_mgmt_changed ||
	    drm_atomic_crtc_needs_modeset(new_crtc_state)) {
		ret = amdgpu_dm_update_crtc_color_mgmt(dm_new_crtc_state);
		if (ret)
			goto fail;
	}

	/* Update Freesync settings. */
	get_freesync_config_for_crtc(dm_new_crtc_state,
				     dm_new_conn_state);

	return ret;

fail:
	if (new_stream)
		dc_stream_release(new_stream);
	return ret;
}
static bool should_reset_plane(struct drm_atomic_state *state,
			       struct drm_plane *plane,
			       struct drm_plane_state *old_plane_state,
			       struct drm_plane_state *new_plane_state)
{
	struct drm_plane *other;
	struct drm_plane_state *old_other_state, *new_other_state;
	struct drm_crtc_state *new_crtc_state;
	int i;

	/*
	 * TODO: Remove this hack once the checks below are sufficient
	 * to determine when we need to reset all the planes on the stream.
	 */
	if (state->allow_modeset)
		return true;

	/* Exit early if we know that we're adding or removing the plane. */
	if (old_plane_state->crtc != new_plane_state->crtc)
		return true;

	/* old crtc == new_crtc == NULL, plane not in context. */
	if (!new_plane_state->crtc)
		return false;

	new_crtc_state =
		drm_atomic_get_new_crtc_state(state, new_plane_state->crtc);

	if (!new_crtc_state)
		return true;

	/* CRTC Degamma changes currently require us to recreate planes. */
	if (new_crtc_state->color_mgmt_changed)
		return true;

	if (drm_atomic_crtc_needs_modeset(new_crtc_state))
		return true;

	/*
	 * If there are any new primary or overlay planes being added or
	 * removed then the z-order can potentially change. To ensure
	 * correct z-order and pipe acquisition the current DC architecture
	 * requires us to remove and recreate all existing planes.
	 *
	 * TODO: Come up with a more elegant solution for this.
	 */
	for_each_oldnew_plane_in_state(state, other, old_other_state, new_other_state, i) {
		struct amdgpu_framebuffer *old_afb, *new_afb;

		if (other->type == DRM_PLANE_TYPE_CURSOR)
			continue;

		if (old_other_state->crtc != new_plane_state->crtc &&
		    new_other_state->crtc != new_plane_state->crtc)
			continue;

		if (old_other_state->crtc != new_other_state->crtc)
			return true;

		/* Src/dst size and scaling updates. */
		if (old_other_state->src_w != new_other_state->src_w ||
		    old_other_state->src_h != new_other_state->src_h ||
		    old_other_state->crtc_w != new_other_state->crtc_w ||
		    old_other_state->crtc_h != new_other_state->crtc_h)
			return true;

		/* Rotation / mirroring updates. */
		if (old_other_state->rotation != new_other_state->rotation)
			return true;

		/* Blending updates. */
		if (old_other_state->pixel_blend_mode !=
		    new_other_state->pixel_blend_mode)
			return true;

		/* Alpha updates. */
		if (old_other_state->alpha != new_other_state->alpha)
			return true;

		/* Colorspace changes. */
		if (old_other_state->color_range != new_other_state->color_range ||
		    old_other_state->color_encoding != new_other_state->color_encoding)
			return true;

		/* Framebuffer checks fall at the end. */
		if (!old_other_state->fb || !new_other_state->fb)
			continue;

		/* Pixel format changes can require bandwidth updates. */
		if (old_other_state->fb->format != new_other_state->fb->format)
			return true;

		old_afb = (struct amdgpu_framebuffer *)old_other_state->fb;
		new_afb = (struct amdgpu_framebuffer *)new_other_state->fb;

		/* Tiling and DCC changes also require bandwidth updates. */
		if (old_afb->tiling_flags != new_afb->tiling_flags ||
		    old_afb->base.modifier != new_afb->base.modifier)
			return true;
	}

	return false;
}
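/*
 * In short, should_reset_plane() treats anything that can change z-order,
 * scaling, rotation, blending, pixel format, tiling or DCC on a plane
 * sharing this CRTC as a full remove-and-recreate of the plane set, while a
 * plain positional move of another plane (crtc_x/crtc_y only) falls through
 * to "return false".
 */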
static int dm_check_cursor_fb(struct amdgpu_crtc *new_acrtc,
			      struct drm_plane_state *new_plane_state,
			      struct drm_framebuffer *fb)
{
	struct amdgpu_device *adev = drm_to_adev(new_acrtc->base.dev);
	struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(fb);
	unsigned int pitch;
	bool linear;

	if (fb->width > new_acrtc->max_cursor_width ||
	    fb->height > new_acrtc->max_cursor_height) {
		DRM_DEBUG_ATOMIC("Bad cursor FB size %dx%d\n",
				 new_plane_state->fb->width,
				 new_plane_state->fb->height);
		return -EINVAL;
	}
	if (new_plane_state->src_w != fb->width << 16 ||
	    new_plane_state->src_h != fb->height << 16) {
		DRM_DEBUG_ATOMIC("Cropping not supported for cursor plane\n");
		return -EINVAL;
	}

	/* Pitch in pixels */
	pitch = fb->pitches[0] / fb->format->cpp[0];

	if (fb->width != pitch) {
		DRM_DEBUG_ATOMIC("Cursor FB width %d doesn't match pitch %d",
				 fb->width, pitch);
		return -EINVAL;
	}

	switch (pitch) {
	case 64:
	case 128:
	case 256:
		/* FB pitch is supported by cursor plane */
		break;
	default:
		DRM_DEBUG_ATOMIC("Bad cursor FB pitch %d px\n", pitch);
		return -EINVAL;
	}
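	/*
	 * Worked example: a 64-pixel-wide ARGB8888 cursor has
	 * fb->pitches[0] = 256 bytes with cpp[0] = 4, so pitch = 256 / 4 =
	 * 64 pixels, which both matches fb->width and lands on one of the
	 * pitches the cursor plane accepts.
	 */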
	/*
	 * Core DRM takes care of checking FB modifiers, so we only need to
	 * check tiling flags when the FB doesn't have a modifier.
	 */
	if (!(fb->flags & DRM_MODE_FB_MODIFIERS)) {
		if (adev->family < AMDGPU_FAMILY_AI) {
			linear = AMDGPU_TILING_GET(afb->tiling_flags, ARRAY_MODE) != DC_ARRAY_2D_TILED_THIN1 &&
				 AMDGPU_TILING_GET(afb->tiling_flags, ARRAY_MODE) != DC_ARRAY_1D_TILED_THIN1 &&
				 AMDGPU_TILING_GET(afb->tiling_flags, MICRO_TILE_MODE) == 0;
		} else {
			linear = AMDGPU_TILING_GET(afb->tiling_flags, SWIZZLE_MODE) == 0;
		}
		if (!linear) {
			DRM_DEBUG_ATOMIC("Cursor FB not linear");
			return -EINVAL;
		}
	}

	return 0;
}
static int dm_update_plane_state(struct dc *dc,
				 struct drm_atomic_state *state,
				 struct drm_plane *plane,
				 struct drm_plane_state *old_plane_state,
				 struct drm_plane_state *new_plane_state,
				 bool enable,
				 bool *lock_and_validation_needed)
{
	struct dm_atomic_state *dm_state = NULL;
	struct drm_crtc *new_plane_crtc, *old_plane_crtc;
	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
	struct dm_crtc_state *dm_new_crtc_state, *dm_old_crtc_state;
	struct dm_plane_state *dm_new_plane_state, *dm_old_plane_state;
	struct amdgpu_crtc *new_acrtc;
	bool needs_reset;
	int ret = 0;

	new_plane_crtc = new_plane_state->crtc;
	old_plane_crtc = old_plane_state->crtc;
	dm_new_plane_state = to_dm_plane_state(new_plane_state);
	dm_old_plane_state = to_dm_plane_state(old_plane_state);

	if (plane->type == DRM_PLANE_TYPE_CURSOR) {
		if (!enable || !new_plane_crtc ||
		    drm_atomic_plane_disabling(plane->state, new_plane_state))
			return 0;

		new_acrtc = to_amdgpu_crtc(new_plane_crtc);

		if (new_plane_state->src_x != 0 || new_plane_state->src_y != 0) {
			DRM_DEBUG_ATOMIC("Cropping not supported for cursor plane\n");
			return -EINVAL;
		}

		if (new_plane_state->fb) {
			ret = dm_check_cursor_fb(new_acrtc, new_plane_state,
						 new_plane_state->fb);
			if (ret)
				return ret;
		}

		return 0;
	}

	needs_reset = should_reset_plane(state, plane, old_plane_state,
					 new_plane_state);

	/* Remove any changed/removed planes */
	if (!enable) {
		if (!needs_reset)
			return 0;

		if (!old_plane_crtc)
			return 0;

		old_crtc_state = drm_atomic_get_old_crtc_state(
				state, old_plane_crtc);
		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);

		if (!dm_old_crtc_state->stream)
			return 0;

		DRM_DEBUG_ATOMIC("Disabling DRM plane: %d on DRM crtc %d\n",
				 plane->base.id, old_plane_crtc->base.id);

		ret = dm_atomic_get_state(state, &dm_state);
		if (ret)
			return ret;

		if (!dc_remove_plane_from_context(
				dc,
				dm_old_crtc_state->stream,
				dm_old_plane_state->dc_state,
				dm_state->context)) {
			return -EINVAL;
		}

		dc_plane_state_release(dm_old_plane_state->dc_state);
		dm_new_plane_state->dc_state = NULL;

		*lock_and_validation_needed = true;
	} else { /* Add new planes */
		struct dc_plane_state *dc_new_plane_state;

		if (drm_atomic_plane_disabling(plane->state, new_plane_state))
			return 0;

		if (!new_plane_crtc)
			return 0;

		new_crtc_state = drm_atomic_get_new_crtc_state(state, new_plane_crtc);
		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);

		if (!dm_new_crtc_state->stream)
			return 0;

		if (!needs_reset)
			return 0;

		ret = dm_plane_helper_check_state(new_plane_state, new_crtc_state);
		if (ret)
			return ret;

		WARN_ON(dm_new_plane_state->dc_state);

		dc_new_plane_state = dc_create_plane_state(dc);
		if (!dc_new_plane_state)
			return -ENOMEM;

		DRM_DEBUG_DRIVER("Enabling DRM plane: %d on DRM crtc %d\n",
				 plane->base.id, new_plane_crtc->base.id);

		ret = fill_dc_plane_attributes(
			drm_to_adev(new_plane_crtc->dev),
			dc_new_plane_state,
			new_plane_state,
			new_crtc_state);
		if (ret) {
			dc_plane_state_release(dc_new_plane_state);
			return ret;
		}

		ret = dm_atomic_get_state(state, &dm_state);
		if (ret) {
			dc_plane_state_release(dc_new_plane_state);
			return ret;
		}

		/*
		 * Any atomic check errors that occur after this will
		 * not need a release. The plane state will be attached
		 * to the stream, and therefore part of the atomic
		 * state. It'll be released when the atomic state is
		 * cleaned.
		 */
		if (!dc_add_plane_to_context(
				dc,
				dm_new_crtc_state->stream,
				dc_new_plane_state,
				dm_state->context)) {
			dc_plane_state_release(dc_new_plane_state);
			return -EINVAL;
		}

		dm_new_plane_state->dc_state = dc_new_plane_state;

		/*
		 * Tell DC to do a full surface update every time there
		 * is a plane change. Inefficient, but works for now.
		 */
		dm_new_plane_state->dc_state->update_flags.bits.full_update = 1;

		*lock_and_validation_needed = true;
	}

	return ret;
}
static int dm_check_crtc_cursor(struct drm_atomic_state *state,
				struct drm_crtc *crtc,
				struct drm_crtc_state *new_crtc_state)
{
	struct drm_plane_state *new_cursor_state, *new_primary_state;
	int cursor_scale_w, cursor_scale_h, primary_scale_w, primary_scale_h;

	/*
	 * On DCE and DCN there is no dedicated hardware cursor plane. We get a
	 * cursor per pipe but it's going to inherit the scaling and
	 * positioning from the underlying pipe. Check that the cursor plane's
	 * blending properties match the primary plane's.
	 */
	new_cursor_state = drm_atomic_get_new_plane_state(state, crtc->cursor);
	new_primary_state = drm_atomic_get_new_plane_state(state, crtc->primary);
	if (!new_cursor_state || !new_primary_state || !new_cursor_state->fb) {
		return 0;
	}

	cursor_scale_w = new_cursor_state->crtc_w * 1000 /
			 (new_cursor_state->src_w >> 16);
	cursor_scale_h = new_cursor_state->crtc_h * 1000 /
			 (new_cursor_state->src_h >> 16);

	primary_scale_w = new_primary_state->crtc_w * 1000 /
			  (new_primary_state->src_w >> 16);
	primary_scale_h = new_primary_state->crtc_h * 1000 /
			  (new_primary_state->src_h >> 16);

	if (cursor_scale_w != primary_scale_w ||
	    cursor_scale_h != primary_scale_h) {
		DRM_DEBUG_ATOMIC("Cursor plane scaling doesn't match primary plane\n");
		return -EINVAL;
	}

	return 0;
}
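/*
 * Worked example for the ratio check above: a 64x64 cursor displayed at
 * 64x64 has a scale of 64 * 1000 / 64 = 1000 (1.000x). If the primary
 * plane scans a 1920x1080 buffer out to a 3840x2160 mode, its scale is
 * 2000 (2.000x); the mismatch fails the commit, because the hardware
 * cursor would silently inherit the primary plane's 2x scaling.
 */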
#if defined(CONFIG_DRM_AMD_DC_DCN)
static int add_affected_mst_dsc_crtcs(struct drm_atomic_state *state, struct drm_crtc *crtc)
{
	struct drm_connector *connector;
	struct drm_connector_state *conn_state;
	struct amdgpu_dm_connector *aconnector = NULL;
	int i;

	for_each_new_connector_in_state(state, connector, conn_state, i) {
		if (conn_state->crtc != crtc)
			continue;

		aconnector = to_amdgpu_dm_connector(connector);
		if (!aconnector->port || !aconnector->mst_port)
			aconnector = NULL;
		else
			break;
	}

	if (!aconnector)
		return 0;

	return drm_dp_mst_add_affected_dsc_crtcs(state, &aconnector->mst_port->mst_mgr);
}
#endif
/**
 * amdgpu_dm_atomic_check() - Atomic check implementation for AMDgpu DM.
 * @dev: The DRM device
 * @state: The atomic state to commit
 *
 * Validate that the given atomic state is programmable by DC into hardware.
 * This involves constructing a &struct dc_state reflecting the new hardware
 * state we wish to commit, then querying DC to see if it is programmable. It's
 * important not to modify the existing DC state. Otherwise, atomic_check
 * may unexpectedly commit hardware changes.
 *
 * When validating the DC state, it's important that the right locks are
 * acquired. For full updates, which remove/add/update streams on one CRTC
 * while flipping on another CRTC, acquiring the global lock guarantees that
 * any such full update commit will wait for completion of any outstanding
 * flip using DRM's synchronization events.
 *
 * Note that DM adds the affected connectors for all CRTCs in state, when that
 * might not seem necessary. This is because DC stream creation requires the
 * DC sink, which is tied to the DRM connector state. Cleaning this up should
 * be possible but non-trivial - a possible TODO item.
 *
 * Return: -Error code if validation failed.
 */
static int amdgpu_dm_atomic_check(struct drm_device *dev,
				  struct drm_atomic_state *state)
{
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct dm_atomic_state *dm_state = NULL;
	struct dc *dc = adev->dm.dc;
	struct drm_connector *connector;
	struct drm_connector_state *old_con_state, *new_con_state;
	struct drm_crtc *crtc;
	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
	struct drm_plane *plane;
	struct drm_plane_state *old_plane_state, *new_plane_state;
	enum dc_status status;
	int ret, i;
	bool lock_and_validation_needed = false;
	struct dm_crtc_state *dm_old_crtc_state;

	trace_amdgpu_dm_atomic_check_begin(state);
	ret = drm_atomic_helper_check_modeset(dev, state);
	if (ret)
		goto fail;

	/* Check connector changes */
	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
		struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);

		/* Skip connectors that are disabled or part of modeset already. */
		if (!old_con_state->crtc && !new_con_state->crtc)
			continue;

		if (!new_con_state->crtc)
			continue;

		new_crtc_state = drm_atomic_get_crtc_state(state, new_con_state->crtc);
		if (IS_ERR(new_crtc_state)) {
			ret = PTR_ERR(new_crtc_state);
			goto fail;
		}

		if (dm_old_con_state->abm_level !=
		    dm_new_con_state->abm_level)
			new_crtc_state->connectors_changed = true;
	}
#if defined(CONFIG_DRM_AMD_DC_DCN)
	if (adev->asic_type >= CHIP_NAVI10) {
		for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
			if (drm_atomic_crtc_needs_modeset(new_crtc_state)) {
				ret = add_affected_mst_dsc_crtcs(state, crtc);
				if (ret)
					goto fail;
			}
		}
	}
#endif
	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);

		if (!drm_atomic_crtc_needs_modeset(new_crtc_state) &&
		    !new_crtc_state->color_mgmt_changed &&
		    old_crtc_state->vrr_enabled == new_crtc_state->vrr_enabled &&
		    dm_old_crtc_state->dsc_force_changed == false)
			continue;

		if (!new_crtc_state->enable)
			continue;

		ret = drm_atomic_add_affected_connectors(state, crtc);
		if (ret)
			goto fail;

		ret = drm_atomic_add_affected_planes(state, crtc);
		if (ret)
			goto fail;

		if (dm_old_crtc_state->dsc_force_changed)
			new_crtc_state->mode_changed = true;
	}
	/*
	 * Add all primary and overlay planes on the CRTC to the state
	 * whenever a plane is enabled to maintain correct z-ordering
	 * and to enable fast surface updates.
	 */
	drm_for_each_crtc(crtc, dev) {
		bool modified = false;

		for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
			if (plane->type == DRM_PLANE_TYPE_CURSOR)
				continue;

			if (new_plane_state->crtc == crtc ||
			    old_plane_state->crtc == crtc) {
				modified = true;
				break;
			}
		}

		if (!modified)
			continue;

		drm_for_each_plane_mask(plane, state->dev, crtc->state->plane_mask) {
			if (plane->type == DRM_PLANE_TYPE_CURSOR)
				continue;

			new_plane_state =
				drm_atomic_get_plane_state(state, plane);

			if (IS_ERR(new_plane_state)) {
				ret = PTR_ERR(new_plane_state);
				goto fail;
			}
		}
	}

	/* Remove existing planes if they are modified */
	for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
		ret = dm_update_plane_state(dc, state, plane,
					    old_plane_state,
					    new_plane_state,
					    false,
					    &lock_and_validation_needed);
		if (ret)
			goto fail;
	}

	/* Disable all crtcs which require disable */
	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
		ret = dm_update_crtc_state(&adev->dm, state, crtc,
					   old_crtc_state,
					   new_crtc_state,
					   false,
					   &lock_and_validation_needed);
		if (ret)
			goto fail;
	}

	/* Enable all crtcs which require enable */
	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
		ret = dm_update_crtc_state(&adev->dm, state, crtc,
					   old_crtc_state,
					   new_crtc_state,
					   true,
					   &lock_and_validation_needed);
		if (ret)
			goto fail;
	}

	/* Add new/modified planes */
	for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
		ret = dm_update_plane_state(dc, state, plane,
					    old_plane_state,
					    new_plane_state,
					    true,
					    &lock_and_validation_needed);
		if (ret)
			goto fail;
	}
	/* Run this here since we want to validate the streams we created */
	ret = drm_atomic_helper_check_planes(dev, state);
	if (ret)
		goto fail;

	/* Check cursor planes scaling */
	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
		ret = dm_check_crtc_cursor(state, crtc, new_crtc_state);
		if (ret)
			goto fail;
	}

	if (state->legacy_cursor_update) {
		/*
		 * This is a fast cursor update coming from the plane update
		 * helper; check if it can be done asynchronously for better
		 * performance.
		 */
		state->async_update =
			!drm_atomic_helper_async_check(dev, state);

		/*
		 * Skip the remaining global validation if this is an async
		 * update. Cursor updates can be done without affecting
		 * state or bandwidth calcs and this avoids the performance
		 * penalty of locking the private state object and
		 * allocating a new dc_state.
		 */
		if (state->async_update)
			return 0;
	}
	/* Check scaling and underscan changes */
	/*
	 * TODO: Scaling-changes validation was removed because a new stream
	 * cannot be committed into the context without causing a full reset.
	 * Need to decide how to handle this.
	 */
	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
		struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);

		/* Skip any modesets/resets */
		if (!acrtc || drm_atomic_crtc_needs_modeset(
				drm_atomic_get_new_crtc_state(state, &acrtc->base)))
			continue;

		/* Skip anything that is not a scaling or underscan change */
		if (!is_scaling_state_different(dm_new_con_state, dm_old_con_state))
			continue;

		lock_and_validation_needed = true;
	}
	/**
	 * Streams and planes are reset when there are changes that affect
	 * bandwidth. Anything that affects bandwidth needs to go through
	 * DC global validation to ensure that the configuration can be applied
	 * to hardware.
	 *
	 * We have to currently stall out here in atomic_check for outstanding
	 * commits to finish in this case because our IRQ handlers reference
	 * DRM state directly - we can end up disabling interrupts too early
	 * otherwise.
	 *
	 * TODO: Remove this stall and drop DM state private objects.
	 */
	if (lock_and_validation_needed) {
		ret = dm_atomic_get_state(state, &dm_state);
		if (ret)
			goto fail;

		ret = do_aquire_global_lock(dev, state);
		if (ret)
			goto fail;

#if defined(CONFIG_DRM_AMD_DC_DCN)
		if (!compute_mst_dsc_configs_for_state(state, dm_state->context)) {
			ret = -EINVAL;
			goto fail;
		}

		ret = dm_update_mst_vcpi_slots_for_dsc(state, dm_state->context);
		if (ret)
			goto fail;
#endif

		/*
		 * Perform validation of MST topology in the state:
		 * We need to perform MST atomic check before calling
		 * dc_validate_global_state(), or there is a chance
		 * to get stuck in an infinite loop and hang eventually.
		 */
		ret = drm_dp_mst_atomic_check(state);
		if (ret)
			goto fail;

		status = dc_validate_global_state(dc, dm_state->context, false);
		if (status != DC_OK) {
			DC_LOG_WARNING("DC global validation failure: %s (%d)",
				       dc_status_to_str(status), status);
			ret = -EINVAL;
			goto fail;
		}
	} else {
		/*
		 * The commit is a fast update. Fast updates shouldn't change
		 * the DC context, affect global validation, and can have their
		 * commit work done in parallel with other commits not touching
		 * the same resource. If we have a new DC context as part of
		 * the DM atomic state from validation we need to free it and
		 * retain the existing one instead.
		 *
		 * Furthermore, since the DM atomic state only contains the DC
		 * context and can safely be annulled, we can free the state
		 * and clear the associated private object now to free
		 * some memory and avoid a possible use-after-free later.
		 */

		for (i = 0; i < state->num_private_objs; i++) {
			struct drm_private_obj *obj = state->private_objs[i].ptr;

			if (obj->funcs == adev->dm.atomic_obj.funcs) {
				int j = state->num_private_objs - 1;

				dm_atomic_destroy_state(obj,
						state->private_objs[i].state);

				/*
				 * If i is not at the end of the array then the
				 * last element needs to be moved to where i was
				 * before the array can safely be truncated.
				 */
				if (i != j)
					state->private_objs[i] =
						state->private_objs[j];

				state->private_objs[j].ptr = NULL;
				state->private_objs[j].state = NULL;
				state->private_objs[j].old_state = NULL;
				state->private_objs[j].new_state = NULL;

				state->num_private_objs = j;
				break;
			}
		}
	}
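	/*
	 * The loop above is a swap-remove: with, say, three private objects
	 * and i == 0, j becomes 2, private_objs[0] is overwritten with
	 * private_objs[2], the stale slot 2 is cleared out and
	 * num_private_objs drops to 2, keeping the array dense without
	 * shifting every element.
	 */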
	/* Store the overall update type for use later in atomic check. */
	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
		struct dm_crtc_state *dm_new_crtc_state =
			to_dm_crtc_state(new_crtc_state);

		dm_new_crtc_state->update_type = lock_and_validation_needed ?
							 UPDATE_TYPE_FULL :
							 UPDATE_TYPE_FAST;
	}

	/* Must be success */
	WARN_ON(ret);

	trace_amdgpu_dm_atomic_check_finish(state, ret);

	return ret;

fail:
	if (ret == -EDEADLK)
		DRM_DEBUG_DRIVER("Atomic check stopped to avoid deadlock.\n");
	else if (ret == -EINTR || ret == -EAGAIN || ret == -ERESTARTSYS)
		DRM_DEBUG_DRIVER("Atomic check stopped due to signal.\n");
	else
		DRM_DEBUG_DRIVER("Atomic check failed with err: %d\n", ret);

	trace_amdgpu_dm_atomic_check_finish(state, ret);

	return ret;
}
static bool is_dp_capable_without_timing_msa(struct dc *dc,
					     struct amdgpu_dm_connector *amdgpu_dm_connector)
{
	uint8_t dpcd_data;
	bool capable = false;

	if (amdgpu_dm_connector->dc_link &&
	    dm_helpers_dp_read_dpcd(
			NULL,
			amdgpu_dm_connector->dc_link,
			DP_DOWN_STREAM_PORT_COUNT,
			&dpcd_data,
			sizeof(dpcd_data))) {
		capable = (dpcd_data & DP_MSA_TIMING_PAR_IGNORED) ? true : false;
	}

	return capable;
}
void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,
				    struct edid *edid)
{
	int i = 0;
	bool edid_check_required;
	struct detailed_timing *timing;
	struct detailed_non_pixel *data;
	struct detailed_data_monitor_range *range;
	struct amdgpu_dm_connector *amdgpu_dm_connector =
			to_amdgpu_dm_connector(connector);
	struct dm_connector_state *dm_con_state = NULL;

	struct drm_device *dev = connector->dev;
	struct amdgpu_device *adev = drm_to_adev(dev);
	bool freesync_capable = false;

	if (!connector->state) {
		DRM_ERROR("%s - Connector has no state", __func__);
		goto update;
	}

	if (!edid) {
		dm_con_state = to_dm_connector_state(connector->state);

		amdgpu_dm_connector->min_vfreq = 0;
		amdgpu_dm_connector->max_vfreq = 0;
		amdgpu_dm_connector->pixel_clock_mhz = 0;

		goto update;
	}

	dm_con_state = to_dm_connector_state(connector->state);

	edid_check_required = false;
	if (!amdgpu_dm_connector->dc_sink) {
		DRM_ERROR("dc_sink is NULL, could not add FreeSync module.\n");
		goto update;
	}
	if (!adev->dm.freesync_module)
		goto update;
	/*
	 * If the EDID is non-zero, restrict FreeSync to DP and eDP sinks.
	 */
	if (edid) {
		if (amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT
		    || amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_EDP) {
			edid_check_required = is_dp_capable_without_timing_msa(
						adev->dm.dc,
						amdgpu_dm_connector);
		}
	}
	if (edid_check_required == true && (edid->version > 1 ||
	    (edid->version == 1 && edid->revision > 1))) {
		for (i = 0; i < 4; i++) {

			timing = &edid->detailed_timings[i];
			data = &timing->data.other_data;
			range = &data->data.range;
			/*
			 * Check if monitor has continuous frequency mode
			 */
			if (data->type != EDID_DETAIL_MONITOR_RANGE)
				continue;
			/*
			 * Check for flag range limits only. If flag == 1 then
			 * no additional timing information is provided.
			 * Default GTF, GTF Secondary curve and CVT are not
			 * supported.
			 */
			if (range->flags != 1)
				continue;

			amdgpu_dm_connector->min_vfreq = range->min_vfreq;
			amdgpu_dm_connector->max_vfreq = range->max_vfreq;
			amdgpu_dm_connector->pixel_clock_mhz =
				range->pixel_clock_mhz * 10;

			connector->display_info.monitor_range.min_vfreq = range->min_vfreq;
			connector->display_info.monitor_range.max_vfreq = range->max_vfreq;

			break;
		}

		if (amdgpu_dm_connector->max_vfreq -
		    amdgpu_dm_connector->min_vfreq > 10) {

			freesync_capable = true;
		}
	}
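	/*
	 * Example: a range descriptor advertising 40-75 Hz gives
	 * min_vfreq = 40 and max_vfreq = 75; the 35 Hz span clears the
	 * "> 10" window check above, so freesync_capable becomes true.
	 * A nearly fixed-rate panel (e.g. 59-61 Hz) would be rejected as
	 * too narrow a window to be useful.
	 */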
update:
	if (dm_con_state)
		dm_con_state->freesync_capable = freesync_capable;

	if (connector->vrr_capable_property)
		drm_connector_set_vrr_capable_property(connector,
						       freesync_capable);
}
static void amdgpu_dm_set_psr_caps(struct dc_link *link)
{
	uint8_t dpcd_data[EDP_PSR_RECEIVER_CAP_SIZE];

	if (!(link->connector_signal & SIGNAL_TYPE_EDP))
		return;
	if (link->type == dc_connection_none)
		return;
	if (dm_helpers_dp_read_dpcd(NULL, link, DP_PSR_SUPPORT,
				    dpcd_data, sizeof(dpcd_data))) {
		link->dpcd_caps.psr_caps.psr_version = dpcd_data[0];

		if (dpcd_data[0] == 0) {
			link->psr_settings.psr_version = DC_PSR_VERSION_UNSUPPORTED;
			link->psr_settings.psr_feature_enabled = false;
		} else {
			link->psr_settings.psr_version = DC_PSR_VERSION_1;
			link->psr_settings.psr_feature_enabled = true;
		}

		DRM_INFO("PSR support:%d\n", link->psr_settings.psr_feature_enabled);
	}
}
/*
 * amdgpu_dm_link_setup_psr() - configure the PSR link
 * @stream: stream state
 *
 * Return: true if success
 */
static bool amdgpu_dm_link_setup_psr(struct dc_stream_state *stream)
{
	struct dc_link *link = NULL;
	struct psr_config psr_config = {0};
	struct psr_context psr_context = {0};
	bool ret = false;

	if (stream == NULL)
		return false;

	link = stream->link;

	psr_config.psr_version = link->dpcd_caps.psr_caps.psr_version;

	if (psr_config.psr_version > 0) {
		psr_config.psr_exit_link_training_required = 0x1;
		psr_config.psr_frame_capture_indication_req = 0;
		psr_config.psr_rfb_setup_time = 0x37;
		psr_config.psr_sdp_transmit_line_num_deadline = 0x20;
		psr_config.allow_smu_optimizations = 0x0;

		ret = dc_link_setup_psr(link, stream, &psr_config, &psr_context);
	}
	DRM_DEBUG_DRIVER("PSR link: %d\n", link->psr_settings.psr_feature_enabled);

	return ret;
}
/*
 * amdgpu_dm_psr_enable() - enable PSR f/w
 * @stream: stream state
 *
 * Return: true if success
 */
bool amdgpu_dm_psr_enable(struct dc_stream_state *stream)
{
	struct dc_link *link = stream->link;
	unsigned int vsync_rate_hz = 0;
	struct dc_static_screen_params params = {0};
	/*
	 * Calculate the number of static frames before generating an
	 * interrupt to FW. Start from a fail-safe of 2 static frames.
	 */
	unsigned int num_frames_static = 2;

	DRM_DEBUG_DRIVER("Enabling psr...\n");

	vsync_rate_hz = div64_u64(div64_u64((
			stream->timing.pix_clk_100hz * 100),
			stream->timing.v_total),
			stream->timing.h_total);

	/*
	 * Round up: calculate the number of frames such that at least
	 * 30 ms of time has passed.
	 */
	if (vsync_rate_hz != 0) {
		unsigned int frame_time_microsec = 1000000 / vsync_rate_hz;

		num_frames_static = (30000 / frame_time_microsec) + 1;
	}
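	/*
	 * Example: at 60 Hz, frame_time_microsec = 1000000 / 60 = 16666,
	 * so num_frames_static = 30000 / 16666 + 1 = 2 frames; at 144 Hz
	 * it is 30000 / 6944 + 1 = 5 frames of static screen before the
	 * interrupt fires.
	 */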
	params.triggers.cursor_update = true;
	params.triggers.overlay_update = true;
	params.triggers.surface_update = true;
	params.num_frames = num_frames_static;

	dc_stream_set_static_screen_params(link->ctx->dc,
					   &stream, 1,
					   &params);

	return dc_link_set_psr_allow_active(link, true, false, false);
}
/*
 * amdgpu_dm_psr_disable() - disable PSR f/w
 * @stream: stream state
 *
 * Return: true if success
 */
static bool amdgpu_dm_psr_disable(struct dc_stream_state *stream)
{
	DRM_DEBUG_DRIVER("Disabling psr...\n");

	return dc_link_set_psr_allow_active(stream->link, false, true, false);
}
/*
 * amdgpu_dm_psr_disable_all() - disable PSR f/w if PSR is enabled on any
 * stream
 *
 * Return: true if success
 */
static bool amdgpu_dm_psr_disable_all(struct amdgpu_display_manager *dm)
{
	DRM_DEBUG_DRIVER("Disabling psr if psr is enabled on any stream\n");

	return dc_set_psr_allow_active(dm->dc, false);
}
void amdgpu_dm_trigger_timing_sync(struct drm_device *dev)
{
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct dc *dc = adev->dm.dc;
	int i;

	mutex_lock(&adev->dm.dc_lock);
	if (dc->current_state) {
		for (i = 0; i < dc->current_state->stream_count; ++i)
			dc->current_state->streams[i]
				->triggered_crtc_reset.enabled =
				adev->dm.force_timing_sync;

		dm_enable_per_frame_crtc_master_sync(dc->current_state);
		dc_trigger_sync(dc, dc->current_state);
	}
	mutex_unlock(&adev->dm.dc_lock);
}
void dm_write_reg_func(const struct dc_context *ctx, uint32_t address,
		       uint32_t value, const char *func_name)
{
#ifdef DM_CHECK_ADDR_0
	if (address == 0) {
		DC_ERR("invalid register write. address = 0");
		return;
	}
#endif
	cgs_write_register(ctx->cgs_device, address, value);
	trace_amdgpu_dc_wreg(&ctx->perf_trace->write_count, address, value);
}
uint32_t dm_read_reg_func(const struct dc_context *ctx, uint32_t address,
			  const char *func_name)
{
	uint32_t value;
#ifdef DM_CHECK_ADDR_0
	if (address == 0) {
		DC_ERR("invalid register read; address = 0\n");
		return 0;
	}
#endif

	if (ctx->dmub_srv &&
	    ctx->dmub_srv->reg_helper_offload.gather_in_progress &&
	    !ctx->dmub_srv->reg_helper_offload.should_burst_write) {
		ASSERT(false);
		return 0;
	}

	value = cgs_read_register(ctx->cgs_device, address);

	trace_amdgpu_dc_rreg(&ctx->perf_trace->read_count, address, value);