2 * Copyright © 2014 Intel Corporation
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21 * DEALINGS IN THE SOFTWARE.
24 #include <drm/drm_atomic_helper.h>
25 #include <drm/drm_damage_helper.h>
26 #include <drm/drm_debugfs.h>
30 #include "intel_atomic.h"
31 #include "intel_crtc.h"
32 #include "intel_ddi.h"
34 #include "intel_display_types.h"
36 #include "intel_dp_aux.h"
37 #include "intel_frontbuffer.h"
38 #include "intel_hdmi.h"
39 #include "intel_psr.h"
40 #include "intel_psr_regs.h"
41 #include "intel_snps_phy.h"
42 #include "skl_universal_plane.h"
45 * DOC: Panel Self Refresh (PSR/SRD)
47 * Since Haswell the display controller supports Panel Self-Refresh on display
48 * panels which have a remote frame buffer (RFB) implemented according to the PSR
49 * spec in eDP 1.3. PSR allows the display to go to lower standby states
50 * when the system is idle but the display is on, as it eliminates display refresh
51 * requests to DDR memory completely as long as the frame buffer for that
52 * display is unchanged.
54 * Panel Self Refresh must be supported by both Hardware (source) and Panel (sink).
57 * PSR saves power by caching the framebuffer in the panel RFB, which allows us
58 * to power down the link and memory controller. For DSI panels the same idea
59 * is called "manual mode".
61 * The implementation uses the hardware-based PSR support which automatically
62 * enters/exits self-refresh mode. The hardware takes care of sending the
63 * required DP aux message and could even retrain the link (that part isn't
64 * enabled yet though). The hardware also keeps track of any frontbuffer
65 * changes to know when to exit self-refresh mode again. Unfortunately that
66 * part doesn't work too well, hence the i915 PSR support uses the
67 * software frontbuffer tracking to make sure it doesn't miss a screen
68 * update. For this integration intel_psr_invalidate() and intel_psr_flush()
69 * get called by the frontbuffer tracking code. Note that because of locking
70 * issues the self-refresh re-enable code is done from a work queue, which
71 * must be correctly synchronized/cancelled when shutting down the pipe.
73 * DC3CO (DC3 clock off)
75 * On top of PSR2, GEN12 adds an intermediate power savings state that turns
76 * the clock off automatically during the PSR2 idle state.
77 * The smaller overhead of DC3co entry/exit vs. the overhead of PSR2 deep sleep
78 * entry/exit allows the HW to enter a low-power state even when page flipping
79 * periodically (for instance a 30fps video playback scenario).
81 * Every time a flip occurs PSR2 will get out of deep sleep state (if it was in
82 * it), so DC3CO is enabled and tgl_dc3co_disable_work is scheduled to run after
83 * 6 frames. If no other flip occurs and the work runs, DC3CO is disabled and
84 * PSR2 is configured to enter deep sleep, resetting again in case of another flip.
86 * Front buffer modifications do not trigger DC3CO activation on purpose as it
87 * would bring a lot of complexity and most modern systems will only use page flips.
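 *
 * As a rough call-flow sketch of the above (see the helpers further down in
 * this file): a page flip enables DC3CO via tgl_psr2_enable_dc3co() and
 * (re)schedules tgl_dc3co_disable_work(); if no further flip arrives before
 * the work runs, tgl_psr2_disable_dc3co() restores the deep sleep idle frame
 * count.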
92 * Description of PSR mask bits:
94 * EDP_PSR_DEBUG[16]/EDP_PSR_DEBUG_MASK_DISP_REG_WRITE (hsw-skl):
96 * When unmasked (nearly) all display register writes (e.g. even
97 * SWF) trigger a PSR exit. Some registers are excluded from this
98 * and they have a more specific mask (described below). On icl+
99 * this bit no longer exists and is effectively always set.
101 * PIPE_MISC[21]/PIPE_MISC_PSR_MASK_PIPE_REG_WRITE (skl+):
103 * When unmasked (nearly) all pipe/plane register writes
104 * trigger a PSR exit. Some plane registers are excluded from this
105 * and they have a more specific mask (described below).
107 * CHICKEN_PIPESL_1[11]/SKL_PSR_MASK_PLANE_FLIP (skl+):
108 * PIPE_MISC[23]/PIPE_MISC_PSR_MASK_PRIMARY_FLIP (bdw):
109 * EDP_PSR_DEBUG[23]/EDP_PSR_DEBUG_MASK_PRIMARY_FLIP (hsw):
111 * When unmasked PRI_SURF/PLANE_SURF writes trigger a PSR exit.
112 * SPR_SURF/CURBASE are not included in this and instead are
113 * controlled by PIPE_MISC_PSR_MASK_PIPE_REG_WRITE (skl+) or
114 * EDP_PSR_DEBUG_MASK_DISP_REG_WRITE (hsw/bdw).
116 * PIPE_MISC[22]/PIPE_MISC_PSR_MASK_SPRITE_ENABLE (bdw):
117 * EDP_PSR_DEBUG[21]/EDP_PSR_DEBUG_MASK_SPRITE_ENABLE (hsw):
119 * When unmasked PSR is blocked as long as the sprite
120 * plane is enabled. skl+ with their universal planes no
121 * longer have a mask bit like this, and no plane being
122 * enabled blocks PSR.
124 * PIPE_MISC[21]/PIPE_MISC_PSR_MASK_CURSOR_MOVE (bdw):
125 * EDP_PSR_DEBUG[20]/EDP_PSR_DEBUG_MASK_CURSOR_MOVE (hsw):
127 * When unmasked CURPOS writes trigger a PSR exit. On skl+
128 * this bit no longer exists but CURPOS is included in the
129 * PIPE_MISC_PSR_MASK_PIPE_REG_WRITE mask.
131 * PIPE_MISC[20]/PIPE_MISC_PSR_MASK_VBLANK_VSYNC_INT (bdw+):
132 * EDP_PSR_DEBUG[19]/EDP_PSR_DEBUG_MASK_VBLANK_VSYNC_INT (hsw):
134 * When unmasked PSR is blocked as long as vblank and/or vsync
135 * interrupt is unmasked in IMR *and* enabled in IER.
137 * CHICKEN_TRANS[30]/SKL_UNMASK_VBL_TO_PIPE_IN_SRD (skl+):
138 * CHICKEN_PAR1_1[15]/HSW_MASK_VBL_TO_PIPE_IN_SRD (hsw/bdw):
140 * Selects whether PSR exit generates an extra vblank before
141 * the first frame is transmitted. Also note the opposite polarity
142 * of the bit on hsw/bdw vs. skl+ (masked==generate the extra vblank,
143 * unmasked==do not generate the extra vblank).
145 * With DC states enabled the extra vblank happens after link training,
146 * with DC states disabled it happens immediately upon PSR exit trigger.
147 * No idea as of now why there is a difference. HSW/BDW (which don't
148 * even have DMC) always generate it after link training. Go figure.
150 * Unfortunately CHICKEN_TRANS itself seems to be double buffered
151 * and thus won't latch until the first vblank. So with DC states
152 * enabled the register effectively uses the reset value during DC5
153 * exit+PSR exit sequence, and thus the bit does nothing until
154 * latched by the vblank that it was trying to prevent from being
155 * generated in the first place. So we should probably call this
156 * one a chicken/egg bit instead on skl+.
158 * In standby mode (as opposed to link-off) this makes no difference
159 * as the timing generator keeps running the whole time generating
160 * normal periodic vblanks.
162 * WaPsrDPAMaskVBlankInSRD asks us to set the bit on hsw/bdw,
163 * and doing so makes the behaviour match the skl+ reset value.
165 * CHICKEN_PIPESL_1[0]/BDW_UNMASK_VBL_TO_REGS_IN_SRD (bdw):
166 * CHICKEN_PIPESL_1[15]/HSW_UNMASK_VBL_TO_REGS_IN_SRD (hsw):
168 * On BDW without this bit set no vblanks whatsoever are
169 * generated after PSR exit. On HSW this has no apparent effect.
170 * WaPsrDPRSUnmaskVBlankInSRD says to set this.
172 * The rest of the bits are more self-explanatory and/or
173 * irrelevant for normal operation.
175 * Description of intel_crtc_state variables. has_psr, has_panel_replay and has_sel_update:
178 * has_psr (alone): PSR1
179 * has_psr + has_sel_update: PSR2
180 * has_psr + has_panel_replay: Panel Replay
181 * has_psr + has_panel_replay + has_sel_update: Panel Replay Selective Update
183 * Description of some intel_psr variables. enabled, panel_replay_enabled and sel_update_enabled:
186 * enabled (alone): PSR1
187 * enabled + sel_update_enabled: PSR2
188 * enabled + panel_replay_enabled: Panel Replay
189 * enabled + panel_replay_enabled + sel_update_enabled: Panel Replay SU
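 *
 * As a purely illustrative sketch (not driver code; the field names follow
 * the description above), the combinations could be decoded roughly as:
 *
 *	if (!psr->enabled)
 *		mode = "disabled";
 *	else if (psr->panel_replay_enabled)
 *		mode = psr->sel_update_enabled ? "Panel Replay SU" : "Panel Replay";
 *	else
 *		mode = psr->sel_update_enabled ? "PSR2" : "PSR1";
 */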
192 #define CAN_PSR(intel_dp) ((intel_dp)->psr.sink_support && \
193 (intel_dp)->psr.source_support)
195 bool intel_encoder_can_psr(struct intel_encoder *encoder)
197 if (intel_encoder_is_dp(encoder) || encoder->type == INTEL_OUTPUT_DP_MST)
198 return CAN_PSR(enc_to_intel_dp(encoder)) ||
199 CAN_PANEL_REPLAY(enc_to_intel_dp(encoder));
204 static bool psr_global_enabled(struct intel_dp *intel_dp)
206 struct intel_connector *connector = intel_dp->attached_connector;
207 struct drm_i915_private *i915 = dp_to_i915(intel_dp);
209 switch (intel_dp->psr.debug & I915_PSR_DEBUG_MODE_MASK) {
210 case I915_PSR_DEBUG_DEFAULT:
211 if (i915->display.params.enable_psr == -1)
212 return connector->panel.vbt.psr.enable;
213 return i915->display.params.enable_psr;
214 case I915_PSR_DEBUG_DISABLE:
221 static bool psr2_global_enabled(struct intel_dp *intel_dp)
223 struct drm_i915_private *i915 = dp_to_i915(intel_dp);
225 switch (intel_dp->psr.debug & I915_PSR_DEBUG_MODE_MASK) {
226 case I915_PSR_DEBUG_DISABLE:
227 case I915_PSR_DEBUG_FORCE_PSR1:
230 if (i915->display.params.enable_psr == 1)
236 static u32 psr_irq_psr_error_bit_get(struct intel_dp *intel_dp)
238 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
240 return DISPLAY_VER(dev_priv) >= 12 ? TGL_PSR_ERROR :
241 EDP_PSR_ERROR(intel_dp->psr.transcoder);
244 static u32 psr_irq_post_exit_bit_get(struct intel_dp *intel_dp)
246 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
248 return DISPLAY_VER(dev_priv) >= 12 ? TGL_PSR_POST_EXIT :
249 EDP_PSR_POST_EXIT(intel_dp->psr.transcoder);
252 static u32 psr_irq_pre_entry_bit_get(struct intel_dp *intel_dp)
254 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
256 return DISPLAY_VER(dev_priv) >= 12 ? TGL_PSR_PRE_ENTRY :
257 EDP_PSR_PRE_ENTRY(intel_dp->psr.transcoder);
260 static u32 psr_irq_mask_get(struct intel_dp *intel_dp)
262 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
264 return DISPLAY_VER(dev_priv) >= 12 ? TGL_PSR_MASK :
265 EDP_PSR_MASK(intel_dp->psr.transcoder);
268 static i915_reg_t psr_ctl_reg(struct drm_i915_private *dev_priv,
269 enum transcoder cpu_transcoder)
271 if (DISPLAY_VER(dev_priv) >= 8)
272 return EDP_PSR_CTL(cpu_transcoder);
277 static i915_reg_t psr_debug_reg(struct drm_i915_private *dev_priv,
278 enum transcoder cpu_transcoder)
280 if (DISPLAY_VER(dev_priv) >= 8)
281 return EDP_PSR_DEBUG(cpu_transcoder);
283 return HSW_SRD_DEBUG;
286 static i915_reg_t psr_perf_cnt_reg(struct drm_i915_private *dev_priv,
287 enum transcoder cpu_transcoder)
289 if (DISPLAY_VER(dev_priv) >= 8)
290 return EDP_PSR_PERF_CNT(cpu_transcoder);
292 return HSW_SRD_PERF_CNT;
295 static i915_reg_t psr_status_reg(struct drm_i915_private *dev_priv,
296 enum transcoder cpu_transcoder)
298 if (DISPLAY_VER(dev_priv) >= 8)
299 return EDP_PSR_STATUS(cpu_transcoder);
301 return HSW_SRD_STATUS;
304 static i915_reg_t psr_imr_reg(struct drm_i915_private *dev_priv,
305 enum transcoder cpu_transcoder)
307 if (DISPLAY_VER(dev_priv) >= 12)
308 return TRANS_PSR_IMR(cpu_transcoder);
313 static i915_reg_t psr_iir_reg(struct drm_i915_private *dev_priv,
314 enum transcoder cpu_transcoder)
316 if (DISPLAY_VER(dev_priv) >= 12)
317 return TRANS_PSR_IIR(cpu_transcoder);
322 static i915_reg_t psr_aux_ctl_reg(struct drm_i915_private *dev_priv,
323 enum transcoder cpu_transcoder)
325 if (DISPLAY_VER(dev_priv) >= 8)
326 return EDP_PSR_AUX_CTL(cpu_transcoder);
328 return HSW_SRD_AUX_CTL;
331 static i915_reg_t psr_aux_data_reg(struct drm_i915_private *dev_priv,
332 enum transcoder cpu_transcoder, int i)
334 if (DISPLAY_VER(dev_priv) >= 8)
335 return EDP_PSR_AUX_DATA(cpu_transcoder, i);
337 return HSW_SRD_AUX_DATA(i);
340 static void psr_irq_control(struct intel_dp *intel_dp)
342 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
343 enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
346 if (intel_dp->psr.panel_replay_enabled)
349 mask = psr_irq_psr_error_bit_get(intel_dp);
350 if (intel_dp->psr.debug & I915_PSR_DEBUG_IRQ)
351 mask |= psr_irq_post_exit_bit_get(intel_dp) |
352 psr_irq_pre_entry_bit_get(intel_dp);
354 intel_de_rmw(dev_priv, psr_imr_reg(dev_priv, cpu_transcoder),
355 psr_irq_mask_get(intel_dp), ~mask);
358 static void psr_event_print(struct drm_i915_private *i915,
359 u32 val, bool psr2_enabled)
361 drm_dbg_kms(&i915->drm, "PSR exit events: 0x%x\n", val);
362 if (val & PSR_EVENT_PSR2_WD_TIMER_EXPIRE)
363 drm_dbg_kms(&i915->drm, "\tPSR2 watchdog timer expired\n");
364 if ((val & PSR_EVENT_PSR2_DISABLED) && psr2_enabled)
365 drm_dbg_kms(&i915->drm, "\tPSR2 disabled\n");
366 if (val & PSR_EVENT_SU_DIRTY_FIFO_UNDERRUN)
367 drm_dbg_kms(&i915->drm, "\tSU dirty FIFO underrun\n");
368 if (val & PSR_EVENT_SU_CRC_FIFO_UNDERRUN)
369 drm_dbg_kms(&i915->drm, "\tSU CRC FIFO underrun\n");
370 if (val & PSR_EVENT_GRAPHICS_RESET)
371 drm_dbg_kms(&i915->drm, "\tGraphics reset\n");
372 if (val & PSR_EVENT_PCH_INTERRUPT)
373 drm_dbg_kms(&i915->drm, "\tPCH interrupt\n");
374 if (val & PSR_EVENT_MEMORY_UP)
375 drm_dbg_kms(&i915->drm, "\tMemory up\n");
376 if (val & PSR_EVENT_FRONT_BUFFER_MODIFY)
377 drm_dbg_kms(&i915->drm, "\tFront buffer modification\n");
378 if (val & PSR_EVENT_WD_TIMER_EXPIRE)
379 drm_dbg_kms(&i915->drm, "\tPSR watchdog timer expired\n");
380 if (val & PSR_EVENT_PIPE_REGISTERS_UPDATE)
381 drm_dbg_kms(&i915->drm, "\tPIPE registers updated\n");
382 if (val & PSR_EVENT_REGISTER_UPDATE)
383 drm_dbg_kms(&i915->drm, "\tRegister updated\n");
384 if (val & PSR_EVENT_HDCP_ENABLE)
385 drm_dbg_kms(&i915->drm, "\tHDCP enabled\n");
386 if (val & PSR_EVENT_KVMR_SESSION_ENABLE)
387 drm_dbg_kms(&i915->drm, "\tKVMR session enabled\n");
388 if (val & PSR_EVENT_VBI_ENABLE)
389 drm_dbg_kms(&i915->drm, "\tVBI enabled\n");
390 if (val & PSR_EVENT_LPSP_MODE_EXIT)
391 drm_dbg_kms(&i915->drm, "\tLPSP mode exited\n");
392 if ((val & PSR_EVENT_PSR_DISABLE) && !psr2_enabled)
393 drm_dbg_kms(&i915->drm, "\tPSR disabled\n");
396 void intel_psr_irq_handler(struct intel_dp *intel_dp, u32 psr_iir)
398 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
399 enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
400 ktime_t time_ns = ktime_get();
402 if (psr_iir & psr_irq_pre_entry_bit_get(intel_dp)) {
403 intel_dp->psr.last_entry_attempt = time_ns;
404 drm_dbg_kms(&dev_priv->drm,
405 "[transcoder %s] PSR entry attempt in 2 vblanks\n",
406 transcoder_name(cpu_transcoder));
409 if (psr_iir & psr_irq_post_exit_bit_get(intel_dp)) {
410 intel_dp->psr.last_exit = time_ns;
411 drm_dbg_kms(&dev_priv->drm,
412 "[transcoder %s] PSR exit completed\n",
413 transcoder_name(cpu_transcoder));
415 if (DISPLAY_VER(dev_priv) >= 9) {
418 val = intel_de_rmw(dev_priv, PSR_EVENT(cpu_transcoder), 0, 0);
420 psr_event_print(dev_priv, val, intel_dp->psr.psr2_enabled);
424 if (psr_iir & psr_irq_psr_error_bit_get(intel_dp)) {
425 drm_warn(&dev_priv->drm, "[transcoder %s] PSR aux error\n",
426 transcoder_name(cpu_transcoder));
428 intel_dp->psr.irq_aux_error = true;
431 * If this interrupt is not masked it will keep
432 * interrupting so fast that it prevents the scheduled work from running.
434 * Also after a PSR error, we don't want to arm PSR
435 * again so we don't care about unmasking the interrupt
436 * or unsetting irq_aux_error.
438 intel_de_rmw(dev_priv, psr_imr_reg(dev_priv, cpu_transcoder),
439 0, psr_irq_psr_error_bit_get(intel_dp));
441 queue_work(dev_priv->unordered_wq, &intel_dp->psr.work);
445 static bool intel_dp_get_alpm_status(struct intel_dp *intel_dp)
449 if (drm_dp_dpcd_readb(&intel_dp->aux, DP_RECEIVER_ALPM_CAP,
452 return alpm_caps & DP_ALPM_CAP;
455 static u8 intel_dp_get_sink_sync_latency(struct intel_dp *intel_dp)
457 struct drm_i915_private *i915 = dp_to_i915(intel_dp);
458 u8 val = 8; /* assume the worst if we can't read the value */
460 if (drm_dp_dpcd_readb(&intel_dp->aux,
461 DP_SYNCHRONIZATION_LATENCY_IN_SINK, &val) == 1)
462 val &= DP_MAX_RESYNC_FRAME_COUNT_MASK;
464 drm_dbg_kms(&i915->drm,
465 "Unable to get sink synchronization latency, assuming 8 frames\n");
469 static void intel_dp_get_su_granularity(struct intel_dp *intel_dp)
471 struct drm_i915_private *i915 = dp_to_i915(intel_dp);
476 /* If the sink doesn't have specific granularity requirements, set legacy ones */
477 if (!(intel_dp->psr_dpcd[1] & DP_PSR2_SU_GRANULARITY_REQUIRED)) {
478 /* As PSR2 HW sends full lines, we do not care about x granularity */
484 r = drm_dp_dpcd_read(&intel_dp->aux, DP_PSR2_SU_X_GRANULARITY, &w, 2);
486 drm_dbg_kms(&i915->drm,
487 "Unable to read DP_PSR2_SU_X_GRANULARITY\n");
489 * Spec says that if the value read is 0 the default granularity should be used.
492 if (r != 2 || w == 0)
495 r = drm_dp_dpcd_read(&intel_dp->aux, DP_PSR2_SU_Y_GRANULARITY, &y, 1);
497 drm_dbg_kms(&i915->drm,
498 "Unable to read DP_PSR2_SU_Y_GRANULARITY\n");
505 intel_dp->psr.su_w_granularity = w;
506 intel_dp->psr.su_y_granularity = y;
509 static void _panel_replay_init_dpcd(struct intel_dp *intel_dp)
511 struct drm_i915_private *i915 = dp_to_i915(intel_dp);
514 intel_dp->psr.sink_panel_replay_support = false;
515 drm_dp_dpcd_readb(&intel_dp->aux, DP_PANEL_REPLAY_CAP, &pr_dpcd);
517 if (!(pr_dpcd & DP_PANEL_REPLAY_SUPPORT)) {
518 drm_dbg_kms(&i915->drm,
519 "Panel replay is not supported by panel\n");
523 drm_dbg_kms(&i915->drm,
524 "Panel replay is supported by panel\n");
525 intel_dp->psr.sink_panel_replay_support = true;
528 static void _psr_init_dpcd(struct intel_dp *intel_dp)
530 struct drm_i915_private *i915 =
531 to_i915(dp_to_dig_port(intel_dp)->base.base.dev);
533 drm_dbg_kms(&i915->drm, "eDP panel supports PSR version %x\n",
534 intel_dp->psr_dpcd[0]);
536 if (drm_dp_has_quirk(&intel_dp->desc, DP_DPCD_QUIRK_NO_PSR)) {
537 drm_dbg_kms(&i915->drm,
538 "PSR support not currently available for this panel\n");
542 if (!(intel_dp->edp_dpcd[1] & DP_EDP_SET_POWER_CAP)) {
543 drm_dbg_kms(&i915->drm,
544 "Panel lacks power state control, PSR cannot be enabled\n");
548 intel_dp->psr.sink_support = true;
549 intel_dp->psr.sink_sync_latency =
550 intel_dp_get_sink_sync_latency(intel_dp);
552 if (DISPLAY_VER(i915) >= 9 &&
553 intel_dp->psr_dpcd[0] >= DP_PSR2_WITH_Y_COORD_IS_SUPPORTED) {
554 bool y_req = intel_dp->psr_dpcd[1] &
555 DP_PSR2_SU_Y_COORDINATE_REQUIRED;
556 bool alpm = intel_dp_get_alpm_status(intel_dp);
559 * All panels that support PSR version 03h (PSR2 +
560 * Y-coordinate) can handle Y-coordinates in VSC but we are
561 * only sure that it is going to be used when required by the
562 * panel. This way the panel is capable of doing selective updates
563 * without an aux frame sync.
565 * To support PSR version 02h and PSR version 03h panels without the
566 * Y-coordinate requirement we would need to enable GTC first.
569 intel_dp->psr.sink_psr2_support = y_req && alpm;
570 drm_dbg_kms(&i915->drm, "PSR2 %ssupported\n",
571 intel_dp->psr.sink_psr2_support ? "" : "not ");
575 void intel_psr_init_dpcd(struct intel_dp *intel_dp)
577 _panel_replay_init_dpcd(intel_dp);
579 drm_dp_dpcd_read(&intel_dp->aux, DP_PSR_SUPPORT, intel_dp->psr_dpcd,
580 sizeof(intel_dp->psr_dpcd));
582 if (intel_dp->psr_dpcd[0])
583 _psr_init_dpcd(intel_dp);
585 if (intel_dp->psr.sink_psr2_support)
586 intel_dp_get_su_granularity(intel_dp);
589 static void hsw_psr_setup_aux(struct intel_dp *intel_dp)
591 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
592 enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
593 u32 aux_clock_divider, aux_ctl;
594 /* write DP_SET_POWER=D0 */
595 static const u8 aux_msg[] = {
596 [0] = (DP_AUX_NATIVE_WRITE << 4) | ((DP_SET_POWER >> 16) & 0xf),
597 [1] = (DP_SET_POWER >> 8) & 0xff,
598 [2] = DP_SET_POWER & 0xff,
600 [4] = DP_SET_POWER_D0,
604 BUILD_BUG_ON(sizeof(aux_msg) > 20);
605 for (i = 0; i < sizeof(aux_msg); i += 4)
606 intel_de_write(dev_priv,
607 psr_aux_data_reg(dev_priv, cpu_transcoder, i >> 2),
608 intel_dp_aux_pack(&aux_msg[i], sizeof(aux_msg) - i));
610 aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, 0);
612 /* Start with bits set for DDI_AUX_CTL register */
613 aux_ctl = intel_dp->get_aux_send_ctl(intel_dp, sizeof(aux_msg),
616 /* Select only valid bits for SRD_AUX_CTL */
617 aux_ctl &= EDP_PSR_AUX_CTL_TIME_OUT_MASK |
618 EDP_PSR_AUX_CTL_MESSAGE_SIZE_MASK |
619 EDP_PSR_AUX_CTL_PRECHARGE_2US_MASK |
620 EDP_PSR_AUX_CTL_BIT_CLOCK_2X_MASK;
622 intel_de_write(dev_priv, psr_aux_ctl_reg(dev_priv, cpu_transcoder),
626 static bool psr2_su_region_et_valid(struct intel_dp *intel_dp)
628 struct drm_i915_private *i915 = dp_to_i915(intel_dp);
630 if (DISPLAY_VER(i915) >= 20 &&
631 intel_dp->psr_dpcd[0] == DP_PSR2_WITH_Y_COORD_ET_SUPPORTED &&
632 !(intel_dp->psr.debug & I915_PSR_DEBUG_SU_REGION_ET_DISABLE))
638 static unsigned int intel_psr_get_enable_sink_offset(struct intel_dp *intel_dp)
640 return intel_dp->psr.panel_replay_enabled ?
641 PANEL_REPLAY_CONFIG : DP_PSR_EN_CFG;
645 * Note: Most of the bits are the same in PANEL_REPLAY_CONFIG and DP_PSR_EN_CFG.
646 * We rely on the PSR definitions for these "common" bits.
648 void intel_psr_enable_sink(struct intel_dp *intel_dp,
649 const struct intel_crtc_state *crtc_state)
651 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
652 u8 dpcd_val = DP_PSR_ENABLE;
654 if (crtc_state->has_psr2) {
655 /* Enable ALPM at sink for psr2 */
656 if (!crtc_state->has_panel_replay) {
657 drm_dp_dpcd_writeb(&intel_dp->aux,
658 DP_RECEIVER_ALPM_CONFIG,
660 DP_ALPM_LOCK_ERROR_IRQ_HPD_ENABLE);
662 if (psr2_su_region_et_valid(intel_dp))
663 dpcd_val |= DP_PSR_ENABLE_SU_REGION_ET;
666 dpcd_val |= DP_PSR_ENABLE_PSR2 | DP_PSR_IRQ_HPD_WITH_CRC_ERRORS;
668 if (intel_dp->psr.link_standby)
669 dpcd_val |= DP_PSR_MAIN_LINK_ACTIVE;
671 if (!crtc_state->has_panel_replay && DISPLAY_VER(dev_priv) >= 8)
672 dpcd_val |= DP_PSR_CRC_VERIFICATION;
675 if (crtc_state->has_panel_replay)
676 dpcd_val |= DP_PANEL_REPLAY_UNRECOVERABLE_ERROR_EN |
677 DP_PANEL_REPLAY_RFB_STORAGE_ERROR_EN;
679 if (crtc_state->req_psr2_sdp_prior_scanline)
680 dpcd_val |= DP_PSR_SU_REGION_SCANLINE_CAPTURE;
682 if (intel_dp->psr.entry_setup_frames > 0)
683 dpcd_val |= DP_PSR_FRAME_CAPTURE;
685 drm_dp_dpcd_writeb(&intel_dp->aux,
686 intel_psr_get_enable_sink_offset(intel_dp),
689 if (intel_dp_is_edp(intel_dp))
690 drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER, DP_SET_POWER_D0);
693 static u32 intel_psr1_get_tp_time(struct intel_dp *intel_dp)
695 struct intel_connector *connector = intel_dp->attached_connector;
696 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
699 if (DISPLAY_VER(dev_priv) >= 11)
700 val |= EDP_PSR_TP4_TIME_0us;
702 if (dev_priv->display.params.psr_safest_params) {
703 val |= EDP_PSR_TP1_TIME_2500us;
704 val |= EDP_PSR_TP2_TP3_TIME_2500us;
708 if (connector->panel.vbt.psr.tp1_wakeup_time_us == 0)
709 val |= EDP_PSR_TP1_TIME_0us;
710 else if (connector->panel.vbt.psr.tp1_wakeup_time_us <= 100)
711 val |= EDP_PSR_TP1_TIME_100us;
712 else if (connector->panel.vbt.psr.tp1_wakeup_time_us <= 500)
713 val |= EDP_PSR_TP1_TIME_500us;
715 val |= EDP_PSR_TP1_TIME_2500us;
717 if (connector->panel.vbt.psr.tp2_tp3_wakeup_time_us == 0)
718 val |= EDP_PSR_TP2_TP3_TIME_0us;
719 else if (connector->panel.vbt.psr.tp2_tp3_wakeup_time_us <= 100)
720 val |= EDP_PSR_TP2_TP3_TIME_100us;
721 else if (connector->panel.vbt.psr.tp2_tp3_wakeup_time_us <= 500)
722 val |= EDP_PSR_TP2_TP3_TIME_500us;
724 val |= EDP_PSR_TP2_TP3_TIME_2500us;
728 * "Do not skip both TP1 and TP2/TP3"
730 if (DISPLAY_VER(dev_priv) < 9 &&
731 connector->panel.vbt.psr.tp1_wakeup_time_us == 0 &&
732 connector->panel.vbt.psr.tp2_tp3_wakeup_time_us == 0)
733 val |= EDP_PSR_TP2_TP3_TIME_100us;
736 if (intel_dp_source_supports_tps3(dev_priv) &&
737 drm_dp_tps3_supported(intel_dp->dpcd))
738 val |= EDP_PSR_TP_TP1_TP3;
740 val |= EDP_PSR_TP_TP1_TP2;
745 static u8 psr_compute_idle_frames(struct intel_dp *intel_dp)
747 struct intel_connector *connector = intel_dp->attached_connector;
748 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
751 /* Let's use 6 as the minimum to cover all known cases including the
752 * off-by-one issue that HW has in some cases.
754 idle_frames = max(6, connector->panel.vbt.psr.idle_frames);
755 idle_frames = max(idle_frames, intel_dp->psr.sink_sync_latency + 1);
757 if (drm_WARN_ON(&dev_priv->drm, idle_frames > 0xf))
763 static void hsw_activate_psr1(struct intel_dp *intel_dp)
765 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
766 enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
767 u32 max_sleep_time = 0x1f;
768 u32 val = EDP_PSR_ENABLE;
770 val |= EDP_PSR_IDLE_FRAMES(psr_compute_idle_frames(intel_dp));
772 if (DISPLAY_VER(dev_priv) < 20)
773 val |= EDP_PSR_MAX_SLEEP_TIME(max_sleep_time);
775 if (IS_HASWELL(dev_priv))
776 val |= EDP_PSR_MIN_LINK_ENTRY_TIME_8_LINES;
778 if (intel_dp->psr.link_standby)
779 val |= EDP_PSR_LINK_STANDBY;
781 val |= intel_psr1_get_tp_time(intel_dp);
783 if (DISPLAY_VER(dev_priv) >= 8)
784 val |= EDP_PSR_CRC_ENABLE;
786 if (DISPLAY_VER(dev_priv) >= 20)
787 val |= LNL_EDP_PSR_ENTRY_SETUP_FRAMES(intel_dp->psr.entry_setup_frames);
789 intel_de_rmw(dev_priv, psr_ctl_reg(dev_priv, cpu_transcoder),
790 ~EDP_PSR_RESTORE_PSR_ACTIVE_CTX_MASK, val);
793 static u32 intel_psr2_get_tp_time(struct intel_dp *intel_dp)
795 struct intel_connector *connector = intel_dp->attached_connector;
796 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
799 if (dev_priv->display.params.psr_safest_params)
800 return EDP_PSR2_TP2_TIME_2500us;
802 if (connector->panel.vbt.psr.psr2_tp2_tp3_wakeup_time_us >= 0 &&
803 connector->panel.vbt.psr.psr2_tp2_tp3_wakeup_time_us <= 50)
804 val |= EDP_PSR2_TP2_TIME_50us;
805 else if (connector->panel.vbt.psr.psr2_tp2_tp3_wakeup_time_us <= 100)
806 val |= EDP_PSR2_TP2_TIME_100us;
807 else if (connector->panel.vbt.psr.psr2_tp2_tp3_wakeup_time_us <= 500)
808 val |= EDP_PSR2_TP2_TIME_500us;
810 val |= EDP_PSR2_TP2_TIME_2500us;
815 static int psr2_block_count_lines(struct intel_dp *intel_dp)
817 return intel_dp->psr.alpm_parameters.io_wake_lines < 9 &&
818 intel_dp->psr.alpm_parameters.fast_wake_lines < 9 ? 8 : 12;
821 static int psr2_block_count(struct intel_dp *intel_dp)
823 return psr2_block_count_lines(intel_dp) / 4;
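/*
 * E.g. with both IO and fast wake at 8 lines or less the block count is
 * 8 lines, i.e. TGL_EDP_PSR2_BLOCK_COUNT_NUM_2 in hsw_activate_psr2() below,
 * otherwise 12 lines (TGL_EDP_PSR2_BLOCK_COUNT_NUM_3).
 */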
826 static u8 frames_before_su_entry(struct intel_dp *intel_dp)
828 u8 frames_before_su_entry;
830 frames_before_su_entry = max_t(u8,
831 intel_dp->psr.sink_sync_latency + 1,
834 /* Entry setup frames must be at least 1 less than frames before SU entry */
835 if (intel_dp->psr.entry_setup_frames >= frames_before_su_entry)
836 frames_before_su_entry = intel_dp->psr.entry_setup_frames + 1;
838 return frames_before_su_entry;
841 static void dg2_activate_panel_replay(struct intel_dp *intel_dp)
843 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
845 intel_de_rmw(dev_priv, PSR2_MAN_TRK_CTL(intel_dp->psr.transcoder),
846 0, ADLP_PSR2_MAN_TRK_CTL_SF_CONTINUOS_FULL_FRAME);
848 intel_de_rmw(dev_priv, TRANS_DP2_CTL(intel_dp->psr.transcoder), 0,
849 TRANS_DP2_PANEL_REPLAY_ENABLE);
852 static void hsw_activate_psr2(struct intel_dp *intel_dp)
854 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
855 struct intel_psr *psr = &intel_dp->psr;
856 enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
857 u32 val = EDP_PSR2_ENABLE;
860 val |= EDP_PSR2_IDLE_FRAMES(psr_compute_idle_frames(intel_dp));
862 if (DISPLAY_VER(dev_priv) < 14 && !IS_ALDERLAKE_P(dev_priv))
863 val |= EDP_SU_TRACK_ENABLE;
865 if (DISPLAY_VER(dev_priv) >= 10 && DISPLAY_VER(dev_priv) < 13)
866 val |= EDP_Y_COORDINATE_ENABLE;
868 val |= EDP_PSR2_FRAME_BEFORE_SU(frames_before_su_entry(intel_dp));
870 val |= intel_psr2_get_tp_time(intel_dp);
872 if (DISPLAY_VER(dev_priv) >= 12) {
873 if (psr2_block_count(intel_dp) > 2)
874 val |= TGL_EDP_PSR2_BLOCK_COUNT_NUM_3;
876 val |= TGL_EDP_PSR2_BLOCK_COUNT_NUM_2;
879 /* Wa_22012278275:adl-p */
880 if (IS_ALDERLAKE_P(dev_priv) && IS_DISPLAY_STEP(dev_priv, STEP_A0, STEP_E0)) {
881 static const u8 map[] = {
892 * Still using the default IO_BUFFER_WAKE and FAST_WAKE, see
893 * comments below for more information
897 tmp = map[psr->alpm_parameters.io_wake_lines -
898 TGL_EDP_PSR2_IO_BUFFER_WAKE_MIN_LINES];
899 val |= TGL_EDP_PSR2_IO_BUFFER_WAKE(tmp + TGL_EDP_PSR2_IO_BUFFER_WAKE_MIN_LINES);
901 tmp = map[psr->alpm_parameters.fast_wake_lines - TGL_EDP_PSR2_FAST_WAKE_MIN_LINES];
902 val |= TGL_EDP_PSR2_FAST_WAKE(tmp + TGL_EDP_PSR2_FAST_WAKE_MIN_LINES);
903 } else if (DISPLAY_VER(dev_priv) >= 12) {
904 val |= TGL_EDP_PSR2_IO_BUFFER_WAKE(psr->alpm_parameters.io_wake_lines);
905 val |= TGL_EDP_PSR2_FAST_WAKE(psr->alpm_parameters.fast_wake_lines);
906 } else if (DISPLAY_VER(dev_priv) >= 9) {
907 val |= EDP_PSR2_IO_BUFFER_WAKE(psr->alpm_parameters.io_wake_lines);
908 val |= EDP_PSR2_FAST_WAKE(psr->alpm_parameters.fast_wake_lines);
911 if (intel_dp->psr.req_psr2_sdp_prior_scanline)
912 val |= EDP_PSR2_SU_SDP_SCANLINE;
914 if (DISPLAY_VER(dev_priv) >= 20)
915 psr_val |= LNL_EDP_PSR_ENTRY_SETUP_FRAMES(intel_dp->psr.entry_setup_frames);
917 if (intel_dp->psr.psr2_sel_fetch_enabled) {
920 tmp = intel_de_read(dev_priv, PSR2_MAN_TRK_CTL(cpu_transcoder));
921 drm_WARN_ON(&dev_priv->drm, !(tmp & PSR2_MAN_TRK_CTL_ENABLE));
922 } else if (HAS_PSR2_SEL_FETCH(dev_priv)) {
923 intel_de_write(dev_priv, PSR2_MAN_TRK_CTL(cpu_transcoder), 0);
926 if (psr2_su_region_et_valid(intel_dp))
927 val |= LNL_EDP_PSR2_SU_REGION_ET_ENABLE;
930 * PSR2 HW is incorrectly using EDP_PSR_TP1_TP3_SEL and BSpec is
931 * recommending to keep this bit unset while PSR2 is enabled.
933 intel_de_write(dev_priv, psr_ctl_reg(dev_priv, cpu_transcoder), psr_val);
935 intel_de_write(dev_priv, EDP_PSR2_CTL(cpu_transcoder), val);
939 transcoder_has_psr2(struct drm_i915_private *dev_priv, enum transcoder cpu_transcoder)
941 if (IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(dev_priv) >= 14)
942 return cpu_transcoder == TRANSCODER_A || cpu_transcoder == TRANSCODER_B;
943 else if (DISPLAY_VER(dev_priv) >= 12)
944 return cpu_transcoder == TRANSCODER_A;
945 else if (DISPLAY_VER(dev_priv) >= 9)
946 return cpu_transcoder == TRANSCODER_EDP;
951 static u32 intel_get_frame_time_us(const struct intel_crtc_state *crtc_state)
953 if (!crtc_state->hw.active)
956 return DIV_ROUND_UP(1000 * 1000,
957 drm_mode_vrefresh(&crtc_state->hw.adjusted_mode));
960 static void psr2_program_idle_frames(struct intel_dp *intel_dp,
963 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
964 enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
966 intel_de_rmw(dev_priv, EDP_PSR2_CTL(cpu_transcoder),
967 EDP_PSR2_IDLE_FRAMES_MASK,
968 EDP_PSR2_IDLE_FRAMES(idle_frames));
971 static void tgl_psr2_enable_dc3co(struct intel_dp *intel_dp)
973 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
975 psr2_program_idle_frames(intel_dp, 0);
976 intel_display_power_set_target_dc_state(dev_priv, DC_STATE_EN_DC3CO);
979 static void tgl_psr2_disable_dc3co(struct intel_dp *intel_dp)
981 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
983 intel_display_power_set_target_dc_state(dev_priv, DC_STATE_EN_UPTO_DC6);
984 psr2_program_idle_frames(intel_dp, psr_compute_idle_frames(intel_dp));
987 static void tgl_dc3co_disable_work(struct work_struct *work)
989 struct intel_dp *intel_dp =
990 container_of(work, typeof(*intel_dp), psr.dc3co_work.work);
992 mutex_lock(&intel_dp->psr.lock);
993 /* If delayed work is pending, it is not idle */
994 if (delayed_work_pending(&intel_dp->psr.dc3co_work))
997 tgl_psr2_disable_dc3co(intel_dp);
999 mutex_unlock(&intel_dp->psr.lock);
1002 static void tgl_disallow_dc3co_on_psr2_exit(struct intel_dp *intel_dp)
1004 if (!intel_dp->psr.dc3co_exitline)
1007 cancel_delayed_work(&intel_dp->psr.dc3co_work);
1008 /* Before PSR2 exit disallow dc3co */
1009 tgl_psr2_disable_dc3co(intel_dp);
1013 dc3co_is_pipe_port_compatible(struct intel_dp *intel_dp,
1014 struct intel_crtc_state *crtc_state)
1016 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
1017 enum pipe pipe = to_intel_crtc(crtc_state->uapi.crtc)->pipe;
1018 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1019 enum port port = dig_port->base.port;
1021 if (IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(dev_priv) >= 14)
1022 return pipe <= PIPE_B && port <= PORT_B;
1024 return pipe == PIPE_A && port == PORT_A;
1028 tgl_dc3co_exitline_compute_config(struct intel_dp *intel_dp,
1029 struct intel_crtc_state *crtc_state)
1031 const u32 crtc_vdisplay = crtc_state->uapi.adjusted_mode.crtc_vdisplay;
1032 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1033 struct i915_power_domains *power_domains = &dev_priv->display.power.domains;
1037 * FIXME: Due to the changed sequence of activating/deactivating DC3CO,
1038 * disable DC3CO until the changed dc3co activating/deactivating sequence
1039 * is applied. B.Specs:49196
1044 * DMC's DC3CO exit mechanism has an issue with Selective Fetch
1045 * TODO: when the issue is addressed, this restriction should be removed.
1047 if (crtc_state->enable_psr2_sel_fetch)
1050 if (!(power_domains->allowed_dc_mask & DC_STATE_EN_DC3CO))
1053 if (!dc3co_is_pipe_port_compatible(intel_dp, crtc_state))
1056 /* Wa_16011303918:adl-p */
1057 if (IS_ALDERLAKE_P(dev_priv) && IS_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0))
1061 * DC3CO Exit time 200us B.Spec 49196
1062 * PSR2 transcoder Early Exit scanlines = ROUNDUP(200 / line time) + 1
1065 intel_usecs_to_scanlines(&crtc_state->uapi.adjusted_mode, 200) + 1;
1067 if (drm_WARN_ON(&dev_priv->drm, exit_scanlines > crtc_vdisplay))
1070 crtc_state->dc3co_exitline = crtc_vdisplay - exit_scanlines;
1073 static bool intel_psr2_sel_fetch_config_valid(struct intel_dp *intel_dp,
1074 struct intel_crtc_state *crtc_state)
1076 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1078 if (!dev_priv->display.params.enable_psr2_sel_fetch &&
1079 intel_dp->psr.debug != I915_PSR_DEBUG_ENABLE_SEL_FETCH) {
1080 drm_dbg_kms(&dev_priv->drm,
1081 "PSR2 sel fetch not enabled, disabled by parameter\n");
1085 if (crtc_state->uapi.async_flip) {
1086 drm_dbg_kms(&dev_priv->drm,
1087 "PSR2 sel fetch not enabled, async flip enabled\n");
1091 if (psr2_su_region_et_valid(intel_dp))
1092 crtc_state->enable_psr2_su_region_et = true;
1094 return crtc_state->enable_psr2_sel_fetch = true;
1097 static bool psr2_granularity_check(struct intel_dp *intel_dp,
1098 struct intel_crtc_state *crtc_state)
1100 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1101 const struct drm_dsc_config *vdsc_cfg = &crtc_state->dsc.config;
1102 const int crtc_hdisplay = crtc_state->hw.adjusted_mode.crtc_hdisplay;
1103 const int crtc_vdisplay = crtc_state->hw.adjusted_mode.crtc_vdisplay;
1104 u16 y_granularity = 0;
1106 /* PSR2 HW only sends full lines so we only need to validate the width */
1107 if (crtc_hdisplay % intel_dp->psr.su_w_granularity)
1110 if (crtc_vdisplay % intel_dp->psr.su_y_granularity)
1113 /* HW tracking is only aligned to 4 lines */
1114 if (!crtc_state->enable_psr2_sel_fetch)
1115 return intel_dp->psr.su_y_granularity == 4;
1118 * adl_p and mtl platforms have 1 line granularity.
1119 * For other platforms with SW tracking we can adjust the y coordinates
1120 * to match the sink requirement if it is a multiple of 4.
1122 if (IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(dev_priv) >= 14)
1123 y_granularity = intel_dp->psr.su_y_granularity;
1124 else if (intel_dp->psr.su_y_granularity <= 2)
1126 else if ((intel_dp->psr.su_y_granularity % 4) == 0)
1127 y_granularity = intel_dp->psr.su_y_granularity;
1129 if (y_granularity == 0 || crtc_vdisplay % y_granularity)
1132 if (crtc_state->dsc.compression_enable &&
1133 vdsc_cfg->slice_height % y_granularity)
1136 crtc_state->su_y_granularity = y_granularity;
1140 static bool _compute_psr2_sdp_prior_scanline_indication(struct intel_dp *intel_dp,
1141 struct intel_crtc_state *crtc_state)
1143 const struct drm_display_mode *adjusted_mode = &crtc_state->uapi.adjusted_mode;
1144 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1145 u32 hblank_total, hblank_ns, req_ns;
1147 hblank_total = adjusted_mode->crtc_hblank_end - adjusted_mode->crtc_hblank_start;
1148 hblank_ns = div_u64(1000000ULL * hblank_total, adjusted_mode->crtc_clock);
1150 /* From spec: ((60 / number of lanes) + 11) * 1000 / symbol clock frequency MHz */
1151 req_ns = ((60 / crtc_state->lane_count) + 11) * 1000 / (crtc_state->port_clock / 1000);
1153 if ((hblank_ns - req_ns) > 100)
1156 /* Not supported <13 / Wa_22012279113:adl-p */
1157 if (DISPLAY_VER(dev_priv) < 14 || intel_dp->edp_dpcd[0] < DP_EDP_14b)
1160 crtc_state->req_psr2_sdp_prior_scanline = true;
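/*
 * Worked example with illustrative numbers (not from bspec): on a 4 lane
 * HBR link (port_clock 270000 kHz) req_ns = ((60 / 4) + 11) * 1000 / 270,
 * i.e. ~96 ns. A mode with 160 pixels of horizontal blanking at a
 * 148500 kHz dotclock gives hblank_ns ~1077 ns, leaving well over 100 ns
 * of margin, so no SDP prior scanline indication is needed there.
 */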
1165 * See Bspec: 71632 for the table
1167 * Silence_period = tSilence,Min + ((tSilence,Max - tSilence,Min) / 2)
1169 * Half cycle duration:
1171 * Link rates 1.62 - 4.32 and tLFPS_Cycle = 70 ns
1172 * FLOOR( (Link Rate * tLFPS_Cycle) / (2 * 10) )
1174 * Link rates 5.4 - 8.1
1175 * PORT_ALPM_LFPS_CTL[ LFPS Cycle Count ] = 10
1176 * LFPS Period chosen is the mid-point of the min:max values from the table
1177 * FLOOR( LFPS Period in Symbol clocks /
1178 * (2 * PORT_ALPM_LFPS_CTL[ LFPS Cycle Count ]) )
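 *
 * Worked example (illustrative): for a 1.62 GHz link rate with
 * tLFPS_Cycle = 70 ns, FLOOR((1620 MHz * 70 ns) / (2 * 10)) =
 * FLOOR(113.4 / 20) = 5, matching the smallest half cycle value in the
 * table below.
 */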
1180 static bool _lnl_get_silence_period_and_lfps_half_cycle(int link_rate,
1181 int *silence_period,
1182 int *lfps_half_cycle)
1184 switch (link_rate) {
1186 *silence_period = 20;
1187 *lfps_half_cycle = 5;
1190 *silence_period = 27;
1191 *lfps_half_cycle = 7;
1194 *silence_period = 31;
1195 *lfps_half_cycle = 8;
1198 *silence_period = 34;
1199 *lfps_half_cycle = 9;
1202 *silence_period = 41;
1203 *lfps_half_cycle = 11;
1206 *silence_period = 56;
1207 *lfps_half_cycle = 15;
1210 *silence_period = 69;
1211 *lfps_half_cycle = 12;
1214 *silence_period = 84;
1215 *lfps_half_cycle = 15;
1218 *silence_period = 87;
1219 *lfps_half_cycle = 15;
1222 *silence_period = 104;
1223 *lfps_half_cycle = 19;
1226 *silence_period = *lfps_half_cycle = -1;
1233 * AUX-Less Wake Time = CEILING( ((PHY P2 to P0) + tLFPS_Period, Max+
1234 * tSilence, Max+ tPHY Establishment + tCDS) / tline)
1235 * For the "PHY P2 to P0" latency see the PHY Power Control page
1236 * (PHY P2 to P0) : https://gfxspecs.intel.com/Predator/Home/Index/68965
1238 * The tLFPS_Period, Max term is 800ns
1239 * The tSilence, Max term is 180ns
1240 * The tPHY Establishment (a.k.a. t1) term is 50us
1241 * The tCDS term is 1 or 2 times t2
1242 * t2 = Number ML_PHY_LOCK * tML_PHY_LOCK
1243 * Number ML_PHY_LOCK = ( 7 + CEILING( 6.5us / tML_PHY_LOCK ) + 1)
1244 * Rounding up the 6.5us padding to the next ML_PHY_LOCK boundary and
1245 * adding the "+ 1" term ensures all ML_PHY_LOCK sequences that start
1246 * within the CDS period complete within the CDS period regardless of
1247 * entry into the period
1248 * tML_PHY_LOCK = TPS4 Length * ( 10 / (Link Rate in MHz) )
1249 * TPS4 Length = 252 Symbols
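 *
 * Worked example (illustrative, using only the terms above): for an HBR
 * link (2.7 GHz), tML_PHY_LOCK = 252 * (10 / 2700 MHz) ~= 0.93 us, so
 * Number ML_PHY_LOCK = 7 + CEILING(6.5 us / 0.93 us) + 1 = 15 and
 * t2 ~= 14 us; the dominant contributor to the AUX-less wake time is then
 * the 50 us tPHY Establishment term.
 */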
1251 static int _lnl_compute_aux_less_wake_time(int port_clock)
1253 int tphy2_p2_to_p0 = 12 * 1000;
1254 int tlfps_period_max = 800;
1255 int tsilence_max = 180;
1258 int tml_phy_lock = 1000 * 1000 * tps4 * 10 / port_clock;
1259 int num_ml_phy_lock = 7 + DIV_ROUND_UP(6500, tml_phy_lock) + 1;
1260 int t2 = num_ml_phy_lock * tml_phy_lock;
1263 return DIV_ROUND_UP(tphy2_p2_to_p0 + tlfps_period_max + tsilence_max +
1267 static int _lnl_compute_aux_less_alpm_params(struct intel_dp *intel_dp,
1268 struct intel_crtc_state *crtc_state)
1270 struct drm_i915_private *i915 = dp_to_i915(intel_dp);
1271 int aux_less_wake_time, aux_less_wake_lines, silence_period,
1274 aux_less_wake_time =
1275 _lnl_compute_aux_less_wake_time(crtc_state->port_clock);
1276 aux_less_wake_lines = intel_usecs_to_scanlines(&crtc_state->hw.adjusted_mode,
1277 aux_less_wake_time);
1279 if (!_lnl_get_silence_period_and_lfps_half_cycle(crtc_state->port_clock,
1284 if (aux_less_wake_lines > ALPM_CTL_AUX_LESS_WAKE_TIME_MASK ||
1285 silence_period > PORT_ALPM_CTL_SILENCE_PERIOD_MASK ||
1286 lfps_half_cycle > PORT_ALPM_LFPS_CTL_LAST_LFPS_HALF_CYCLE_DURATION_MASK)
1289 if (i915->display.params.psr_safest_params)
1290 aux_less_wake_lines = ALPM_CTL_AUX_LESS_WAKE_TIME_MASK;
1292 intel_dp->psr.alpm_parameters.fast_wake_lines = aux_less_wake_lines;
1293 intel_dp->psr.alpm_parameters.silence_period_sym_clocks = silence_period;
1294 intel_dp->psr.alpm_parameters.lfps_half_cycle_num_of_syms = lfps_half_cycle;
1299 static bool _lnl_compute_alpm_params(struct intel_dp *intel_dp,
1300 struct intel_crtc_state *crtc_state)
1302 struct drm_i915_private *i915 = dp_to_i915(intel_dp);
1303 int check_entry_lines;
1305 if (DISPLAY_VER(i915) < 20)
1308 /* ALPM Entry Check = 2 + CEILING( 5us /tline ) */
1309 check_entry_lines = 2 +
1310 intel_usecs_to_scanlines(&crtc_state->hw.adjusted_mode, 5);
1312 if (check_entry_lines > 15)
1315 if (!_lnl_compute_aux_less_alpm_params(intel_dp, crtc_state))
1318 if (i915->display.params.psr_safest_params)
1319 check_entry_lines = 15;
1321 intel_dp->psr.alpm_parameters.check_entry_lines = check_entry_lines;
1327 * IO wake time for DISPLAY_VER < 12 is not directly mentioned in Bspec. There
1328 * are 50 us io wake time and 32 us fast wake time. Clearly precharge pulses are
1329 * not (improperly) included in 32 us fast wake time. 50 us - 32 us = 18 us.
1331 static int skl_io_buffer_wake_time(void)
1336 static int tgl_io_buffer_wake_time(void)
1341 static int io_buffer_wake_time(const struct intel_crtc_state *crtc_state)
1343 struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
1345 if (DISPLAY_VER(i915) >= 12)
1346 return tgl_io_buffer_wake_time();
1348 return skl_io_buffer_wake_time();
1351 static bool _compute_alpm_params(struct intel_dp *intel_dp,
1352 struct intel_crtc_state *crtc_state)
1354 struct drm_i915_private *i915 = dp_to_i915(intel_dp);
1355 int io_wake_lines, io_wake_time, fast_wake_lines, fast_wake_time;
1356 int tfw_exit_latency = 20; /* eDP spec */
1357 int phy_wake = 4; /* eDP spec */
1358 int preamble = 8; /* eDP spec */
1359 int precharge = intel_dp_aux_fw_sync_len() - preamble;
1362 io_wake_time = max(precharge, io_buffer_wake_time(crtc_state)) +
1363 preamble + phy_wake + tfw_exit_latency;
1364 fast_wake_time = precharge + preamble + phy_wake +
1367 if (DISPLAY_VER(i915) >= 12)
1368 /* TODO: Check how we can use ALPM_CTL fast wake extended field */
1369 max_wake_lines = 12;
1373 io_wake_lines = intel_usecs_to_scanlines(
1374 &crtc_state->hw.adjusted_mode, io_wake_time);
1375 fast_wake_lines = intel_usecs_to_scanlines(
1376 &crtc_state->hw.adjusted_mode, fast_wake_time);
1378 if (io_wake_lines > max_wake_lines ||
1379 fast_wake_lines > max_wake_lines)
1382 if (!_lnl_compute_alpm_params(intel_dp, crtc_state))
1385 if (i915->display.params.psr_safest_params)
1386 io_wake_lines = fast_wake_lines = max_wake_lines;
1388 /* According to Bspec lower limit should be set as 7 lines. */
1389 intel_dp->psr.alpm_parameters.io_wake_lines = max(io_wake_lines, 7);
1390 intel_dp->psr.alpm_parameters.fast_wake_lines = max(fast_wake_lines, 7);
1395 static int intel_psr_entry_setup_frames(struct intel_dp *intel_dp,
1396 const struct drm_display_mode *adjusted_mode)
1398 struct drm_i915_private *i915 = dp_to_i915(intel_dp);
1399 int psr_setup_time = drm_dp_psr_setup_time(intel_dp->psr_dpcd);
1400 int entry_setup_frames = 0;
1402 if (psr_setup_time < 0) {
1403 drm_dbg_kms(&i915->drm,
1404 "PSR condition failed: Invalid PSR setup time (0x%02x)\n",
1405 intel_dp->psr_dpcd[1]);
1409 if (intel_usecs_to_scanlines(adjusted_mode, psr_setup_time) >
1410 adjusted_mode->crtc_vtotal - adjusted_mode->crtc_vdisplay - 1) {
1411 if (DISPLAY_VER(i915) >= 20) {
1412 /* setup entry frames can be up to 3 frames */
1413 entry_setup_frames = 1;
1414 drm_dbg_kms(&i915->drm,
1415 "PSR setup entry frames %d\n",
1416 entry_setup_frames);
1418 drm_dbg_kms(&i915->drm,
1419 "PSR condition failed: PSR setup time (%d us) too long\n",
1425 return entry_setup_frames;
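/*
 * Worked example (illustrative numbers): a panel reporting a 330 us PSR
 * setup time on a mode with a ~7 us line time needs ~48 scanlines of setup;
 * a vertical blank of only ~40 lines cannot absorb that, so pre-LNL hardware
 * rejects PSR here, while DISPLAY_VER >= 20 can instead cover the shortfall
 * with an entry setup frame.
 */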
1428 static bool intel_psr2_config_valid(struct intel_dp *intel_dp,
1429 struct intel_crtc_state *crtc_state)
1431 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1432 int crtc_hdisplay = crtc_state->hw.adjusted_mode.crtc_hdisplay;
1433 int crtc_vdisplay = crtc_state->hw.adjusted_mode.crtc_vdisplay;
1434 int psr_max_h = 0, psr_max_v = 0, max_bpp = 0;
1436 if (!intel_dp->psr.sink_psr2_support)
1439 /* JSL and EHL only support eDP 1.3 */
1440 if (IS_JASPERLAKE(dev_priv) || IS_ELKHARTLAKE(dev_priv)) {
1441 drm_dbg_kms(&dev_priv->drm, "PSR2 not supported by phy\n");
1445 /* Wa_16011181250 */
1446 if (IS_ROCKETLAKE(dev_priv) || IS_ALDERLAKE_S(dev_priv) ||
1448 drm_dbg_kms(&dev_priv->drm, "PSR2 is defeatured for this platform\n");
1452 if (IS_ALDERLAKE_P(dev_priv) && IS_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0)) {
1453 drm_dbg_kms(&dev_priv->drm, "PSR2 not completely functional in this stepping\n");
1457 if (!transcoder_has_psr2(dev_priv, crtc_state->cpu_transcoder)) {
1458 drm_dbg_kms(&dev_priv->drm,
1459 "PSR2 not supported in transcoder %s\n",
1460 transcoder_name(crtc_state->cpu_transcoder));
1464 if (!psr2_global_enabled(intel_dp)) {
1465 drm_dbg_kms(&dev_priv->drm, "PSR2 disabled by flag\n");
1470 * DSC and PSR2 cannot be enabled simultaneously. If a requested
1471 * resolution requires DSC to be enabled, priority is given to DSC
1474 if (crtc_state->dsc.compression_enable &&
1475 (DISPLAY_VER(dev_priv) < 14 && !IS_ALDERLAKE_P(dev_priv))) {
1476 drm_dbg_kms(&dev_priv->drm,
1477 "PSR2 cannot be enabled since DSC is enabled\n");
1481 if (crtc_state->crc_enabled) {
1482 drm_dbg_kms(&dev_priv->drm,
1483 "PSR2 not enabled because it would inhibit pipe CRC calculation\n");
1487 if (DISPLAY_VER(dev_priv) >= 12) {
1491 } else if (DISPLAY_VER(dev_priv) >= 10) {
1495 } else if (DISPLAY_VER(dev_priv) == 9) {
1501 if (crtc_state->pipe_bpp > max_bpp) {
1502 drm_dbg_kms(&dev_priv->drm,
1503 "PSR2 not enabled, pipe bpp %d > max supported %d\n",
1504 crtc_state->pipe_bpp, max_bpp);
1508 /* Wa_16011303918:adl-p */
1509 if (crtc_state->vrr.enable &&
1510 IS_ALDERLAKE_P(dev_priv) && IS_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0)) {
1511 drm_dbg_kms(&dev_priv->drm,
1512 "PSR2 not enabled, not compatible with HW stepping + VRR\n");
1516 if (!_compute_psr2_sdp_prior_scanline_indication(intel_dp, crtc_state)) {
1517 drm_dbg_kms(&dev_priv->drm,
1518 "PSR2 not enabled, PSR2 SDP indication do not fit in hblank\n");
1522 if (!_compute_alpm_params(intel_dp, crtc_state)) {
1523 drm_dbg_kms(&dev_priv->drm,
1524 "PSR2 not enabled, Unable to use long enough wake times\n");
1528 /* Vblank >= PSR2_CTL Block Count Number maximum line count */
1529 if (crtc_state->hw.adjusted_mode.crtc_vblank_end -
1530 crtc_state->hw.adjusted_mode.crtc_vblank_start <
1531 psr2_block_count_lines(intel_dp)) {
1532 drm_dbg_kms(&dev_priv->drm,
1533 "PSR2 not enabled, too short vblank time\n");
1537 if (HAS_PSR2_SEL_FETCH(dev_priv)) {
1538 if (!intel_psr2_sel_fetch_config_valid(intel_dp, crtc_state) &&
1539 !HAS_PSR_HW_TRACKING(dev_priv)) {
1540 drm_dbg_kms(&dev_priv->drm,
1541 "PSR2 not enabled, selective fetch not valid and no HW tracking available\n");
1546 if (!psr2_granularity_check(intel_dp, crtc_state)) {
1547 drm_dbg_kms(&dev_priv->drm, "PSR2 not enabled, SU granularity not compatible\n");
1551 if (!crtc_state->enable_psr2_sel_fetch &&
1552 (crtc_hdisplay > psr_max_h || crtc_vdisplay > psr_max_v)) {
1553 drm_dbg_kms(&dev_priv->drm,
1554 "PSR2 not enabled, resolution %dx%d > max supported %dx%d\n",
1555 crtc_hdisplay, crtc_vdisplay,
1556 psr_max_h, psr_max_v);
1560 tgl_dc3co_exitline_compute_config(intel_dp, crtc_state);
1564 crtc_state->enable_psr2_sel_fetch = false;
1568 static bool _psr_compute_config(struct intel_dp *intel_dp,
1569 struct intel_crtc_state *crtc_state)
1571 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1572 const struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode;
1573 int entry_setup_frames;
1576 * Current PSR panels don't work reliably with VRR enabled, so
1577 * if VRR is enabled, do not enable PSR.
1579 if (crtc_state->vrr.enable)
1582 if (!CAN_PSR(intel_dp))
1585 entry_setup_frames = intel_psr_entry_setup_frames(intel_dp, adjusted_mode);
1587 if (entry_setup_frames >= 0) {
1588 intel_dp->psr.entry_setup_frames = entry_setup_frames;
1590 drm_dbg_kms(&dev_priv->drm,
1591 "PSR condition failed: PSR setup timing not met\n");
1598 void intel_psr_compute_config(struct intel_dp *intel_dp,
1599 struct intel_crtc_state *crtc_state,
1600 struct drm_connector_state *conn_state)
1602 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1603 const struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode;
1605 if (!psr_global_enabled(intel_dp)) {
1606 drm_dbg_kms(&dev_priv->drm, "PSR disabled by flag\n");
1610 if (intel_dp->psr.sink_not_reliable) {
1611 drm_dbg_kms(&dev_priv->drm,
1612 "PSR sink implementation is not reliable\n");
1616 if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
1617 drm_dbg_kms(&dev_priv->drm,
1618 "PSR condition failed: Interlaced mode enabled\n");
1623 * FIXME figure out what is wrong with PSR+bigjoiner and
1624 * fix it. Presumably something related to the fact that
1625 * PSR is a transcoder level feature.
1627 if (crtc_state->bigjoiner_pipes) {
1628 drm_dbg_kms(&dev_priv->drm,
1629 "PSR disabled due to bigjoiner\n");
1633 if (CAN_PANEL_REPLAY(intel_dp))
1634 crtc_state->has_panel_replay = true;
1636 crtc_state->has_psr = crtc_state->has_panel_replay ? true :
1637 _psr_compute_config(intel_dp, crtc_state);
1639 if (!crtc_state->has_psr)
1642 crtc_state->has_psr2 = intel_psr2_config_valid(intel_dp, crtc_state);
1645 void intel_psr_get_config(struct intel_encoder *encoder,
1646 struct intel_crtc_state *pipe_config)
1648 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
1649 struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
1650 enum transcoder cpu_transcoder = pipe_config->cpu_transcoder;
1651 struct intel_dp *intel_dp;
1657 intel_dp = &dig_port->dp;
1658 if (!(CAN_PSR(intel_dp) || CAN_PANEL_REPLAY(intel_dp)))
1661 mutex_lock(&intel_dp->psr.lock);
1662 if (!intel_dp->psr.enabled)
1665 if (intel_dp->psr.panel_replay_enabled) {
1666 pipe_config->has_psr = pipe_config->has_panel_replay = true;
1669 * Not possible to read the EDP_PSR/PSR2_CTL registers here, as they get
1670 * enabled/disabled dynamically because of frontbuffer tracking and others.
1672 pipe_config->has_psr = true;
1675 pipe_config->has_psr2 = intel_dp->psr.psr2_enabled;
1676 pipe_config->infoframes.enable |= intel_hdmi_infoframe_enable(DP_SDP_VSC);
1678 if (!intel_dp->psr.psr2_enabled)
1681 if (HAS_PSR2_SEL_FETCH(dev_priv)) {
1682 val = intel_de_read(dev_priv, PSR2_MAN_TRK_CTL(cpu_transcoder));
1683 if (val & PSR2_MAN_TRK_CTL_ENABLE)
1684 pipe_config->enable_psr2_sel_fetch = true;
1687 if (DISPLAY_VER(dev_priv) >= 12) {
1688 val = intel_de_read(dev_priv, TRANS_EXITLINE(cpu_transcoder));
1689 pipe_config->dc3co_exitline = REG_FIELD_GET(EXITLINE_MASK, val);
1692 mutex_unlock(&intel_dp->psr.lock);
1695 static void intel_psr_activate(struct intel_dp *intel_dp)
1697 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1698 enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
1700 drm_WARN_ON(&dev_priv->drm,
1701 transcoder_has_psr2(dev_priv, cpu_transcoder) &&
1702 intel_de_read(dev_priv, EDP_PSR2_CTL(cpu_transcoder)) & EDP_PSR2_ENABLE);
1704 drm_WARN_ON(&dev_priv->drm,
1705 intel_de_read(dev_priv, psr_ctl_reg(dev_priv, cpu_transcoder)) & EDP_PSR_ENABLE);
1707 drm_WARN_ON(&dev_priv->drm, intel_dp->psr.active);
1709 lockdep_assert_held(&intel_dp->psr.lock);
1711 /* psr1, psr2 and panel-replay are mutually exclusive. */
1712 if (intel_dp->psr.panel_replay_enabled)
1713 dg2_activate_panel_replay(intel_dp);
1714 else if (intel_dp->psr.psr2_enabled)
1715 hsw_activate_psr2(intel_dp);
1717 hsw_activate_psr1(intel_dp);
1719 intel_dp->psr.active = true;
1722 static u32 wa_16013835468_bit_get(struct intel_dp *intel_dp)
1724 switch (intel_dp->psr.pipe) {
1726 return LATENCY_REPORTING_REMOVED_PIPE_A;
1728 return LATENCY_REPORTING_REMOVED_PIPE_B;
1730 return LATENCY_REPORTING_REMOVED_PIPE_C;
1732 return LATENCY_REPORTING_REMOVED_PIPE_D;
1734 MISSING_CASE(intel_dp->psr.pipe);
1743 static void wm_optimization_wa(struct intel_dp *intel_dp,
1744 const struct intel_crtc_state *crtc_state)
1746 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1747 bool set_wa_bit = false;
1749 /* Wa_14015648006 */
1750 if (IS_DISPLAY_VER(dev_priv, 11, 14))
1751 set_wa_bit |= crtc_state->wm_level_disabled;
1753 /* Wa_16013835468 */
1754 if (DISPLAY_VER(dev_priv) == 12)
1755 set_wa_bit |= crtc_state->hw.adjusted_mode.crtc_vblank_start !=
1756 crtc_state->hw.adjusted_mode.crtc_vdisplay;
1759 intel_de_rmw(dev_priv, GEN8_CHICKEN_DCPR_1,
1760 0, wa_16013835468_bit_get(intel_dp));
1762 intel_de_rmw(dev_priv, GEN8_CHICKEN_DCPR_1,
1763 wa_16013835468_bit_get(intel_dp), 0);
1766 static void lnl_alpm_configure(struct intel_dp *intel_dp)
1768 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1769 enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
1770 struct intel_psr *psr = &intel_dp->psr;
1773 if (DISPLAY_VER(dev_priv) < 20 || (!intel_dp->psr.psr2_enabled &&
1774 !intel_dp_is_edp(intel_dp)))
1778 * Panel Replay on eDP always uses ALPM AUX-less, i.e. no need to
1779 * check panel support at this point.
1781 if (intel_dp->psr.panel_replay_enabled && intel_dp_is_edp(intel_dp)) {
1782 alpm_ctl = ALPM_CTL_ALPM_ENABLE |
1783 ALPM_CTL_ALPM_AUX_LESS_ENABLE |
1784 ALPM_CTL_AUX_LESS_SLEEP_HOLD_TIME_50_SYMBOLS;
1786 intel_de_write(dev_priv, PORT_ALPM_CTL(cpu_transcoder),
1787 PORT_ALPM_CTL_ALPM_AUX_LESS_ENABLE |
1788 PORT_ALPM_CTL_MAX_PHY_SWING_SETUP(15) |
1789 PORT_ALPM_CTL_MAX_PHY_SWING_HOLD(0) |
1790 PORT_ALPM_CTL_SILENCE_PERIOD(
1791 psr->alpm_parameters.silence_period_sym_clocks));
1793 intel_de_write(dev_priv, PORT_ALPM_LFPS_CTL(cpu_transcoder),
1794 PORT_ALPM_LFPS_CTL_LFPS_CYCLE_COUNT(10) |
1795 PORT_ALPM_LFPS_CTL_LFPS_HALF_CYCLE_DURATION(
1796 psr->alpm_parameters.lfps_half_cycle_num_of_syms) |
1797 PORT_ALPM_LFPS_CTL_FIRST_LFPS_HALF_CYCLE_DURATION(
1798 psr->alpm_parameters.lfps_half_cycle_num_of_syms) |
1799 PORT_ALPM_LFPS_CTL_LAST_LFPS_HALF_CYCLE_DURATION(
1800 psr->alpm_parameters.lfps_half_cycle_num_of_syms));
1802 alpm_ctl = ALPM_CTL_EXTENDED_FAST_WAKE_ENABLE |
1803 ALPM_CTL_EXTENDED_FAST_WAKE_TIME(psr->alpm_parameters.fast_wake_lines);
1806 alpm_ctl |= ALPM_CTL_ALPM_ENTRY_CHECK(psr->alpm_parameters.check_entry_lines);
1808 intel_de_write(dev_priv, ALPM_CTL(cpu_transcoder), alpm_ctl);
1811 static void intel_psr_enable_source(struct intel_dp *intel_dp,
1812 const struct intel_crtc_state *crtc_state)
1814 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1815 enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
1819 * Only HSW and BDW have PSR AUX registers that need to be set up.
1820 * SKL+ use hardcoded values for PSR AUX transactions
1822 if (DISPLAY_VER(dev_priv) < 9)
1823 hsw_psr_setup_aux(intel_dp);
1826 * Per spec: avoid continuous PSR exit by masking MEMUP and HPD. Also
1827 * mask LPSP to avoid a dependency on other drivers that might block
1828 * runtime_pm. Besides preventing other hw tracking issues, now we
1829 * can rely on frontbuffer tracking.
1831 * From bspec prior to LunarLake:
1832 * Only PSR_MASK[Mask FBC modify] and PSR_MASK[Mask Hotplug] are used in
1833 * panel replay mode.
1835 * From bspec beyond LunarLake:
1836 * Panel Replay on DP: No bits are applicable
1837 * Panel Replay on eDP: All bits are applicable
1839 if (DISPLAY_VER(dev_priv) < 20 || intel_dp_is_edp(intel_dp))
1840 mask = EDP_PSR_DEBUG_MASK_HPD;
1842 if (intel_dp_is_edp(intel_dp)) {
1843 mask |= EDP_PSR_DEBUG_MASK_MEMUP;
1846 * For some unknown reason on HSW non-ULT (or at least on
1847 * Dell Latitude E6540) external displays start to flicker
1848 * when PSR is enabled on the eDP. SR/PC6 residency is much
1849 * higher than should be possible with an external display.
1850 * As a workaround leave LPSP unmasked to prevent PSR entry
1851 * when external displays are active.
1853 if (DISPLAY_VER(dev_priv) >= 8 || IS_HASWELL_ULT(dev_priv))
1854 mask |= EDP_PSR_DEBUG_MASK_LPSP;
1856 if (DISPLAY_VER(dev_priv) < 20)
1857 mask |= EDP_PSR_DEBUG_MASK_MAX_SLEEP;
1860 * No separate pipe reg write mask on hsw/bdw, so have to unmask all
1861 * registers in order to keep the CURSURFLIVE tricks working :(
1863 if (IS_DISPLAY_VER(dev_priv, 9, 10))
1864 mask |= EDP_PSR_DEBUG_MASK_DISP_REG_WRITE;
1866 /* allow PSR with sprite enabled */
1867 if (IS_HASWELL(dev_priv))
1868 mask |= EDP_PSR_DEBUG_MASK_SPRITE_ENABLE;
1871 intel_de_write(dev_priv, psr_debug_reg(dev_priv, cpu_transcoder), mask);
1873 psr_irq_control(intel_dp);
1876 * TODO: if future platforms support DC3CO in more than one
1877 * transcoder, EXITLINE will need to be unset when disabling PSR
1879 if (intel_dp->psr.dc3co_exitline)
1880 intel_de_rmw(dev_priv, TRANS_EXITLINE(cpu_transcoder), EXITLINE_MASK,
1881 intel_dp->psr.dc3co_exitline << EXITLINE_SHIFT | EXITLINE_ENABLE);
1883 if (HAS_PSR_HW_TRACKING(dev_priv) && HAS_PSR2_SEL_FETCH(dev_priv))
1884 intel_de_rmw(dev_priv, CHICKEN_PAR1_1, IGNORE_PSR2_HW_TRACKING,
1885 intel_dp->psr.psr2_sel_fetch_enabled ?
1886 IGNORE_PSR2_HW_TRACKING : 0);
1888 if (intel_dp_is_edp(intel_dp))
1889 lnl_alpm_configure(intel_dp);
1895 wm_optimization_wa(intel_dp, crtc_state);
1897 if (intel_dp->psr.psr2_enabled) {
1898 if (DISPLAY_VER(dev_priv) == 9)
1899 intel_de_rmw(dev_priv, CHICKEN_TRANS(cpu_transcoder), 0,
1900 PSR2_VSC_ENABLE_PROG_HEADER |
1901 PSR2_ADD_VERTICAL_LINE_COUNT);
1904 * Wa_16014451276:adlp,mtl[a0,b0]
1905 * All supported adlp panels have 1-based X granularity; this may
1906 * cause issues if unsupported panels are used.
1908 if (IS_DISPLAY_IP_STEP(dev_priv, IP_VER(14, 0), STEP_A0, STEP_B0) ||
1909 IS_ALDERLAKE_P(dev_priv))
1910 intel_de_rmw(dev_priv, hsw_chicken_trans_reg(dev_priv, cpu_transcoder),
1911 0, ADLP_1_BASED_X_GRANULARITY);
1913 /* Wa_16012604467:adlp,mtl[a0,b0] */
1914 if (IS_DISPLAY_IP_STEP(dev_priv, IP_VER(14, 0), STEP_A0, STEP_B0))
1915 intel_de_rmw(dev_priv,
1916 MTL_CLKGATE_DIS_TRANS(cpu_transcoder), 0,
1917 MTL_CLKGATE_DIS_TRANS_DMASC_GATING_DIS);
1918 else if (IS_ALDERLAKE_P(dev_priv))
1919 intel_de_rmw(dev_priv, CLKGATE_DIS_MISC, 0,
1920 CLKGATE_DIS_MISC_DMASC_GATING_DIS);
1924 static bool psr_interrupt_error_check(struct intel_dp *intel_dp)
1926 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1927 enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
1930 if (intel_dp->psr.panel_replay_enabled)
1934 * If a PSR error happened and the driver is reloaded, the EDP_PSR_IIR
1935 * will still keep the error set even after the reset done in the
1936 * irq_preinstall and irq_uninstall hooks.
1937 * Enabling PSR in this situation causes the screen to freeze the
1938 * first time the PSR HW tries to activate, so let's keep PSR disabled
1939 * to avoid any rendering problems.
1941 val = intel_de_read(dev_priv, psr_iir_reg(dev_priv, cpu_transcoder));
1942 val &= psr_irq_psr_error_bit_get(intel_dp);
1944 intel_dp->psr.sink_not_reliable = true;
1945 drm_dbg_kms(&dev_priv->drm,
1946 "PSR interruption error set, not enabling PSR\n");
1954 static void intel_psr_enable_locked(struct intel_dp *intel_dp,
1955 const struct intel_crtc_state *crtc_state)
1957 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
1958 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1961 drm_WARN_ON(&dev_priv->drm, intel_dp->psr.enabled);
1963 intel_dp->psr.psr2_enabled = crtc_state->has_psr2;
1964 intel_dp->psr.panel_replay_enabled = crtc_state->has_panel_replay;
1965 intel_dp->psr.busy_frontbuffer_bits = 0;
1966 intel_dp->psr.pipe = to_intel_crtc(crtc_state->uapi.crtc)->pipe;
1967 intel_dp->psr.transcoder = crtc_state->cpu_transcoder;
1968 /* DC5/DC6 requires at least 6 idle frames */
1969 val = usecs_to_jiffies(intel_get_frame_time_us(crtc_state) * 6);
1970 intel_dp->psr.dc3co_exit_delay = val;
1971 intel_dp->psr.dc3co_exitline = crtc_state->dc3co_exitline;
1972 intel_dp->psr.psr2_sel_fetch_enabled = crtc_state->enable_psr2_sel_fetch;
1973 intel_dp->psr.psr2_sel_fetch_cff_enabled = false;
1974 intel_dp->psr.req_psr2_sdp_prior_scanline =
1975 crtc_state->req_psr2_sdp_prior_scanline;
1977 if (!psr_interrupt_error_check(intel_dp))
1980 if (intel_dp->psr.panel_replay_enabled) {
1981 drm_dbg_kms(&dev_priv->drm, "Enabling Panel Replay\n");
1983 drm_dbg_kms(&dev_priv->drm, "Enabling PSR%s\n",
1984 intel_dp->psr.psr2_enabled ? "2" : "1");
1987 * Panel replay has to be enabled before link training: doing it
1988 * only for PSR here.
1990 intel_psr_enable_sink(intel_dp, crtc_state);
1993 if (intel_dp_is_edp(intel_dp))
1994 intel_snps_phy_update_psr_power_state(&dig_port->base, true);
1996 intel_psr_enable_source(intel_dp, crtc_state);
1997 intel_dp->psr.enabled = true;
1998 intel_dp->psr.paused = false;
2000 intel_psr_activate(intel_dp);
2003 static void intel_psr_exit(struct intel_dp *intel_dp)
2005 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
2006 enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
2009 if (!intel_dp->psr.active) {
2010 if (transcoder_has_psr2(dev_priv, cpu_transcoder)) {
2011 val = intel_de_read(dev_priv, EDP_PSR2_CTL(cpu_transcoder));
2012 drm_WARN_ON(&dev_priv->drm, val & EDP_PSR2_ENABLE);
2015 val = intel_de_read(dev_priv, psr_ctl_reg(dev_priv, cpu_transcoder));
2016 drm_WARN_ON(&dev_priv->drm, val & EDP_PSR_ENABLE);
2021 if (intel_dp->psr.panel_replay_enabled) {
2022 intel_de_rmw(dev_priv, TRANS_DP2_CTL(intel_dp->psr.transcoder),
2023 TRANS_DP2_PANEL_REPLAY_ENABLE, 0);
2024 } else if (intel_dp->psr.psr2_enabled) {
2025 tgl_disallow_dc3co_on_psr2_exit(intel_dp);
2027 val = intel_de_rmw(dev_priv, EDP_PSR2_CTL(cpu_transcoder),
2028 EDP_PSR2_ENABLE, 0);
2030 drm_WARN_ON(&dev_priv->drm, !(val & EDP_PSR2_ENABLE));
2032 val = intel_de_rmw(dev_priv, psr_ctl_reg(dev_priv, cpu_transcoder),
2035 drm_WARN_ON(&dev_priv->drm, !(val & EDP_PSR_ENABLE));
2037 intel_dp->psr.active = false;
2040 static void intel_psr_wait_exit_locked(struct intel_dp *intel_dp)
2042 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
2043 enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
2044 i915_reg_t psr_status;
2045 u32 psr_status_mask;
2047 if (intel_dp->psr.psr2_enabled) {
2048 psr_status = EDP_PSR2_STATUS(cpu_transcoder);
2049 psr_status_mask = EDP_PSR2_STATUS_STATE_MASK;
2051 psr_status = psr_status_reg(dev_priv, cpu_transcoder);
2052 psr_status_mask = EDP_PSR_STATUS_STATE_MASK;
2055 /* Wait till PSR is idle */
2056 if (intel_de_wait_for_clear(dev_priv, psr_status,
2057 psr_status_mask, 2000))
2058 drm_err(&dev_priv->drm, "Timed out waiting PSR idle state\n");
2061 static void intel_psr_disable_locked(struct intel_dp *intel_dp)
2063 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
2064 enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
2066 lockdep_assert_held(&intel_dp->psr.lock);
2068 if (!intel_dp->psr.enabled)
2071 if (intel_dp->psr.panel_replay_enabled)
2072 drm_dbg_kms(&dev_priv->drm, "Disabling Panel Replay\n");
2074 drm_dbg_kms(&dev_priv->drm, "Disabling PSR%s\n",
2075 intel_dp->psr.psr2_enabled ? "2" : "1");
2077 intel_psr_exit(intel_dp);
2078 intel_psr_wait_exit_locked(intel_dp);
2084 if (DISPLAY_VER(dev_priv) >= 11)
2085 intel_de_rmw(dev_priv, GEN8_CHICKEN_DCPR_1,
2086 wa_16013835468_bit_get(intel_dp), 0);
2088 if (intel_dp->psr.psr2_enabled) {
2089 /* Wa_16012604467:adlp,mtl[a0,b0] */
2090 if (IS_DISPLAY_IP_STEP(dev_priv, IP_VER(14, 0), STEP_A0, STEP_B0))
2091 intel_de_rmw(dev_priv,
2092 MTL_CLKGATE_DIS_TRANS(cpu_transcoder),
2093 MTL_CLKGATE_DIS_TRANS_DMASC_GATING_DIS, 0);
2094 else if (IS_ALDERLAKE_P(dev_priv))
2095 intel_de_rmw(dev_priv, CLKGATE_DIS_MISC,
2096 CLKGATE_DIS_MISC_DMASC_GATING_DIS, 0);
2099 if (intel_dp_is_edp(intel_dp))
2100 intel_snps_phy_update_psr_power_state(&dp_to_dig_port(intel_dp)->base, false);
2102 /* Panel Replay on eDP always uses AUX-less ALPM. */
2103 if (intel_dp->psr.panel_replay_enabled && intel_dp_is_edp(intel_dp)) {
2104 intel_de_rmw(dev_priv, ALPM_CTL(cpu_transcoder),
2105 ALPM_CTL_ALPM_ENABLE |
2106 ALPM_CTL_ALPM_AUX_LESS_ENABLE, 0);
2108 intel_de_rmw(dev_priv, PORT_ALPM_CTL(cpu_transcoder),
2109 PORT_ALPM_CTL_ALPM_AUX_LESS_ENABLE, 0);
2112 /* Disable PSR on Sink */
2113 drm_dp_dpcd_writeb(&intel_dp->aux,
2114 intel_psr_get_enable_sink_offset(intel_dp), 0);
2116 if (!intel_dp->psr.panel_replay_enabled &&
2117 intel_dp->psr.psr2_enabled)
2118 drm_dp_dpcd_writeb(&intel_dp->aux, DP_RECEIVER_ALPM_CONFIG, 0);
2120 intel_dp->psr.enabled = false;
2121 intel_dp->psr.panel_replay_enabled = false;
2122 intel_dp->psr.psr2_enabled = false;
2123 intel_dp->psr.psr2_sel_fetch_enabled = false;
2124 intel_dp->psr.psr2_sel_fetch_cff_enabled = false;
2128 * intel_psr_disable - Disable PSR
2129 * @intel_dp: Intel DP
2130 * @old_crtc_state: old CRTC state
2132 * This function needs to be called before disabling the pipe.
2134 void intel_psr_disable(struct intel_dp *intel_dp,
2135 const struct intel_crtc_state *old_crtc_state)
2137 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
2139 if (!old_crtc_state->has_psr)
2142 if (drm_WARN_ON(&dev_priv->drm, !CAN_PSR(intel_dp)))
2145 mutex_lock(&intel_dp->psr.lock);
2147 intel_psr_disable_locked(intel_dp);
2149 mutex_unlock(&intel_dp->psr.lock);
2150 cancel_work_sync(&intel_dp->psr.work);
2151 cancel_delayed_work_sync(&intel_dp->psr.dc3co_work);
2155 * intel_psr_pause - Pause PSR
2156 * @intel_dp: Intel DP
2158 * This function needs to be called after enabling PSR.
2160 void intel_psr_pause(struct intel_dp *intel_dp)
2162 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
2163 struct intel_psr *psr = &intel_dp->psr;
2165 if (!CAN_PSR(intel_dp) && !CAN_PANEL_REPLAY(intel_dp))
2168 mutex_lock(&psr->lock);
2170 if (!psr->enabled) {
2171 mutex_unlock(&psr->lock);
2175 /* If we ever hit this, we will need to add refcount to pause/resume */
2176 drm_WARN_ON(&dev_priv->drm, psr->paused);
2178 intel_psr_exit(intel_dp);
2179 intel_psr_wait_exit_locked(intel_dp);
2182 mutex_unlock(&psr->lock);
2184 cancel_work_sync(&psr->work);
2185 cancel_delayed_work_sync(&psr->dc3co_work);
2189 * intel_psr_resume - Resume PSR
2190 * @intel_dp: Intel DP
2192 * This function needs to be called after pausing PSR.
2194 void intel_psr_resume(struct intel_dp *intel_dp)
2196 struct intel_psr *psr = &intel_dp->psr;
2198 if (!CAN_PSR(intel_dp) && !CAN_PANEL_REPLAY(intel_dp))
2201 mutex_lock(&psr->lock);
2206 psr->paused = false;
2207 intel_psr_activate(intel_dp);
2210 mutex_unlock(&psr->lock);
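/*
 * ADL-P and display 14+ apparently have no separate manual tracking
 * enable bit, hence returning 0 for those platforms.
 */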
2213 static u32 man_trk_ctl_enable_bit_get(struct drm_i915_private *dev_priv)
2215 return IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(dev_priv) >= 14 ? 0 :
2216 PSR2_MAN_TRK_CTL_ENABLE;
2219 static u32 man_trk_ctl_single_full_frame_bit_get(struct drm_i915_private *dev_priv)
2221 return IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(dev_priv) >= 14 ?
2222 ADLP_PSR2_MAN_TRK_CTL_SF_SINGLE_FULL_FRAME :
2223 PSR2_MAN_TRK_CTL_SF_SINGLE_FULL_FRAME;
2226 static u32 man_trk_ctl_partial_frame_bit_get(struct drm_i915_private *dev_priv)
2228 return IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(dev_priv) >= 14 ?
2229 ADLP_PSR2_MAN_TRK_CTL_SF_PARTIAL_FRAME_UPDATE :
2230 PSR2_MAN_TRK_CTL_SF_PARTIAL_FRAME_UPDATE;
2233 static u32 man_trk_ctl_continuos_full_frame(struct drm_i915_private *dev_priv)
2235 return IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(dev_priv) >= 14 ?
2236 ADLP_PSR2_MAN_TRK_CTL_SF_CONTINUOS_FULL_FRAME :
2237 PSR2_MAN_TRK_CTL_SF_CONTINUOS_FULL_FRAME;
2240 static void psr_force_hw_tracking_exit(struct intel_dp *intel_dp)
2242 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
2243 enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
2245 if (intel_dp->psr.psr2_sel_fetch_enabled)
2246 intel_de_write(dev_priv,
2247 PSR2_MAN_TRK_CTL(cpu_transcoder),
2248 man_trk_ctl_enable_bit_get(dev_priv) |
2249 man_trk_ctl_partial_frame_bit_get(dev_priv) |
2250 man_trk_ctl_single_full_frame_bit_get(dev_priv) |
2251 man_trk_ctl_continuos_full_frame(dev_priv));
2254 * Display WA #0884: skl+
2255 * This documented WA for bxt can be safely applied
2256 * broadly so we can force HW tracking to exit PSR
2257 * instead of disabling and re-enabling.
2258 * Workaround tells us to write 0 to CUR_SURFLIVE_A,
2259 * but it makes more sense to write to the currently active
2262 * This workaround does not exist for platforms with display 10 or newer
2263 * but testing proved that it works up to display 13; for newer
2264 * than that, testing will be needed.
2266 intel_de_write(dev_priv, CURSURFLIVE(intel_dp->psr.pipe), 0);
2269 void intel_psr2_program_trans_man_trk_ctl(const struct intel_crtc_state *crtc_state)
2271 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
2272 struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
2273 enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
2274 struct intel_encoder *encoder;
2276 if (!crtc_state->enable_psr2_sel_fetch)
2279 for_each_intel_encoder_mask_with_psr(&dev_priv->drm, encoder,
2280 crtc_state->uapi.encoder_mask) {
2281 struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
2283 lockdep_assert_held(&intel_dp->psr.lock);
2284 if (intel_dp->psr.psr2_sel_fetch_cff_enabled)
2289 intel_de_write(dev_priv, PSR2_MAN_TRK_CTL(cpu_transcoder),
2290 crtc_state->psr2_man_track_ctl);
2292 if (!crtc_state->enable_psr2_su_region_et)
2295 intel_de_write(dev_priv, PIPE_SRCSZ_ERLY_TPT(crtc->pipe),
2296 crtc_state->pipe_srcsz_early_tpt);
2299 static void psr2_man_trk_ctl_calc(struct intel_crtc_state *crtc_state,
2302 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
2303 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
2304 u32 val = man_trk_ctl_enable_bit_get(dev_priv);
2306 /* SF partial frame enable has to be set even on full update */
2307 val |= man_trk_ctl_partial_frame_bit_get(dev_priv);
2310 val |= man_trk_ctl_single_full_frame_bit_get(dev_priv);
2311 val |= man_trk_ctl_continuos_full_frame(dev_priv);
2315 if (crtc_state->psr2_su_area.y1 == -1)
2318 if (IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(dev_priv) >= 14) {
2319 val |= ADLP_PSR2_MAN_TRK_CTL_SU_REGION_START_ADDR(crtc_state->psr2_su_area.y1);
2320 val |= ADLP_PSR2_MAN_TRK_CTL_SU_REGION_END_ADDR(crtc_state->psr2_su_area.y2 - 1);
2322 drm_WARN_ON(crtc_state->uapi.crtc->dev,
2323 crtc_state->psr2_su_area.y1 % 4 ||
2324 crtc_state->psr2_su_area.y2 % 4);
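/*
 * Other platforms seem to program the SU region start/end in blocks
 * of 4 lines with a 1-based value, hence the alignment WARN above
 * and the division by 4 plus 1 below.
 */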
2326 val |= PSR2_MAN_TRK_CTL_SU_REGION_START_ADDR(
2327 crtc_state->psr2_su_area.y1 / 4 + 1);
2328 val |= PSR2_MAN_TRK_CTL_SU_REGION_END_ADDR(
2329 crtc_state->psr2_su_area.y2 / 4 + 1);
2332 crtc_state->psr2_man_track_ctl = val;
2336 psr2_pipe_srcsz_early_tpt_calc(struct intel_crtc_state *crtc_state,
2337 bool full_update, bool cursor_in_su_area)
2341 if (!crtc_state->enable_psr2_su_region_et || full_update)
2344 if (!cursor_in_su_area)
2345 return PIPESRC_WIDTH(0) |
2346 PIPESRC_HEIGHT(drm_rect_height(&crtc_state->pipe_src));
2348 width = drm_rect_width(&crtc_state->psr2_su_area);
2349 height = drm_rect_height(&crtc_state->psr2_su_area);
2351 return PIPESRC_WIDTH(width - 1) | PIPESRC_HEIGHT(height - 1);
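/*
 * Clip @damage_area against @pipe_src and grow @overlap_damage_area
 * vertically so that it covers the clipped damage.
 */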
2354 static void clip_area_update(struct drm_rect *overlap_damage_area,
2355 struct drm_rect *damage_area,
2356 struct drm_rect *pipe_src)
2358 if (!drm_rect_intersect(damage_area, pipe_src))
2361 if (overlap_damage_area->y1 == -1) {
2362 overlap_damage_area->y1 = damage_area->y1;
2363 overlap_damage_area->y2 = damage_area->y2;
2367 if (damage_area->y1 < overlap_damage_area->y1)
2368 overlap_damage_area->y1 = damage_area->y1;
2370 if (damage_area->y2 > overlap_damage_area->y2)
2371 overlap_damage_area->y2 = damage_area->y2;
2374 static void intel_psr2_sel_fetch_pipe_alignment(struct intel_crtc_state *crtc_state)
2376 struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
2377 const struct drm_dsc_config *vdsc_cfg = &crtc_state->dsc.config;
2380 /* ADLP aligns the SU region to vdsc slice height in case dsc is enabled */
2381 if (crtc_state->dsc.compression_enable &&
2382 (IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(dev_priv) >= 14))
2383 y_alignment = vdsc_cfg->slice_height;
2385 y_alignment = crtc_state->su_y_granularity;
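/* Round the SU area start down and its end up to the required y alignment */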
2387 crtc_state->psr2_su_area.y1 -= crtc_state->psr2_su_area.y1 % y_alignment;
2388 if (crtc_state->psr2_su_area.y2 % y_alignment)
2389 crtc_state->psr2_su_area.y2 = ((crtc_state->psr2_su_area.y2 /
2390 y_alignment) + 1) * y_alignment;
2394 * When early transport is in use we need to extend the SU area to
2395 * cover the cursor fully when the cursor is in the SU area.
2398 intel_psr2_sel_fetch_et_alignment(struct intel_atomic_state *state,
2399 struct intel_crtc *crtc,
2400 bool *cursor_in_su_area)
2402 struct intel_crtc_state *crtc_state = intel_atomic_get_new_crtc_state(state, crtc);
2403 struct intel_plane_state *new_plane_state;
2404 struct intel_plane *plane;
2407 if (!crtc_state->enable_psr2_su_region_et)
2410 for_each_new_intel_plane_in_state(state, plane, new_plane_state, i) {
2411 struct drm_rect inter;
2413 if (new_plane_state->uapi.crtc != crtc_state->uapi.crtc)
2416 if (plane->id != PLANE_CURSOR)
2419 if (!new_plane_state->uapi.visible)
2422 inter = crtc_state->psr2_su_area;
2423 if (!drm_rect_intersect(&inter, &new_plane_state->uapi.dst))
2426 clip_area_update(&crtc_state->psr2_su_area, &new_plane_state->uapi.dst,
2427 &crtc_state->pipe_src);
2428 *cursor_in_su_area = true;
2433 * TODO: Not clear how to handle planes with negative position,
2434 * also planes are not updated if they have a negative X
2435 * position, so for now do a full update in these cases
2437 * Plane scaling and rotation are not supported by selective fetch and both
2438 * properties can change without a modeset, so they need to be checked at every
2441 static bool psr2_sel_fetch_plane_state_supported(const struct intel_plane_state *plane_state)
2443 if (plane_state->uapi.dst.y1 < 0 ||
2444 plane_state->uapi.dst.x1 < 0 ||
2445 plane_state->scaler_id >= 0 ||
2446 plane_state->uapi.rotation != DRM_MODE_ROTATE_0)
2453 * Check for pipe properties that are not supported by selective fetch.
2455 * TODO: pipe scaling causes a modeset but skl_update_scaler_crtc() is executed
2456 * after intel_psr_compute_config(), so for now keeping PSR2 selective fetch
2457 * enabled and going to the full update path.
2459 static bool psr2_sel_fetch_pipe_state_supported(const struct intel_crtc_state *crtc_state)
2461 if (crtc_state->scaler_state.scaler_id >= 0)
2467 int intel_psr2_sel_fetch_update(struct intel_atomic_state *state,
2468 struct intel_crtc *crtc)
2470 struct drm_i915_private *dev_priv = to_i915(state->base.dev);
2471 struct intel_crtc_state *crtc_state = intel_atomic_get_new_crtc_state(state, crtc);
2472 struct intel_plane_state *new_plane_state, *old_plane_state;
2473 struct intel_plane *plane;
2474 bool full_update = false, cursor_in_su_area = false;
2477 if (!crtc_state->enable_psr2_sel_fetch)
2480 if (!psr2_sel_fetch_pipe_state_supported(crtc_state)) {
2482 goto skip_sel_fetch_set_loop;
2485 crtc_state->psr2_su_area.x1 = 0;
2486 crtc_state->psr2_su_area.y1 = -1;
2487 crtc_state->psr2_su_area.x2 = INT_MAX;
2488 crtc_state->psr2_su_area.y2 = -1;
2491 * Calculate the minimal selective fetch area of each plane and the
2492 * pipe damaged area.
2493 * In the next loop the plane selective fetch area will actually be set
2494 * using the whole pipe damaged area.
2496 for_each_oldnew_intel_plane_in_state(state, plane, old_plane_state,
2497 new_plane_state, i) {
2498 struct drm_rect src, damaged_area = { .x1 = 0, .y1 = -1,
2501 if (new_plane_state->uapi.crtc != crtc_state->uapi.crtc)
2504 if (!new_plane_state->uapi.visible &&
2505 !old_plane_state->uapi.visible)
2508 if (!psr2_sel_fetch_plane_state_supported(new_plane_state)) {
2514 * If visibility changed or the plane moved, mark the whole plane area
2515 * as damaged as it needs to be completely redrawn in the new and old
2518 if (new_plane_state->uapi.visible != old_plane_state->uapi.visible ||
2519 !drm_rect_equals(&new_plane_state->uapi.dst,
2520 &old_plane_state->uapi.dst)) {
2521 if (old_plane_state->uapi.visible) {
2522 damaged_area.y1 = old_plane_state->uapi.dst.y1;
2523 damaged_area.y2 = old_plane_state->uapi.dst.y2;
2524 clip_area_update(&crtc_state->psr2_su_area, &damaged_area,
2525 &crtc_state->pipe_src);
2528 if (new_plane_state->uapi.visible) {
2529 damaged_area.y1 = new_plane_state->uapi.dst.y1;
2530 damaged_area.y2 = new_plane_state->uapi.dst.y2;
2531 clip_area_update(&crtc_state->psr2_su_area, &damaged_area,
2532 &crtc_state->pipe_src);
2535 } else if (new_plane_state->uapi.alpha != old_plane_state->uapi.alpha) {
2536 /* If alpha changed mark the whole plane area as damaged */
2537 damaged_area.y1 = new_plane_state->uapi.dst.y1;
2538 damaged_area.y2 = new_plane_state->uapi.dst.y2;
2539 clip_area_update(&crtc_state->psr2_su_area, &damaged_area,
2540 &crtc_state->pipe_src);
2544 src = drm_plane_state_src(&new_plane_state->uapi);
2545 drm_rect_fp_to_int(&src, &src);
2547 if (!drm_atomic_helper_damage_merged(&old_plane_state->uapi,
2548 &new_plane_state->uapi, &damaged_area))
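/* Translate the merged damage from plane source coordinates to pipe coordinates */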
2551 damaged_area.y1 += new_plane_state->uapi.dst.y1 - src.y1;
2552 damaged_area.y2 += new_plane_state->uapi.dst.y1 - src.y1;
2553 damaged_area.x1 += new_plane_state->uapi.dst.x1 - src.x1;
2554 damaged_area.x2 += new_plane_state->uapi.dst.x1 - src.x1;
2556 clip_area_update(&crtc_state->psr2_su_area, &damaged_area, &crtc_state->pipe_src);
2560 * TODO: For now we are just using full update in case
2561 * selective fetch area calculation fails. To optimize this we
2562 * should identify cases where this happens and fix the area
2563 * calculation for those.
2565 if (crtc_state->psr2_su_area.y1 == -1) {
2566 drm_info_once(&dev_priv->drm,
2567 "Selective fetch area calculation failed in pipe %c\n",
2568 pipe_name(crtc->pipe));
2573 goto skip_sel_fetch_set_loop;
2575 /* Wa_14014971492 */
2576 if ((IS_DISPLAY_IP_STEP(dev_priv, IP_VER(14, 0), STEP_A0, STEP_B0) ||
2577 IS_ALDERLAKE_P(dev_priv) || IS_TIGERLAKE(dev_priv)) &&
2578 crtc_state->splitter.enable)
2579 crtc_state->psr2_su_area.y1 = 0;
2581 ret = drm_atomic_add_affected_planes(&state->base, &crtc->base);
2586 * Adjust the SU area to cover the cursor fully as necessary (early
2587 * transport). This needs to be done after
2588 * drm_atomic_add_affected_planes() to ensure a visible cursor is added
2589 * to the affected planes even when the cursor is not updated by itself.
2591 intel_psr2_sel_fetch_et_alignment(state, crtc, &cursor_in_su_area);
2593 intel_psr2_sel_fetch_pipe_alignment(crtc_state);
2596 * Now that we have the pipe damaged area, check if it intersects with
2597 * each plane; if it does, set the plane selective fetch area.
2599 for_each_oldnew_intel_plane_in_state(state, plane, old_plane_state,
2600 new_plane_state, i) {
2601 struct drm_rect *sel_fetch_area, inter;
2602 struct intel_plane *linked = new_plane_state->planar_linked_plane;
2604 if (new_plane_state->uapi.crtc != crtc_state->uapi.crtc ||
2605 !new_plane_state->uapi.visible)
2608 inter = crtc_state->psr2_su_area;
2609 sel_fetch_area = &new_plane_state->psr2_sel_fetch_area;
2610 if (!drm_rect_intersect(&inter, &new_plane_state->uapi.dst)) {
2611 sel_fetch_area->y1 = -1;
2612 sel_fetch_area->y2 = -1;
2614 * if plane sel fetch was previously enabled ->
2617 if (drm_rect_height(&old_plane_state->psr2_sel_fetch_area) > 0)
2618 crtc_state->update_planes |= BIT(plane->id);
2623 if (!psr2_sel_fetch_plane_state_supported(new_plane_state)) {
2628 sel_fetch_area = &new_plane_state->psr2_sel_fetch_area;
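/* The selective fetch area is expressed relative to the plane's own position */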
2629 sel_fetch_area->y1 = inter.y1 - new_plane_state->uapi.dst.y1;
2630 sel_fetch_area->y2 = inter.y2 - new_plane_state->uapi.dst.y1;
2631 crtc_state->update_planes |= BIT(plane->id);
2634 * Sel_fetch_area is calculated for UV plane. Use
2635 * same area for Y plane as well.
2638 struct intel_plane_state *linked_new_plane_state;
2639 struct drm_rect *linked_sel_fetch_area;
2641 linked_new_plane_state = intel_atomic_get_plane_state(state, linked);
2642 if (IS_ERR(linked_new_plane_state))
2643 return PTR_ERR(linked_new_plane_state);
2645 linked_sel_fetch_area = &linked_new_plane_state->psr2_sel_fetch_area;
2646 linked_sel_fetch_area->y1 = sel_fetch_area->y1;
2647 linked_sel_fetch_area->y2 = sel_fetch_area->y2;
2648 crtc_state->update_planes |= BIT(linked->id);
2652 skip_sel_fetch_set_loop:
2653 psr2_man_trk_ctl_calc(crtc_state, full_update);
2654 crtc_state->pipe_srcsz_early_tpt =
2655 psr2_pipe_srcsz_early_tpt_calc(crtc_state, full_update,
2660 void intel_psr_pre_plane_update(struct intel_atomic_state *state,
2661 struct intel_crtc *crtc)
2663 struct drm_i915_private *i915 = to_i915(state->base.dev);
2664 const struct intel_crtc_state *old_crtc_state =
2665 intel_atomic_get_old_crtc_state(state, crtc);
2666 const struct intel_crtc_state *new_crtc_state =
2667 intel_atomic_get_new_crtc_state(state, crtc);
2668 struct intel_encoder *encoder;
2673 for_each_intel_encoder_mask_with_psr(state->base.dev, encoder,
2674 old_crtc_state->uapi.encoder_mask) {
2675 struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
2676 struct intel_psr *psr = &intel_dp->psr;
2677 bool needs_to_disable = false;
2679 mutex_lock(&psr->lock);
2682 * Reasons to disable:
2683 * - PSR disabled in new state
2684 * - All planes will go inactive
2685 * - Changing between PSR versions
2686 * - Display WA #1136: skl, bxt
2688 needs_to_disable |= intel_crtc_needs_modeset(new_crtc_state);
2689 needs_to_disable |= !new_crtc_state->has_psr;
2690 needs_to_disable |= !new_crtc_state->active_planes;
2691 needs_to_disable |= new_crtc_state->has_psr2 != psr->psr2_enabled;
2692 needs_to_disable |= DISPLAY_VER(i915) < 11 &&
2693 new_crtc_state->wm_level_disabled;
2695 if (psr->enabled && needs_to_disable)
2696 intel_psr_disable_locked(intel_dp);
2697 else if (psr->enabled && new_crtc_state->wm_level_disabled)
2698 /* Wa_14015648006 */
2699 wm_optimization_wa(intel_dp, new_crtc_state);
2701 mutex_unlock(&psr->lock);
2705 void intel_psr_post_plane_update(struct intel_atomic_state *state,
2706 struct intel_crtc *crtc)
2708 struct drm_i915_private *dev_priv = to_i915(state->base.dev);
2709 const struct intel_crtc_state *crtc_state =
2710 intel_atomic_get_new_crtc_state(state, crtc);
2711 struct intel_encoder *encoder;
2713 if (!crtc_state->has_psr)
2716 for_each_intel_encoder_mask_with_psr(state->base.dev, encoder,
2717 crtc_state->uapi.encoder_mask) {
2718 struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
2719 struct intel_psr *psr = &intel_dp->psr;
2720 bool keep_disabled = false;
2722 mutex_lock(&psr->lock);
2724 drm_WARN_ON(&dev_priv->drm, psr->enabled && !crtc_state->active_planes);
2726 keep_disabled |= psr->sink_not_reliable;
2727 keep_disabled |= !crtc_state->active_planes;
2729 /* Display WA #1136: skl, bxt */
2730 keep_disabled |= DISPLAY_VER(dev_priv) < 11 &&
2731 crtc_state->wm_level_disabled;
2733 if (!psr->enabled && !keep_disabled)
2734 intel_psr_enable_locked(intel_dp, crtc_state);
2735 else if (psr->enabled && !crtc_state->wm_level_disabled)
2736 /* Wa_14015648006 */
2737 wm_optimization_wa(intel_dp, crtc_state);
2739 /* Force a PSR exit when enabling CRC to avoid CRC timeouts */
2740 if (crtc_state->crc_enabled && psr->enabled)
2741 psr_force_hw_tracking_exit(intel_dp);
2744 * Clear possible busy bits in case we have
2745 * invalidate -> flip -> flush sequence.
2747 intel_dp->psr.busy_frontbuffer_bits = 0;
2749 mutex_unlock(&psr->lock);
2753 static int _psr2_ready_for_pipe_update_locked(struct intel_dp *intel_dp)
2755 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
2756 enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
2759 * Any state lower than EDP_PSR2_STATUS_STATE_DEEP_SLEEP is enough.
2760 * As all higher states have bit 4 of the PSR2 state set we can just wait for
2761 * EDP_PSR2_STATUS_STATE_DEEP_SLEEP to be cleared.
2763 return intel_de_wait_for_clear(dev_priv,
2764 EDP_PSR2_STATUS(cpu_transcoder),
2765 EDP_PSR2_STATUS_STATE_DEEP_SLEEP, 50);
2768 static int _psr1_ready_for_pipe_update_locked(struct intel_dp *intel_dp)
2770 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
2771 enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
2774 * From bspec: Panel Self Refresh (BDW+)
2775 * Max. time for PSR to idle = Inverse of the refresh rate + 6 ms of
2776 * exit training time + 1.5 ms of aux channel handshake. 50 ms is
2777 * defensive enough to cover everything.
2779 return intel_de_wait_for_clear(dev_priv,
2780 psr_status_reg(dev_priv, cpu_transcoder),
2781 EDP_PSR_STATUS_STATE_MASK, 50);
2785 * intel_psr_wait_for_idle_locked - wait for PSR to be ready for a pipe update
2786 * @new_crtc_state: new CRTC state
2788 * This function is expected to be called from pipe_update_start() where it is
2789 * not expected to race with PSR enable or disable.
2791 void intel_psr_wait_for_idle_locked(const struct intel_crtc_state *new_crtc_state)
2793 struct drm_i915_private *dev_priv = to_i915(new_crtc_state->uapi.crtc->dev);
2794 struct intel_encoder *encoder;
2796 if (!new_crtc_state->has_psr)
2799 for_each_intel_encoder_mask_with_psr(&dev_priv->drm, encoder,
2800 new_crtc_state->uapi.encoder_mask) {
2801 struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
2804 lockdep_assert_held(&intel_dp->psr.lock);
2806 if (!intel_dp->psr.enabled)
2809 if (intel_dp->psr.psr2_enabled)
2810 ret = _psr2_ready_for_pipe_update_locked(intel_dp);
2812 ret = _psr1_ready_for_pipe_update_locked(intel_dp);
2815 drm_err(&dev_priv->drm, "PSR wait timed out, atomic update may fail\n");
2819 static bool __psr_wait_for_idle_locked(struct intel_dp *intel_dp)
2821 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
2822 enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
2827 if (!intel_dp->psr.enabled)
2830 if (intel_dp->psr.psr2_enabled) {
2831 reg = EDP_PSR2_STATUS(cpu_transcoder);
2832 mask = EDP_PSR2_STATUS_STATE_MASK;
2834 reg = psr_status_reg(dev_priv, cpu_transcoder);
2835 mask = EDP_PSR_STATUS_STATE_MASK;
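/*
 * The wait below can take up to 50 ms, so drop the PSR lock while
 * waiting; PSR state is re-checked after re-acquiring it.
 */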
2838 mutex_unlock(&intel_dp->psr.lock);
2840 err = intel_de_wait_for_clear(dev_priv, reg, mask, 50);
2842 drm_err(&dev_priv->drm,
2843 "Timed out waiting for PSR Idle for re-enable\n");
2845 /* After the unlocked wait, verify that PSR is still wanted! */
2846 mutex_lock(&intel_dp->psr.lock);
2847 return err == 0 && intel_dp->psr.enabled;
2850 static int intel_psr_fastset_force(struct drm_i915_private *dev_priv)
2852 struct drm_connector_list_iter conn_iter;
2853 struct drm_modeset_acquire_ctx ctx;
2854 struct drm_atomic_state *state;
2855 struct drm_connector *conn;
2858 state = drm_atomic_state_alloc(&dev_priv->drm);
2862 drm_modeset_acquire_init(&ctx, DRM_MODESET_ACQUIRE_INTERRUPTIBLE);
2864 state->acquire_ctx = &ctx;
2865 to_intel_atomic_state(state)->internal = true;
2868 drm_connector_list_iter_begin(&dev_priv->drm, &conn_iter);
2869 drm_for_each_connector_iter(conn, &conn_iter) {
2870 struct drm_connector_state *conn_state;
2871 struct drm_crtc_state *crtc_state;
2873 if (conn->connector_type != DRM_MODE_CONNECTOR_eDP)
2876 conn_state = drm_atomic_get_connector_state(state, conn);
2877 if (IS_ERR(conn_state)) {
2878 err = PTR_ERR(conn_state);
2882 if (!conn_state->crtc)
2885 crtc_state = drm_atomic_get_crtc_state(state, conn_state->crtc);
2886 if (IS_ERR(crtc_state)) {
2887 err = PTR_ERR(crtc_state);
2891 /* Mark mode as changed to trigger a pipe->update() */
2892 crtc_state->mode_changed = true;
2894 drm_connector_list_iter_end(&conn_iter);
2897 err = drm_atomic_commit(state);
2899 if (err == -EDEADLK) {
2900 drm_atomic_state_clear(state);
2901 err = drm_modeset_backoff(&ctx);
2906 drm_modeset_drop_locks(&ctx);
2907 drm_modeset_acquire_fini(&ctx);
2908 drm_atomic_state_put(state);
2913 int intel_psr_debug_set(struct intel_dp *intel_dp, u64 val)
2915 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
2916 const u32 mode = val & I915_PSR_DEBUG_MODE_MASK;
2920 if (val & ~(I915_PSR_DEBUG_IRQ | I915_PSR_DEBUG_MODE_MASK) ||
2921 mode > I915_PSR_DEBUG_ENABLE_SEL_FETCH) {
2922 drm_dbg_kms(&dev_priv->drm, "Invalid debug mask %llx\n", val);
2926 ret = mutex_lock_interruptible(&intel_dp->psr.lock);
2930 old_mode = intel_dp->psr.debug & I915_PSR_DEBUG_MODE_MASK;
2931 intel_dp->psr.debug = val;
2934 * Do it right away if it's already enabled, otherwise it will be done
2935 * when enabling the source.
2937 if (intel_dp->psr.enabled)
2938 psr_irq_control(intel_dp);
2940 mutex_unlock(&intel_dp->psr.lock);
2942 if (old_mode != mode)
2943 ret = intel_psr_fastset_force(dev_priv);
2948 static void intel_psr_handle_irq(struct intel_dp *intel_dp)
2950 struct intel_psr *psr = &intel_dp->psr;
2952 intel_psr_disable_locked(intel_dp);
2953 psr->sink_not_reliable = true;
2954 /* let's make sure that sink is awaken */
2955 drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER, DP_SET_POWER_D0);
2958 static void intel_psr_work(struct work_struct *work)
2960 struct intel_dp *intel_dp =
2961 container_of(work, typeof(*intel_dp), psr.work);
2963 mutex_lock(&intel_dp->psr.lock);
2965 if (!intel_dp->psr.enabled)
2968 if (READ_ONCE(intel_dp->psr.irq_aux_error))
2969 intel_psr_handle_irq(intel_dp);
2972 * We have to make sure PSR is ready for re-enable
2973 * otherwise it stays disabled until the next full enable/disable cycle.
2974 * PSR might take some time to get fully disabled
2975 * and be ready for re-enable.
2977 if (!__psr_wait_for_idle_locked(intel_dp))
2981 * The delayed work can race with an invalidate, hence we need to
2982 * recheck. Since psr_flush first clears this and then reschedules, we
2983 * won't ever miss a flush when bailing out here.
2985 if (intel_dp->psr.busy_frontbuffer_bits || intel_dp->psr.active)
2988 intel_psr_activate(intel_dp);
2990 mutex_unlock(&intel_dp->psr.lock);
2993 static void _psr_invalidate_handle(struct intel_dp *intel_dp)
2995 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
2996 enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
2998 if (intel_dp->psr.psr2_sel_fetch_enabled) {
3001 if (intel_dp->psr.psr2_sel_fetch_cff_enabled) {
3002 /* Send one update, otherwise lag is observed on screen */
3003 intel_de_write(dev_priv, CURSURFLIVE(intel_dp->psr.pipe), 0);
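/*
 * Switch to continuous full frame (CFF) updates so no update is
 * missed until the corresponding flush turns CFF off again.
 */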
3007 val = man_trk_ctl_enable_bit_get(dev_priv) |
3008 man_trk_ctl_partial_frame_bit_get(dev_priv) |
3009 man_trk_ctl_continuos_full_frame(dev_priv);
3010 intel_de_write(dev_priv, PSR2_MAN_TRK_CTL(cpu_transcoder), val);
3011 intel_de_write(dev_priv, CURSURFLIVE(intel_dp->psr.pipe), 0);
3012 intel_dp->psr.psr2_sel_fetch_cff_enabled = true;
3014 intel_psr_exit(intel_dp);
3019 * intel_psr_invalidate - Invalidate PSR
3020 * @dev_priv: i915 device
3021 * @frontbuffer_bits: frontbuffer plane tracking bits
3022 * @origin: which operation caused the invalidate
3024 * Since the hardware frontbuffer tracking has gaps we need to integrate
3025 * with the software frontbuffer tracking. This function gets called every
3026 * time frontbuffer rendering starts and a buffer gets dirtied. PSR must be
3027 * disabled if the frontbuffer mask contains a buffer relevant to PSR.
3029 * Dirty frontbuffers relevant to PSR are tracked in busy_frontbuffer_bits.
3031 void intel_psr_invalidate(struct drm_i915_private *dev_priv,
3032 unsigned frontbuffer_bits, enum fb_op_origin origin)
3034 struct intel_encoder *encoder;
3036 if (origin == ORIGIN_FLIP)
3039 for_each_intel_encoder_with_psr(&dev_priv->drm, encoder) {
3040 unsigned int pipe_frontbuffer_bits = frontbuffer_bits;
3041 struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
3043 mutex_lock(&intel_dp->psr.lock);
3044 if (!intel_dp->psr.enabled) {
3045 mutex_unlock(&intel_dp->psr.lock);
3049 pipe_frontbuffer_bits &=
3050 INTEL_FRONTBUFFER_ALL_MASK(intel_dp->psr.pipe);
3051 intel_dp->psr.busy_frontbuffer_bits |= pipe_frontbuffer_bits;
3053 if (pipe_frontbuffer_bits)
3054 _psr_invalidate_handle(intel_dp);
3056 mutex_unlock(&intel_dp->psr.lock);
3060 * When we completely rely on PSR2 S/W tracking in the future,
3061 * intel_psr_flush() will invalidate and flush the PSR for the ORIGIN_FLIP
3062 * event as well, therefore tgl_dc3co_flush_locked() will need to be changed
3063 * accordingly.
3066 tgl_dc3co_flush_locked(struct intel_dp *intel_dp, unsigned int frontbuffer_bits,
3067 enum fb_op_origin origin)
3069 struct drm_i915_private *i915 = dp_to_i915(intel_dp);
3071 if (!intel_dp->psr.dc3co_exitline || !intel_dp->psr.psr2_enabled ||
3072 !intel_dp->psr.active)
3076 * At every frontbuffer flush flip event the delay of the delayed work is
3077 * modified; when the delayed work runs it means the display has been idle.
3079 if (!(frontbuffer_bits &
3080 INTEL_FRONTBUFFER_ALL_MASK(intel_dp->psr.pipe)))
3083 tgl_psr2_enable_dc3co(intel_dp);
3084 mod_delayed_work(i915->unordered_wq, &intel_dp->psr.dc3co_work,
3085 intel_dp->psr.dc3co_exit_delay);
3088 static void _psr_flush_handle(struct intel_dp *intel_dp)
3090 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
3091 enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
3093 if (intel_dp->psr.psr2_sel_fetch_enabled) {
3094 if (intel_dp->psr.psr2_sel_fetch_cff_enabled) {
3095 /* can we turn CFF off? */
3096 if (intel_dp->psr.busy_frontbuffer_bits == 0) {
3097 u32 val = man_trk_ctl_enable_bit_get(dev_priv) |
3098 man_trk_ctl_partial_frame_bit_get(dev_priv) |
3099 man_trk_ctl_single_full_frame_bit_get(dev_priv) |
3100 man_trk_ctl_continuos_full_frame(dev_priv);
3103 * Set psr2_sel_fetch_cff_enabled to false to allow selective
3104 * updates. Still keep the CFF bit enabled as we don't have a proper
3105 * SU configuration in case an update is sent for any reason after
3106 * the SFF bit gets cleared by the HW on the next vblank.
3108 intel_de_write(dev_priv, PSR2_MAN_TRK_CTL(cpu_transcoder),
3110 intel_de_write(dev_priv, CURSURFLIVE(intel_dp->psr.pipe), 0);
3111 intel_dp->psr.psr2_sel_fetch_cff_enabled = false;
3115 * continuous full frame is disabled, only a single full
3118 psr_force_hw_tracking_exit(intel_dp);
3121 psr_force_hw_tracking_exit(intel_dp);
3123 if (!intel_dp->psr.active && !intel_dp->psr.busy_frontbuffer_bits)
3124 queue_work(dev_priv->unordered_wq, &intel_dp->psr.work);
3129 * intel_psr_flush - Flush PSR
3130 * @dev_priv: i915 device
3131 * @frontbuffer_bits: frontbuffer plane tracking bits
3132 * @origin: which operation caused the flush
3134 * Since the hardware frontbuffer tracking has gaps we need to integrate
3135 * with the software frontbuffer tracking. This function gets called every
3136 * time frontbuffer rendering has completed and flushed out to memory. PSR
3137 * can be enabled again if no other frontbuffer relevant to PSR is dirty.
3139 * Dirty frontbuffers relevant to PSR are tracked in busy_frontbuffer_bits.
3141 void intel_psr_flush(struct drm_i915_private *dev_priv,
3142 unsigned frontbuffer_bits, enum fb_op_origin origin)
3144 struct intel_encoder *encoder;
3146 for_each_intel_encoder_with_psr(&dev_priv->drm, encoder) {
3147 unsigned int pipe_frontbuffer_bits = frontbuffer_bits;
3148 struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
3150 mutex_lock(&intel_dp->psr.lock);
3151 if (!intel_dp->psr.enabled) {
3152 mutex_unlock(&intel_dp->psr.lock);
3156 pipe_frontbuffer_bits &=
3157 INTEL_FRONTBUFFER_ALL_MASK(intel_dp->psr.pipe);
3158 intel_dp->psr.busy_frontbuffer_bits &= ~pipe_frontbuffer_bits;
3161 * If the PSR is paused by an explicit intel_psr_pause() call,
3162 * we have to ensure that the PSR is not activated until
3163 * intel_psr_resume() is called.
3165 if (intel_dp->psr.paused)
3168 if (origin == ORIGIN_FLIP ||
3169 (origin == ORIGIN_CURSOR_UPDATE &&
3170 !intel_dp->psr.psr2_sel_fetch_enabled)) {
3171 tgl_dc3co_flush_locked(intel_dp, frontbuffer_bits, origin);
3175 if (pipe_frontbuffer_bits == 0)
3178 /* By definition flush = invalidate + flush */
3179 _psr_flush_handle(intel_dp);
3181 mutex_unlock(&intel_dp->psr.lock);
3186 * intel_psr_init - Init basic PSR work and mutex.
3187 * @intel_dp: Intel DP
3189 * This function is called after initializing the connector
3190 * (the connector initialization handles the connector capabilities),
3191 * and it initializes the basic PSR stuff for each DP encoder.
3193 void intel_psr_init(struct intel_dp *intel_dp)
3195 struct intel_connector *connector = intel_dp->attached_connector;
3196 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
3197 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
3199 if (!(HAS_PSR(dev_priv) || HAS_DP20(dev_priv)))
3203 * HSW spec explicitly says PSR is tied to port A.
3204 * BDW+ platforms have an instance of PSR registers per transcoder but
3205 * BDW, GEN9 and GEN11 are not validated by the HW team in other transcoders
3207 * For now it only supports one instance of PSR for BDW, GEN9 and GEN11.
3208 * So let's keep it hardcoded to PORT_A for BDW, GEN9 and GEN11.
3209 * But GEN12 supports an instance of PSR registers per transcoder.
3211 if (DISPLAY_VER(dev_priv) < 12 && dig_port->base.port != PORT_A) {
3212 drm_dbg_kms(&dev_priv->drm,
3213 "PSR condition failed: Port not supported\n");
3217 if (HAS_DP20(dev_priv) && !intel_dp_is_edp(intel_dp))
3218 intel_dp->psr.source_panel_replay_support = true;
3220 intel_dp->psr.source_support = true;
3222 /* Disable early transport for now */
3223 intel_dp->psr.debug |= I915_PSR_DEBUG_SU_REGION_ET_DISABLE;
3225 /* Set link_standby x link_off defaults */
3226 if (DISPLAY_VER(dev_priv) < 12)
3227 /* For new platforms up to TGL let's respect VBT back again */
3228 intel_dp->psr.link_standby = connector->panel.vbt.psr.full_link;
3230 INIT_WORK(&intel_dp->psr.work, intel_psr_work);
3231 INIT_DELAYED_WORK(&intel_dp->psr.dc3co_work, tgl_dc3co_disable_work);
3232 mutex_init(&intel_dp->psr.lock);
3235 static int psr_get_status_and_error_status(struct intel_dp *intel_dp,
3236 u8 *status, u8 *error_status)
3238 struct drm_dp_aux *aux = &intel_dp->aux;
3240 unsigned int offset;
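/* Panel Replay and PSR report sink status/errors through different DPCD registers */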
3242 offset = intel_dp->psr.panel_replay_enabled ?
3243 DP_SINK_DEVICE_PR_AND_FRAME_LOCK_STATUS : DP_PSR_STATUS;
3245 ret = drm_dp_dpcd_readb(aux, offset, status);
3249 offset = intel_dp->psr.panel_replay_enabled ?
3250 DP_PANEL_REPLAY_ERROR_STATUS : DP_PSR_ERROR_STATUS;
3252 ret = drm_dp_dpcd_readb(aux, offset, error_status);
3256 *status = *status & DP_PSR_SINK_STATE_MASK;
3261 static void psr_alpm_check(struct intel_dp *intel_dp)
3263 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
3264 struct drm_dp_aux *aux = &intel_dp->aux;
3265 struct intel_psr *psr = &intel_dp->psr;
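/* Sink ALPM is only used together with PSR2 here, nothing to check otherwise */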
3269 if (!psr->psr2_enabled)
3272 r = drm_dp_dpcd_readb(aux, DP_RECEIVER_ALPM_STATUS, &val);
3274 drm_err(&dev_priv->drm, "Error reading ALPM status\n");
3278 if (val & DP_ALPM_LOCK_TIMEOUT_ERROR) {
3279 intel_psr_disable_locked(intel_dp);
3280 psr->sink_not_reliable = true;
3281 drm_dbg_kms(&dev_priv->drm,
3282 "ALPM lock timeout error, disabling PSR\n");
3284 /* Clearing error */
3285 drm_dp_dpcd_writeb(aux, DP_RECEIVER_ALPM_STATUS, val);
3289 static void psr_capability_changed_check(struct intel_dp *intel_dp)
3291 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
3292 struct intel_psr *psr = &intel_dp->psr;
3296 r = drm_dp_dpcd_readb(&intel_dp->aux, DP_PSR_ESI, &val);
3298 drm_err(&dev_priv->drm, "Error reading DP_PSR_ESI\n");
3302 if (val & DP_PSR_CAPS_CHANGE) {
3303 intel_psr_disable_locked(intel_dp);
3304 psr->sink_not_reliable = true;
3305 drm_dbg_kms(&dev_priv->drm,
3306 "Sink PSR capability changed, disabling PSR\n");
3309 drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_ESI, val);
3315 * DP_PSR_RFB_STORAGE_ERROR == DP_PANEL_REPLAY_RFB_STORAGE_ERROR
3316 * DP_PSR_VSC_SDP_UNCORRECTABLE_ERROR == DP_PANEL_REPLAY_VSC_SDP_UNCORRECTABLE_ERROR
3317 * DP_PSR_LINK_CRC_ERROR == DP_PANEL_REPLAY_LINK_CRC_ERROR
3318 * this function is relying on PSR definitions
3320 void intel_psr_short_pulse(struct intel_dp *intel_dp)
3322 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
3323 struct intel_psr *psr = &intel_dp->psr;
3324 u8 status, error_status;
3325 const u8 errors = DP_PSR_RFB_STORAGE_ERROR |
3326 DP_PSR_VSC_SDP_UNCORRECTABLE_ERROR |
3327 DP_PSR_LINK_CRC_ERROR;
3329 if (!CAN_PSR(intel_dp) && !CAN_PANEL_REPLAY(intel_dp))
3332 mutex_lock(&psr->lock);
3337 if (psr_get_status_and_error_status(intel_dp, &status, &error_status)) {
3338 drm_err(&dev_priv->drm,
3339 "Error reading PSR status or error status\n");
3343 if ((!psr->panel_replay_enabled && status == DP_PSR_SINK_INTERNAL_ERROR) ||
3344 (error_status & errors)) {
3345 intel_psr_disable_locked(intel_dp);
3346 psr->sink_not_reliable = true;
3349 if (!psr->panel_replay_enabled && status == DP_PSR_SINK_INTERNAL_ERROR &&
3351 drm_dbg_kms(&dev_priv->drm,
3352 "PSR sink internal error, disabling PSR\n");
3353 if (error_status & DP_PSR_RFB_STORAGE_ERROR)
3354 drm_dbg_kms(&dev_priv->drm,
3355 "PSR RFB storage error, disabling PSR\n");
3356 if (error_status & DP_PSR_VSC_SDP_UNCORRECTABLE_ERROR)
3357 drm_dbg_kms(&dev_priv->drm,
3358 "PSR VSC SDP uncorrectable error, disabling PSR\n");
3359 if (error_status & DP_PSR_LINK_CRC_ERROR)
3360 drm_dbg_kms(&dev_priv->drm,
3361 "PSR Link CRC error, disabling PSR\n");
3363 if (error_status & ~errors)
3364 drm_err(&dev_priv->drm,
3365 "PSR_ERROR_STATUS unhandled errors %x\n",
3366 error_status & ~errors);
3367 /* clear status register */
3368 drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_ERROR_STATUS, error_status);
3370 if (!psr->panel_replay_enabled) {
3371 psr_alpm_check(intel_dp);
3372 psr_capability_changed_check(intel_dp);
3376 mutex_unlock(&psr->lock);
3379 bool intel_psr_enabled(struct intel_dp *intel_dp)
3383 if (!CAN_PSR(intel_dp))
3386 mutex_lock(&intel_dp->psr.lock);
3387 ret = intel_dp->psr.enabled;
3388 mutex_unlock(&intel_dp->psr.lock);
3394 * intel_psr_lock - grab PSR lock
3395 * @crtc_state: the crtc state
3397 * This is initially meant to be used around a CRTC update, when
3398 * vblank sensitive registers are updated and we need to grab the lock
3399 * before that to avoid vblank evasion.
3401 void intel_psr_lock(const struct intel_crtc_state *crtc_state)
3403 struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
3404 struct intel_encoder *encoder;
3406 if (!crtc_state->has_psr)
3409 for_each_intel_encoder_mask_with_psr(&i915->drm, encoder,
3410 crtc_state->uapi.encoder_mask) {
3411 struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
3413 mutex_lock(&intel_dp->psr.lock);
3419 * intel_psr_unlock - release PSR lock
3420 * @crtc_state: the crtc state
3422 * Release the PSR lock that was held during pipe update.
3424 void intel_psr_unlock(const struct intel_crtc_state *crtc_state)
3426 struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
3427 struct intel_encoder *encoder;
3429 if (!crtc_state->has_psr)
3432 for_each_intel_encoder_mask_with_psr(&i915->drm, encoder,
3433 crtc_state->uapi.encoder_mask) {
3434 struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
3436 mutex_unlock(&intel_dp->psr.lock);
3442 psr_source_status(struct intel_dp *intel_dp, struct seq_file *m)
3444 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
3445 enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
3446 const char *status = "unknown";
3447 u32 val, status_val;
3449 if (intel_dp->psr.psr2_enabled) {
3450 static const char * const live_status[] = {
3463 val = intel_de_read(dev_priv, EDP_PSR2_STATUS(cpu_transcoder));
3464 status_val = REG_FIELD_GET(EDP_PSR2_STATUS_STATE_MASK, val);
3465 if (status_val < ARRAY_SIZE(live_status))
3466 status = live_status[status_val];
3468 static const char * const live_status[] = {
3478 val = intel_de_read(dev_priv, psr_status_reg(dev_priv, cpu_transcoder));
3479 status_val = REG_FIELD_GET(EDP_PSR_STATUS_STATE_MASK, val);
3480 if (status_val < ARRAY_SIZE(live_status))
3481 status = live_status[status_val];
3484 seq_printf(m, "Source PSR/PanelReplay status: %s [0x%08x]\n", status, val);
3487 static int intel_psr_status(struct seq_file *m, struct intel_dp *intel_dp)
3489 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
3490 enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
3491 struct intel_psr *psr = &intel_dp->psr;
3492 intel_wakeref_t wakeref;
3497 seq_printf(m, "Sink support: PSR = %s",
3498 str_yes_no(psr->sink_support));
3500 if (psr->sink_support)
3501 seq_printf(m, " [0x%02x]", intel_dp->psr_dpcd[0]);
3502 seq_printf(m, ", Panel Replay = %s\n", str_yes_no(psr->sink_panel_replay_support));
3504 if (!(psr->sink_support || psr->sink_panel_replay_support))
3507 wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
3508 mutex_lock(&psr->lock);
3510 if (psr->panel_replay_enabled)
3511 status = "Panel Replay Enabled";
3512 else if (psr->enabled)
3513 status = psr->psr2_enabled ? "PSR2 enabled" : "PSR1 enabled";
3515 status = "disabled";
3516 seq_printf(m, "PSR mode: %s\n", status);
3518 if (!psr->enabled) {
3519 seq_printf(m, "PSR sink not reliable: %s\n",
3520 str_yes_no(psr->sink_not_reliable));
3525 if (psr->panel_replay_enabled) {
3526 val = intel_de_read(dev_priv, TRANS_DP2_CTL(cpu_transcoder));
3527 enabled = val & TRANS_DP2_PANEL_REPLAY_ENABLE;
3528 } else if (psr->psr2_enabled) {
3529 val = intel_de_read(dev_priv, EDP_PSR2_CTL(cpu_transcoder));
3530 enabled = val & EDP_PSR2_ENABLE;
3532 val = intel_de_read(dev_priv, psr_ctl_reg(dev_priv, cpu_transcoder));
3533 enabled = val & EDP_PSR_ENABLE;
3535 seq_printf(m, "Source PSR/PanelReplay ctl: %s [0x%08x]\n",
3536 str_enabled_disabled(enabled), val);
3537 psr_source_status(intel_dp, m);
3538 seq_printf(m, "Busy frontbuffer bits: 0x%08x\n",
3539 psr->busy_frontbuffer_bits);
3542 * SKL+ Perf counter is reset to 0 every time a DC state is entered
3544 val = intel_de_read(dev_priv, psr_perf_cnt_reg(dev_priv, cpu_transcoder));
3545 seq_printf(m, "Performance counter: %u\n",
3546 REG_FIELD_GET(EDP_PSR_PERF_CNT_MASK, val));
3548 if (psr->debug & I915_PSR_DEBUG_IRQ) {
3549 seq_printf(m, "Last attempted entry at: %lld\n",
3550 psr->last_entry_attempt);
3551 seq_printf(m, "Last exit at: %lld\n", psr->last_exit);
3554 if (psr->psr2_enabled) {
3555 u32 su_frames_val[3];
3559 * Reading all 3 registers beforehand to minimize crossing a
3560 * frame boundary between register reads.
3562 for (frame = 0; frame < PSR2_SU_STATUS_FRAMES; frame += 3) {
3563 val = intel_de_read(dev_priv, PSR2_SU_STATUS(cpu_transcoder, frame));
3564 su_frames_val[frame / 3] = val;
3567 seq_puts(m, "Frame:\tPSR2 SU blocks:\n");
3569 for (frame = 0; frame < PSR2_SU_STATUS_FRAMES; frame++) {
3572 su_blocks = su_frames_val[frame / 3] &
3573 PSR2_SU_STATUS_MASK(frame);
3574 su_blocks = su_blocks >> PSR2_SU_STATUS_SHIFT(frame);
3575 seq_printf(m, "%d\t%d\n", frame, su_blocks);
3578 seq_printf(m, "PSR2 selective fetch: %s\n",
3579 str_enabled_disabled(psr->psr2_sel_fetch_enabled));
3583 mutex_unlock(&psr->lock);
3584 intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
3589 static int i915_edp_psr_status_show(struct seq_file *m, void *data)
3591 struct drm_i915_private *dev_priv = m->private;
3592 struct intel_dp *intel_dp = NULL;
3593 struct intel_encoder *encoder;
3595 if (!HAS_PSR(dev_priv))
3598 /* Find the first eDP which supports PSR */
3599 for_each_intel_encoder_with_psr(&dev_priv->drm, encoder) {
3600 intel_dp = enc_to_intel_dp(encoder);
3607 return intel_psr_status(m, intel_dp);
3609 DEFINE_SHOW_ATTRIBUTE(i915_edp_psr_status);
3612 i915_edp_psr_debug_set(void *data, u64 val)
3614 struct drm_i915_private *dev_priv = data;
3615 struct intel_encoder *encoder;
3616 intel_wakeref_t wakeref;
3619 if (!HAS_PSR(dev_priv))
3622 for_each_intel_encoder_with_psr(&dev_priv->drm, encoder) {
3623 struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
3625 drm_dbg_kms(&dev_priv->drm, "Setting PSR debug to %llx\n", val);
3627 wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
3629 // TODO: split to each transcoder's PSR debug state
3630 ret = intel_psr_debug_set(intel_dp, val);
3632 intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
3639 i915_edp_psr_debug_get(void *data, u64 *val)
3641 struct drm_i915_private *dev_priv = data;
3642 struct intel_encoder *encoder;
3644 if (!HAS_PSR(dev_priv))
3647 for_each_intel_encoder_with_psr(&dev_priv->drm, encoder) {
3648 struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
3650 // TODO: split to each transcoder's PSR debug state
3651 *val = READ_ONCE(intel_dp->psr.debug);
3658 DEFINE_SIMPLE_ATTRIBUTE(i915_edp_psr_debug_fops,
3659 i915_edp_psr_debug_get, i915_edp_psr_debug_set,
3662 void intel_psr_debugfs_register(struct drm_i915_private *i915)
3664 struct drm_minor *minor = i915->drm.primary;
3666 debugfs_create_file("i915_edp_psr_debug", 0644, minor->debugfs_root,
3667 i915, &i915_edp_psr_debug_fops);
3669 debugfs_create_file("i915_edp_psr_status", 0444, minor->debugfs_root,
3670 i915, &i915_edp_psr_status_fops);
3673 static const char *psr_mode_str(struct intel_dp *intel_dp)
3675 if (intel_dp->psr.panel_replay_enabled)
3676 return "PANEL-REPLAY";
3677 else if (intel_dp->psr.enabled)
3683 static int i915_psr_sink_status_show(struct seq_file *m, void *data)
3685 struct intel_connector *connector = m->private;
3686 struct intel_dp *intel_dp = intel_attached_dp(connector);
3687 static const char * const sink_status[] = {
3689 "transition to active, capture and display",
3690 "active, display from RFB",
3691 "active, capture and display on sink device timings",
3692 "transition to inactive, capture and display, timing re-sync",
3695 "sink internal error",
3697 static const char * const panel_replay_status[] = {
3698 "Sink device frame is locked to the Source device",
3699 "Sink device is coasting, using the VTotal target",
3700 "Sink device is governing the frame rate (frame rate unlock is granted)",
3701 "Sink device in the process of re-locking with the Source device",
3705 u8 status, error_status;
3708 if (!(CAN_PSR(intel_dp) || CAN_PANEL_REPLAY(intel_dp))) {
3709 seq_puts(m, "PSR/Panel-Replay Unsupported\n");
3713 if (connector->base.status != connector_status_connected)
3716 ret = psr_get_status_and_error_status(intel_dp, &status, &error_status);
3721 if (intel_dp->psr.panel_replay_enabled) {
3722 idx = (status & DP_SINK_FRAME_LOCKED_MASK) >> DP_SINK_FRAME_LOCKED_SHIFT;
3723 if (idx < ARRAY_SIZE(panel_replay_status))
3724 str = panel_replay_status[idx];
3725 } else if (intel_dp->psr.enabled) {
3726 idx = status & DP_PSR_SINK_STATE_MASK;
3727 if (idx < ARRAY_SIZE(sink_status))
3728 str = sink_status[idx];
3731 seq_printf(m, "Sink %s status: 0x%x [%s]\n", psr_mode_str(intel_dp), status, str);
3733 seq_printf(m, "Sink %s error status: 0x%x", psr_mode_str(intel_dp), error_status);
3735 if (error_status & (DP_PSR_RFB_STORAGE_ERROR |
3736 DP_PSR_VSC_SDP_UNCORRECTABLE_ERROR |
3737 DP_PSR_LINK_CRC_ERROR))
3741 if (error_status & DP_PSR_RFB_STORAGE_ERROR)
3742 seq_printf(m, "\t%s RFB storage error\n", psr_mode_str(intel_dp));
3743 if (error_status & DP_PSR_VSC_SDP_UNCORRECTABLE_ERROR)
3744 seq_printf(m, "\t%s VSC SDP uncorrectable error\n", psr_mode_str(intel_dp));
3745 if (error_status & DP_PSR_LINK_CRC_ERROR)
3746 seq_printf(m, "\t%s Link CRC error\n", psr_mode_str(intel_dp));
3750 DEFINE_SHOW_ATTRIBUTE(i915_psr_sink_status);
3752 static int i915_psr_status_show(struct seq_file *m, void *data)
3754 struct intel_connector *connector = m->private;
3755 struct intel_dp *intel_dp = intel_attached_dp(connector);
3757 return intel_psr_status(m, intel_dp);
3759 DEFINE_SHOW_ATTRIBUTE(i915_psr_status);
3761 void intel_psr_connector_debugfs_add(struct intel_connector *connector)
3763 struct drm_i915_private *i915 = to_i915(connector->base.dev);
3764 struct dentry *root = connector->base.debugfs_entry;
3766 /* TODO: Add support for MST connectors as well. */
3767 if ((connector->base.connector_type != DRM_MODE_CONNECTOR_eDP &&
3768 connector->base.connector_type != DRM_MODE_CONNECTOR_DisplayPort) ||
3769 connector->mst_port)
3772 debugfs_create_file("i915_psr_sink_status", 0444, root,
3773 connector, &i915_psr_sink_status_fops);
3775 if (HAS_PSR(i915) || HAS_DP20(i915))
3776 debugfs_create_file("i915_psr_status", 0444, root,
3777 connector, &i915_psr_status_fops);