drivers/gpu/drm/i915/display/intel_psr.c
1 /*
2  * Copyright © 2014 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21  * DEALINGS IN THE SOFTWARE.
22  */
23
24 #include <drm/drm_atomic_helper.h>
25 #include <drm/drm_damage_helper.h>
26 #include <drm/drm_debugfs.h>
27
28 #include "i915_drv.h"
29 #include "i915_reg.h"
30 #include "intel_atomic.h"
31 #include "intel_crtc.h"
32 #include "intel_de.h"
33 #include "intel_display_types.h"
34 #include "intel_dp.h"
35 #include "intel_dp_aux.h"
36 #include "intel_frontbuffer.h"
37 #include "intel_hdmi.h"
38 #include "intel_psr.h"
39 #include "intel_psr_regs.h"
40 #include "intel_snps_phy.h"
41 #include "skl_universal_plane.h"
42
43 /**
44  * DOC: Panel Self Refresh (PSR/SRD)
45  *
46  * Since Haswell the display controller supports Panel Self-Refresh on display
47  * panels which have a remote frame buffer (RFB) implemented according to the
48  * PSR spec in eDP 1.3. PSR allows the display to go to lower standby states
49  * when the system is idle but the display is on, as it eliminates display
50  * refresh requests to DDR memory completely as long as the frame buffer for
51  * that display is unchanged.
52  *
53  * Panel Self Refresh must be supported by both Hardware (source) and
54  * Panel (sink).
55  *
56  * PSR saves power by caching the framebuffer in the panel RFB, which allows us
57  * to power down the link and memory controller. For DSI panels the same idea
58  * is called "manual mode".
59  *
60  * The implementation uses the hardware-based PSR support which automatically
61  * enters/exits self-refresh mode. The hardware takes care of sending the
62  * required DP aux message and could even retrain the link (that part isn't
63  * enabled yet though). The hardware also keeps track of any frontbuffer
64  * changes to know when to exit self-refresh mode again. Unfortunately that
65  * part doesn't work too well, hence why the i915 PSR support uses the
66  * software frontbuffer tracking to make sure it doesn't miss a screen
67  * update. For this integration intel_psr_invalidate() and intel_psr_flush()
68  * get called by the frontbuffer tracking code. Note that because of locking
69  * issues the self-refresh re-enable code is done from a work queue, which
70  * must be correctly synchronized/cancelled when shutting down the pipe.
71  *
72  * DC3CO (DC3 clock off)
73  *
74  * On top of PSR2, GEN12 adds an intermediate power-savings state that turns
75  * the clock off automatically during the PSR2 idle state.
76  * The smaller overhead of DC3co entry/exit vs. the overhead of PSR2 deep sleep
77  * entry/exit allows the HW to enter a low-power state even when page flipping
78  * periodically (for instance a 30fps video playback scenario).
79  *
80  * Every time a flip occurs PSR2 will get out of deep sleep state (if it was
81  * in it), DC3CO is enabled and tgl_dc3co_disable_work is scheduled to run
82  * after 6 frames. If no other flip occurs and that work executes, DC3CO is
83  * disabled and PSR2 is configured to enter deep sleep again, resetting in
84  * case of another flip.
85  * Front buffer modifications do not trigger DC3CO activation on purpose as it
86  * would bring a lot of complexity and most modern systems will only use page
87  * flips.
88  */
89
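/*
 * A rough sketch of the frontbuffer-tracking integration described above
 * (simplified; the exact call chain in the frontbuffer code may differ):
 *
 *   fb rendering/dirtying -> intel_frontbuffer_invalidate()
 *                              -> intel_psr_invalidate()  (PSR is exited)
 *   fb flush/flip done    -> intel_frontbuffer_flush()
 *                              -> intel_psr_flush()       (re-activation is
 *                                 deferred to the psr.work work queue)
 */
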
90 /*
91  * Description of PSR mask bits:
92  *
93  * EDP_PSR_DEBUG[16]/EDP_PSR_DEBUG_MASK_DISP_REG_WRITE (hsw-skl):
94  *
95  *  When unmasked (nearly) all display register writes (eg. even
96  *  SWF) trigger a PSR exit. Some registers are excluded from this
97  *  and they have a more specific mask (described below). On icl+
98  *  this bit no longer exists and is effectively always set.
99  *
100  * PIPE_MISC[21]/PIPE_MISC_PSR_MASK_PIPE_REG_WRITE (skl+):
101  *
102  *  When unmasked (nearly) all pipe/plane register writes
103  *  trigger a PSR exit. Some plane registers are excluded from this
104  *  and they have a more specific mask (described below).
105  *
106  * CHICKEN_PIPESL_1[11]/SKL_PSR_MASK_PLANE_FLIP (skl+):
107  * PIPE_MISC[23]/PIPE_MISC_PSR_MASK_PRIMARY_FLIP (bdw):
108  * EDP_PSR_DEBUG[23]/EDP_PSR_DEBUG_MASK_PRIMARY_FLIP (hsw):
109  *
110  *  When unmasked PRI_SURF/PLANE_SURF writes trigger a PSR exit.
111  *  SPR_SURF/CURBASE are not included in this and instead are
112  *  controlled by PIPE_MISC_PSR_MASK_PIPE_REG_WRITE (skl+) or
113  *  EDP_PSR_DEBUG_MASK_DISP_REG_WRITE (hsw/bdw).
114  *
115  * PIPE_MISC[22]/PIPE_MISC_PSR_MASK_SPRITE_ENABLE (bdw):
116  * EDP_PSR_DEBUG[21]/EDP_PSR_DEBUG_MASK_SPRITE_ENABLE (hsw):
117  *
118  *  When unmasked PSR is blocked as long as the sprite
119  *  plane is enabled. skl+ with their universal planes no
120  *  longer have a mask bit like this, and no plane being
121  *  enabled blocks PSR.
122  *
123  * PIPE_MISC[21]/PIPE_MISC_PSR_MASK_CURSOR_MOVE (bdw):
124  * EDP_PSR_DEBUG[20]/EDP_PSR_DEBUG_MASK_CURSOR_MOVE (hsw):
125  *
126  *  When unmasked CURPOS writes trigger a PSR exit. On skl+
127  *  this doesn't exist but CURPOS is included in the
128  *  PIPE_MISC_PSR_MASK_PIPE_REG_WRITE mask.
129  *
130  * PIPE_MISC[20]/PIPE_MISC_PSR_MASK_VBLANK_VSYNC_INT (bdw+):
131  * EDP_PSR_DEBUG[19]/EDP_PSR_DEBUG_MASK_VBLANK_VSYNC_INT (hsw):
132  *
133  *  When unmasked PSR is blocked as long as vblank and/or vsync
134  *  interrupt is unmasked in IMR *and* enabled in IER.
135  *
136  * CHICKEN_TRANS[30]/SKL_UNMASK_VBL_TO_PIPE_IN_SRD (skl+):
137  * CHICKEN_PAR1_1[15]/HSW_MASK_VBL_TO_PIPE_IN_SRD (hsw/bdw):
138  *
139  *  Selects whether PSR exit generates an extra vblank before
140  *  the first frame is transmitted. Also note the opposite polarity
141  *  of the bit on hsw/bdw vs. skl+ (masked==generate the extra vblank,
142  *  unmasked==do not generate the extra vblank).
143  *
144  *  With DC states enabled the extra vblank happens after link training,
145  *  with DC states disabled it happens immediately upon PSR exit trigger.
146  *  No idea as of now why there is a difference. HSW/BDW (which don't
147  *  even have DMC) always generate it after link training. Go figure.
148  *
149  *  Unfortunately CHICKEN_TRANS itself seems to be double buffered
150  *  and thus won't latch until the first vblank. So with DC states
151  *  enabled the register effectively uses the reset value during DC5
152  *  exit+PSR exit sequence, and thus the bit does nothing until
153  *  latched by the vblank that it was trying to prevent from being
154  *  generated in the first place. So we should probably call this
155  *  one a chicken/egg bit instead on skl+.
156  *
157  *  In standby mode (as opposed to link-off) this makes no difference
158  *  as the timing generator keeps running the whole time generating
159  *  normal periodic vblanks.
160  *
161  *  WaPsrDPAMaskVBlankInSRD asks us to set the bit on hsw/bdw,
162  *  and doing so makes the behaviour match the skl+ reset value.
163  *
164  * CHICKEN_PIPESL_1[0]/BDW_UNMASK_VBL_TO_REGS_IN_SRD (bdw):
165  * CHICKEN_PIPESL_1[15]/HSW_UNMASK_VBL_TO_REGS_IN_SRD (hsw):
166  *
167  *  On BDW without this bit set no vblanks whatsoever are
168  *  generated after PSR exit. On HSW this has no apparent effect.
169  *  WaPsrDPRSUnmaskVBlankInSRD says to set this.
170  *
171  * The rest of the bits are more self-explanatory and/or
172  * irrelevant for normal operation.
173  */
174
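/*
 * Resolve the effective PSR enable knob: the debugfs debug mode takes
 * precedence, then the enable_psr module parameter, falling back to the
 * VBT default when the parameter is left at -1.
 */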
175 static bool psr_global_enabled(struct intel_dp *intel_dp)
176 {
177         struct intel_connector *connector = intel_dp->attached_connector;
178         struct drm_i915_private *i915 = dp_to_i915(intel_dp);
179
180         switch (intel_dp->psr.debug & I915_PSR_DEBUG_MODE_MASK) {
181         case I915_PSR_DEBUG_DEFAULT:
182                 if (i915->params.enable_psr == -1)
183                         return connector->panel.vbt.psr.enable;
184                 return i915->params.enable_psr;
185         case I915_PSR_DEBUG_DISABLE:
186                 return false;
187         default:
188                 return true;
189         }
190 }
191
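/*
 * PSR2 on top of the above: a debug mode of "disable" or "force PSR1"
 * rules PSR2 out, as does enable_psr=1 (PSR1 only).
 */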
192 static bool psr2_global_enabled(struct intel_dp *intel_dp)
193 {
194         struct drm_i915_private *i915 = dp_to_i915(intel_dp);
195
196         switch (intel_dp->psr.debug & I915_PSR_DEBUG_MODE_MASK) {
197         case I915_PSR_DEBUG_DISABLE:
198         case I915_PSR_DEBUG_FORCE_PSR1:
199                 return false;
200         default:
201                 if (i915->params.enable_psr == 1)
202                         return false;
203                 return true;
204         }
205 }
206
207 static u32 psr_irq_psr_error_bit_get(struct intel_dp *intel_dp)
208 {
209         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
210
211         return DISPLAY_VER(dev_priv) >= 12 ? TGL_PSR_ERROR :
212                 EDP_PSR_ERROR(intel_dp->psr.transcoder);
213 }
214
215 static u32 psr_irq_post_exit_bit_get(struct intel_dp *intel_dp)
216 {
217         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
218
219         return DISPLAY_VER(dev_priv) >= 12 ? TGL_PSR_POST_EXIT :
220                 EDP_PSR_POST_EXIT(intel_dp->psr.transcoder);
221 }
222
223 static u32 psr_irq_pre_entry_bit_get(struct intel_dp *intel_dp)
224 {
225         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
226
227         return DISPLAY_VER(dev_priv) >= 12 ? TGL_PSR_PRE_ENTRY :
228                 EDP_PSR_PRE_ENTRY(intel_dp->psr.transcoder);
229 }
230
231 static u32 psr_irq_mask_get(struct intel_dp *intel_dp)
232 {
233         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
234
235         return DISPLAY_VER(dev_priv) >= 12 ? TGL_PSR_MASK :
236                 EDP_PSR_MASK(intel_dp->psr.transcoder);
237 }
238
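/*
 * Register selection helpers: HSW has a single set of SRD_* registers,
 * while BDW+ provide per-transcoder EDP_PSR_* instances, and display 12+
 * moves the PSR interrupt registers into the transcoder as well.
 */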
239 static i915_reg_t psr_ctl_reg(struct drm_i915_private *dev_priv,
240                               enum transcoder cpu_transcoder)
241 {
242         if (DISPLAY_VER(dev_priv) >= 8)
243                 return EDP_PSR_CTL(cpu_transcoder);
244         else
245                 return HSW_SRD_CTL;
246 }
247
248 static i915_reg_t psr_debug_reg(struct drm_i915_private *dev_priv,
249                                 enum transcoder cpu_transcoder)
250 {
251         if (DISPLAY_VER(dev_priv) >= 8)
252                 return EDP_PSR_DEBUG(cpu_transcoder);
253         else
254                 return HSW_SRD_DEBUG;
255 }
256
257 static i915_reg_t psr_perf_cnt_reg(struct drm_i915_private *dev_priv,
258                                    enum transcoder cpu_transcoder)
259 {
260         if (DISPLAY_VER(dev_priv) >= 8)
261                 return EDP_PSR_PERF_CNT(cpu_transcoder);
262         else
263                 return HSW_SRD_PERF_CNT;
264 }
265
266 static i915_reg_t psr_status_reg(struct drm_i915_private *dev_priv,
267                                  enum transcoder cpu_transcoder)
268 {
269         if (DISPLAY_VER(dev_priv) >= 8)
270                 return EDP_PSR_STATUS(cpu_transcoder);
271         else
272                 return HSW_SRD_STATUS;
273 }
274
275 static i915_reg_t psr_imr_reg(struct drm_i915_private *dev_priv,
276                               enum transcoder cpu_transcoder)
277 {
278         if (DISPLAY_VER(dev_priv) >= 12)
279                 return TRANS_PSR_IMR(cpu_transcoder);
280         else
281                 return EDP_PSR_IMR;
282 }
283
284 static i915_reg_t psr_iir_reg(struct drm_i915_private *dev_priv,
285                               enum transcoder cpu_transcoder)
286 {
287         if (DISPLAY_VER(dev_priv) >= 12)
288                 return TRANS_PSR_IIR(cpu_transcoder);
289         else
290                 return EDP_PSR_IIR;
291 }
292
293 static i915_reg_t psr_aux_ctl_reg(struct drm_i915_private *dev_priv,
294                                   enum transcoder cpu_transcoder)
295 {
296         if (DISPLAY_VER(dev_priv) >= 8)
297                 return EDP_PSR_AUX_CTL(cpu_transcoder);
298         else
299                 return HSW_SRD_AUX_CTL;
300 }
301
302 static i915_reg_t psr_aux_data_reg(struct drm_i915_private *dev_priv,
303                                    enum transcoder cpu_transcoder, int i)
304 {
305         if (DISPLAY_VER(dev_priv) >= 8)
306                 return EDP_PSR_AUX_DATA(cpu_transcoder, i);
307         else
308                 return HSW_SRD_AUX_DATA(i);
309 }
310
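/*
 * Unmask the PSR error interrupt unconditionally; the pre-entry and
 * post-exit interrupts are only unmasked when IRQ debugging was
 * requested through the debugfs interface.
 */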
311 static void psr_irq_control(struct intel_dp *intel_dp)
312 {
313         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
314         enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
315         u32 mask;
316
317         mask = psr_irq_psr_error_bit_get(intel_dp);
318         if (intel_dp->psr.debug & I915_PSR_DEBUG_IRQ)
319                 mask |= psr_irq_post_exit_bit_get(intel_dp) |
320                         psr_irq_pre_entry_bit_get(intel_dp);
321
322         intel_de_rmw(dev_priv, psr_imr_reg(dev_priv, cpu_transcoder),
323                      psr_irq_mask_get(intel_dp), ~mask);
324 }
325
326 static void psr_event_print(struct drm_i915_private *i915,
327                             u32 val, bool psr2_enabled)
328 {
329         drm_dbg_kms(&i915->drm, "PSR exit events: 0x%x\n", val);
330         if (val & PSR_EVENT_PSR2_WD_TIMER_EXPIRE)
331                 drm_dbg_kms(&i915->drm, "\tPSR2 watchdog timer expired\n");
332         if ((val & PSR_EVENT_PSR2_DISABLED) && psr2_enabled)
333                 drm_dbg_kms(&i915->drm, "\tPSR2 disabled\n");
334         if (val & PSR_EVENT_SU_DIRTY_FIFO_UNDERRUN)
335                 drm_dbg_kms(&i915->drm, "\tSU dirty FIFO underrun\n");
336         if (val & PSR_EVENT_SU_CRC_FIFO_UNDERRUN)
337                 drm_dbg_kms(&i915->drm, "\tSU CRC FIFO underrun\n");
338         if (val & PSR_EVENT_GRAPHICS_RESET)
339                 drm_dbg_kms(&i915->drm, "\tGraphics reset\n");
340         if (val & PSR_EVENT_PCH_INTERRUPT)
341                 drm_dbg_kms(&i915->drm, "\tPCH interrupt\n");
342         if (val & PSR_EVENT_MEMORY_UP)
343                 drm_dbg_kms(&i915->drm, "\tMemory up\n");
344         if (val & PSR_EVENT_FRONT_BUFFER_MODIFY)
345                 drm_dbg_kms(&i915->drm, "\tFront buffer modification\n");
346         if (val & PSR_EVENT_WD_TIMER_EXPIRE)
347                 drm_dbg_kms(&i915->drm, "\tPSR watchdog timer expired\n");
348         if (val & PSR_EVENT_PIPE_REGISTERS_UPDATE)
349                 drm_dbg_kms(&i915->drm, "\tPIPE registers updated\n");
350         if (val & PSR_EVENT_REGISTER_UPDATE)
351                 drm_dbg_kms(&i915->drm, "\tRegister updated\n");
352         if (val & PSR_EVENT_HDCP_ENABLE)
353                 drm_dbg_kms(&i915->drm, "\tHDCP enabled\n");
354         if (val & PSR_EVENT_KVMR_SESSION_ENABLE)
355                 drm_dbg_kms(&i915->drm, "\tKVMR session enabled\n");
356         if (val & PSR_EVENT_VBI_ENABLE)
357                 drm_dbg_kms(&i915->drm, "\tVBI enabled\n");
358         if (val & PSR_EVENT_LPSP_MODE_EXIT)
359                 drm_dbg_kms(&i915->drm, "\tLPSP mode exited\n");
360         if ((val & PSR_EVENT_PSR_DISABLE) && !psr2_enabled)
361                 drm_dbg_kms(&i915->drm, "\tPSR disabled\n");
362 }
363
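/*
 * Handle the PSR interrupt bits for one transcoder: record entry/exit
 * timestamps for debugfs, dump PSR_EVENT on exit (display 9+), and on an
 * AUX error mask the interrupt and schedule psr.work to recover.
 */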
364 void intel_psr_irq_handler(struct intel_dp *intel_dp, u32 psr_iir)
365 {
366         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
367         enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
368         ktime_t time_ns =  ktime_get();
369
370         if (psr_iir & psr_irq_pre_entry_bit_get(intel_dp)) {
371                 intel_dp->psr.last_entry_attempt = time_ns;
372                 drm_dbg_kms(&dev_priv->drm,
373                             "[transcoder %s] PSR entry attempt in 2 vblanks\n",
374                             transcoder_name(cpu_transcoder));
375         }
376
377         if (psr_iir & psr_irq_post_exit_bit_get(intel_dp)) {
378                 intel_dp->psr.last_exit = time_ns;
379                 drm_dbg_kms(&dev_priv->drm,
380                             "[transcoder %s] PSR exit completed\n",
381                             transcoder_name(cpu_transcoder));
382
383                 if (DISPLAY_VER(dev_priv) >= 9) {
384                         u32 val;
385
386                         val = intel_de_rmw(dev_priv, PSR_EVENT(cpu_transcoder), 0, 0);
387
388                         psr_event_print(dev_priv, val, intel_dp->psr.psr2_enabled);
389                 }
390         }
391
392         if (psr_iir & psr_irq_psr_error_bit_get(intel_dp)) {
393                 drm_warn(&dev_priv->drm, "[transcoder %s] PSR aux error\n",
394                          transcoder_name(cpu_transcoder));
395
396                 intel_dp->psr.irq_aux_error = true;
397
398                 /*
399                  * If this interrupt is not masked it will keep firing
400                  * so fast that it prevents the scheduled work from
401                  * running.
402                  * Also, after a PSR error we don't want to arm PSR
403                  * again, so we don't care about unmasking the interrupt
404                  * or clearing irq_aux_error.
405                  */
406                 intel_de_rmw(dev_priv, psr_imr_reg(dev_priv, cpu_transcoder),
407                              0, psr_irq_psr_error_bit_get(intel_dp));
408
409                 queue_work(dev_priv->unordered_wq, &intel_dp->psr.work);
410         }
411 }
412
413 static bool intel_dp_get_alpm_status(struct intel_dp *intel_dp)
414 {
415         u8 alpm_caps = 0;
416
417         if (drm_dp_dpcd_readb(&intel_dp->aux, DP_RECEIVER_ALPM_CAP,
418                               &alpm_caps) != 1)
419                 return false;
420         return alpm_caps & DP_ALPM_CAP;
421 }
422
423 static u8 intel_dp_get_sink_sync_latency(struct intel_dp *intel_dp)
424 {
425         struct drm_i915_private *i915 = dp_to_i915(intel_dp);
426         u8 val = 8; /* assume the worst if we can't read the value */
427
428         if (drm_dp_dpcd_readb(&intel_dp->aux,
429                               DP_SYNCHRONIZATION_LATENCY_IN_SINK, &val) == 1)
430                 val &= DP_MAX_RESYNC_FRAME_COUNT_MASK;
431         else
432                 drm_dbg_kms(&i915->drm,
433                             "Unable to get sink synchronization latency, assuming 8 frames\n");
434         return val;
435 }
436
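/*
 * Read the PSR2 selective update granularity the sink requires from the
 * DPCD, falling back to the legacy 4x4 granularity when the sink has no
 * specific requirement or the reads fail.
 */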
437 static void intel_dp_get_su_granularity(struct intel_dp *intel_dp)
438 {
439         struct drm_i915_private *i915 = dp_to_i915(intel_dp);
440         ssize_t r;
441         u16 w;
442         u8 y;
443
444         /* If the sink doesn't have specific granularity requirements, set legacy ones */
445         if (!(intel_dp->psr_dpcd[1] & DP_PSR2_SU_GRANULARITY_REQUIRED)) {
446                 /* As PSR2 HW sends full lines, we do not care about x granularity */
447                 w = 4;
448                 y = 4;
449                 goto exit;
450         }
451
452         r = drm_dp_dpcd_read(&intel_dp->aux, DP_PSR2_SU_X_GRANULARITY, &w, 2);
453         if (r != 2)
454                 drm_dbg_kms(&i915->drm,
455                             "Unable to read DP_PSR2_SU_X_GRANULARITY\n");
456         /*
457          * Spec says that if the value read is 0 the default granularity should
458          * be used instead.
459          */
460         if (r != 2 || w == 0)
461                 w = 4;
462
463         r = drm_dp_dpcd_read(&intel_dp->aux, DP_PSR2_SU_Y_GRANULARITY, &y, 1);
464         if (r != 1) {
465                 drm_dbg_kms(&i915->drm,
466                             "Unable to read DP_PSR2_SU_Y_GRANULARITY\n");
467                 y = 4;
468         }
469         if (y == 0)
470                 y = 1;
471
472 exit:
473         intel_dp->psr.su_w_granularity = w;
474         intel_dp->psr.su_y_granularity = y;
475 }
476
477 void intel_psr_init_dpcd(struct intel_dp *intel_dp)
478 {
479         struct drm_i915_private *dev_priv =
480                 to_i915(dp_to_dig_port(intel_dp)->base.base.dev);
481
482         drm_dp_dpcd_read(&intel_dp->aux, DP_PSR_SUPPORT, intel_dp->psr_dpcd,
483                          sizeof(intel_dp->psr_dpcd));
484
485         if (!intel_dp->psr_dpcd[0])
486                 return;
487         drm_dbg_kms(&dev_priv->drm, "eDP panel supports PSR version %x\n",
488                     intel_dp->psr_dpcd[0]);
489
490         if (drm_dp_has_quirk(&intel_dp->desc, DP_DPCD_QUIRK_NO_PSR)) {
491                 drm_dbg_kms(&dev_priv->drm,
492                             "PSR support not currently available for this panel\n");
493                 return;
494         }
495
496         if (!(intel_dp->edp_dpcd[1] & DP_EDP_SET_POWER_CAP)) {
497                 drm_dbg_kms(&dev_priv->drm,
498                             "Panel lacks power state control, PSR cannot be enabled\n");
499                 return;
500         }
501
502         intel_dp->psr.sink_support = true;
503         intel_dp->psr.sink_sync_latency =
504                 intel_dp_get_sink_sync_latency(intel_dp);
505
506         if (DISPLAY_VER(dev_priv) >= 9 &&
507             (intel_dp->psr_dpcd[0] == DP_PSR2_WITH_Y_COORD_IS_SUPPORTED)) {
508                 bool y_req = intel_dp->psr_dpcd[1] &
509                              DP_PSR2_SU_Y_COORDINATE_REQUIRED;
510                 bool alpm = intel_dp_get_alpm_status(intel_dp);
511
512                 /*
513                  * All panels that support PSR version 03h (PSR2 +
514                  * Y-coordinate) can handle Y-coordinates in VSC, but we are
515                  * only sure that it is going to be used when required by the
516                  * panel. This way the panel is capable of doing selective
517                  * updates without an AUX frame sync.
518                  *
519                  * To support PSR version 02h and PSR version 03h panels
520                  * without the Y-coordinate requirement we would need to
521                  * enable GTC first.
522                  */
523                 intel_dp->psr.sink_psr2_support = y_req && alpm;
524                 drm_dbg_kms(&dev_priv->drm, "PSR2 %ssupported\n",
525                             intel_dp->psr.sink_psr2_support ? "" : "not ");
526
527                 if (intel_dp->psr.sink_psr2_support) {
528                         intel_dp->psr.colorimetry_support =
529                                 intel_dp_get_colorimetry_status(intel_dp);
530                         intel_dp_get_su_granularity(intel_dp);
531                 }
532         }
533 }
534
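/*
 * HSW/BDW PSR hardware sends its own AUX transaction to wake the sink,
 * so preload the SRD_AUX_DATA registers with a "DP_SET_POWER = D0" native
 * write and program SRD_AUX_CTL with the relevant timing/size fields.
 */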
535 static void hsw_psr_setup_aux(struct intel_dp *intel_dp)
536 {
537         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
538         enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
539         u32 aux_clock_divider, aux_ctl;
540         /* write DP_SET_POWER=D0 */
541         static const u8 aux_msg[] = {
542                 [0] = (DP_AUX_NATIVE_WRITE << 4) | ((DP_SET_POWER >> 16) & 0xf),
543                 [1] = (DP_SET_POWER >> 8) & 0xff,
544                 [2] = DP_SET_POWER & 0xff,
545                 [3] = 1 - 1,
546                 [4] = DP_SET_POWER_D0,
547         };
548         int i;
549
550         BUILD_BUG_ON(sizeof(aux_msg) > 20);
551         for (i = 0; i < sizeof(aux_msg); i += 4)
552                 intel_de_write(dev_priv,
553                                psr_aux_data_reg(dev_priv, cpu_transcoder, i >> 2),
554                                intel_dp_aux_pack(&aux_msg[i], sizeof(aux_msg) - i));
555
556         aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, 0);
557
558         /* Start with bits set for DDI_AUX_CTL register */
559         aux_ctl = intel_dp->get_aux_send_ctl(intel_dp, sizeof(aux_msg),
560                                              aux_clock_divider);
561
562         /* Select only valid bits for SRD_AUX_CTL */
563         aux_ctl &= EDP_PSR_AUX_CTL_TIME_OUT_MASK |
564                 EDP_PSR_AUX_CTL_MESSAGE_SIZE_MASK |
565                 EDP_PSR_AUX_CTL_PRECHARGE_2US_MASK |
566                 EDP_PSR_AUX_CTL_BIT_CLOCK_2X_MASK;
567
568         intel_de_write(dev_priv, psr_aux_ctl_reg(dev_priv, cpu_transcoder),
569                        aux_ctl);
570 }
571
572 static void intel_psr_enable_sink(struct intel_dp *intel_dp)
573 {
574         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
575         u8 dpcd_val = DP_PSR_ENABLE;
576
577         /* Enable ALPM at sink for psr2 */
578         if (intel_dp->psr.psr2_enabled) {
579                 drm_dp_dpcd_writeb(&intel_dp->aux, DP_RECEIVER_ALPM_CONFIG,
580                                    DP_ALPM_ENABLE |
581                                    DP_ALPM_LOCK_ERROR_IRQ_HPD_ENABLE);
582
583                 dpcd_val |= DP_PSR_ENABLE_PSR2 | DP_PSR_IRQ_HPD_WITH_CRC_ERRORS;
584         } else {
585                 if (intel_dp->psr.link_standby)
586                         dpcd_val |= DP_PSR_MAIN_LINK_ACTIVE;
587
588                 if (DISPLAY_VER(dev_priv) >= 8)
589                         dpcd_val |= DP_PSR_CRC_VERIFICATION;
590         }
591
592         if (intel_dp->psr.req_psr2_sdp_prior_scanline)
593                 dpcd_val |= DP_PSR_SU_REGION_SCANLINE_CAPTURE;
594
595         drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG, dpcd_val);
596
597         drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER, DP_SET_POWER_D0);
598 }
599
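/*
 * Translate the VBT TP1/TP2/TP3 wakeup times (or the psr_safest_params
 * override) into the EDP_PSR_CTL training pattern time fields, and pick
 * TP1->TP3 vs. TP1->TP2 based on source/sink TPS3 support.
 */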
600 static u32 intel_psr1_get_tp_time(struct intel_dp *intel_dp)
601 {
602         struct intel_connector *connector = intel_dp->attached_connector;
603         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
604         u32 val = 0;
605
606         if (DISPLAY_VER(dev_priv) >= 11)
607                 val |= EDP_PSR_TP4_TIME_0us;
608
609         if (dev_priv->params.psr_safest_params) {
610                 val |= EDP_PSR_TP1_TIME_2500us;
611                 val |= EDP_PSR_TP2_TP3_TIME_2500us;
612                 goto check_tp3_sel;
613         }
614
615         if (connector->panel.vbt.psr.tp1_wakeup_time_us == 0)
616                 val |= EDP_PSR_TP1_TIME_0us;
617         else if (connector->panel.vbt.psr.tp1_wakeup_time_us <= 100)
618                 val |= EDP_PSR_TP1_TIME_100us;
619         else if (connector->panel.vbt.psr.tp1_wakeup_time_us <= 500)
620                 val |= EDP_PSR_TP1_TIME_500us;
621         else
622                 val |= EDP_PSR_TP1_TIME_2500us;
623
624         if (connector->panel.vbt.psr.tp2_tp3_wakeup_time_us == 0)
625                 val |= EDP_PSR_TP2_TP3_TIME_0us;
626         else if (connector->panel.vbt.psr.tp2_tp3_wakeup_time_us <= 100)
627                 val |= EDP_PSR_TP2_TP3_TIME_100us;
628         else if (connector->panel.vbt.psr.tp2_tp3_wakeup_time_us <= 500)
629                 val |= EDP_PSR_TP2_TP3_TIME_500us;
630         else
631                 val |= EDP_PSR_TP2_TP3_TIME_2500us;
632
633         /*
634          * WA 0479: hsw,bdw
635          * "Do not skip both TP1 and TP2/TP3"
636          */
637         if (DISPLAY_VER(dev_priv) < 9 &&
638             connector->panel.vbt.psr.tp1_wakeup_time_us == 0 &&
639             connector->panel.vbt.psr.tp2_tp3_wakeup_time_us == 0)
640                 val |= EDP_PSR_TP2_TP3_TIME_100us;
641
642 check_tp3_sel:
643         if (intel_dp_source_supports_tps3(dev_priv) &&
644             drm_dp_tps3_supported(intel_dp->dpcd))
645                 val |= EDP_PSR_TP_TP1_TP3;
646         else
647                 val |= EDP_PSR_TP_TP1_TP2;
648
649         return val;
650 }
651
652 static u8 psr_compute_idle_frames(struct intel_dp *intel_dp)
653 {
654         struct intel_connector *connector = intel_dp->attached_connector;
655         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
656         int idle_frames;
657
658         /* Let's use 6 as the minimum to cover all known cases including the
659          * off-by-one issue that HW has in some cases.
660          */
661         idle_frames = max(6, connector->panel.vbt.psr.idle_frames);
662         idle_frames = max(idle_frames, intel_dp->psr.sink_sync_latency + 1);
663
664         if (drm_WARN_ON(&dev_priv->drm, idle_frames > 0xf))
665                 idle_frames = 0xf;
666
667         return idle_frames;
668 }
669
670 static void hsw_activate_psr1(struct intel_dp *intel_dp)
671 {
672         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
673         enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
674         u32 max_sleep_time = 0x1f;
675         u32 val = EDP_PSR_ENABLE;
676
677         val |= EDP_PSR_IDLE_FRAMES(psr_compute_idle_frames(intel_dp));
678
679         if (DISPLAY_VER(dev_priv) < 20)
680                 val |= EDP_PSR_MAX_SLEEP_TIME(max_sleep_time);
681
682         if (IS_HASWELL(dev_priv))
683                 val |= EDP_PSR_MIN_LINK_ENTRY_TIME_8_LINES;
684
685         if (intel_dp->psr.link_standby)
686                 val |= EDP_PSR_LINK_STANDBY;
687
688         val |= intel_psr1_get_tp_time(intel_dp);
689
690         if (DISPLAY_VER(dev_priv) >= 8)
691                 val |= EDP_PSR_CRC_ENABLE;
692
693         intel_de_rmw(dev_priv, psr_ctl_reg(dev_priv, cpu_transcoder),
694                      ~EDP_PSR_RESTORE_PSR_ACTIVE_CTX_MASK, val);
695 }
696
697 static u32 intel_psr2_get_tp_time(struct intel_dp *intel_dp)
698 {
699         struct intel_connector *connector = intel_dp->attached_connector;
700         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
701         u32 val = 0;
702
703         if (dev_priv->params.psr_safest_params)
704                 return EDP_PSR2_TP2_TIME_2500us;
705
706         if (connector->panel.vbt.psr.psr2_tp2_tp3_wakeup_time_us >= 0 &&
707             connector->panel.vbt.psr.psr2_tp2_tp3_wakeup_time_us <= 50)
708                 val |= EDP_PSR2_TP2_TIME_50us;
709         else if (connector->panel.vbt.psr.psr2_tp2_tp3_wakeup_time_us <= 100)
710                 val |= EDP_PSR2_TP2_TIME_100us;
711         else if (connector->panel.vbt.psr.psr2_tp2_tp3_wakeup_time_us <= 500)
712                 val |= EDP_PSR2_TP2_TIME_500us;
713         else
714                 val |= EDP_PSR2_TP2_TIME_2500us;
715
716         return val;
717 }
718
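/*
 * The PSR2 block count is expressed in lines: 8 lines when both the IO
 * and fast wake line counts fit within 8 lines, 12 otherwise. The
 * register field encodes it in blocks of 4 lines.
 */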
719 static int psr2_block_count_lines(struct intel_dp *intel_dp)
720 {
721         return intel_dp->psr.io_wake_lines < 9 &&
722                 intel_dp->psr.fast_wake_lines < 9 ? 8 : 12;
723 }
724
725 static int psr2_block_count(struct intel_dp *intel_dp)
726 {
727         return psr2_block_count_lines(intel_dp) / 4;
728 }
729
730 static void hsw_activate_psr2(struct intel_dp *intel_dp)
731 {
732         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
733         enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
734         u32 val = EDP_PSR2_ENABLE;
735
736         val |= EDP_PSR2_IDLE_FRAMES(psr_compute_idle_frames(intel_dp));
737
738         if (DISPLAY_VER(dev_priv) <= 13 && !IS_ALDERLAKE_P(dev_priv))
739                 val |= EDP_SU_TRACK_ENABLE;
740
741         if (DISPLAY_VER(dev_priv) >= 10 && DISPLAY_VER(dev_priv) <= 12)
742                 val |= EDP_Y_COORDINATE_ENABLE;
743
744         val |= EDP_PSR2_FRAME_BEFORE_SU(max_t(u8, intel_dp->psr.sink_sync_latency + 1, 2));
745         val |= intel_psr2_get_tp_time(intel_dp);
746
747         if (DISPLAY_VER(dev_priv) >= 12) {
748                 if (psr2_block_count(intel_dp) > 2)
749                         val |= TGL_EDP_PSR2_BLOCK_COUNT_NUM_3;
750                 else
751                         val |= TGL_EDP_PSR2_BLOCK_COUNT_NUM_2;
752         }
753
754         /* Wa_22012278275:adl-p */
755         if (IS_ALDERLAKE_P(dev_priv) && IS_DISPLAY_STEP(dev_priv, STEP_A0, STEP_E0)) {
756                 static const u8 map[] = {
757                         2, /* 5 lines */
758                         1, /* 6 lines */
759                         0, /* 7 lines */
760                         3, /* 8 lines */
761                         6, /* 9 lines */
762                         5, /* 10 lines */
763                         4, /* 11 lines */
764                         7, /* 12 lines */
765                 };
766                 /*
767                  * Still using the default IO_BUFFER_WAKE and FAST_WAKE, see
768                  * comments below for more information
769                  */
770                 int tmp;
771
772                 tmp = map[intel_dp->psr.io_wake_lines - TGL_EDP_PSR2_IO_BUFFER_WAKE_MIN_LINES];
773                 val |= TGL_EDP_PSR2_IO_BUFFER_WAKE(tmp + TGL_EDP_PSR2_IO_BUFFER_WAKE_MIN_LINES);
774
775                 tmp = map[intel_dp->psr.fast_wake_lines - TGL_EDP_PSR2_FAST_WAKE_MIN_LINES];
776                 val |= TGL_EDP_PSR2_FAST_WAKE(tmp + TGL_EDP_PSR2_FAST_WAKE_MIN_LINES);
777         } else if (DISPLAY_VER(dev_priv) >= 12) {
778                 val |= TGL_EDP_PSR2_IO_BUFFER_WAKE(intel_dp->psr.io_wake_lines);
779                 val |= TGL_EDP_PSR2_FAST_WAKE(intel_dp->psr.fast_wake_lines);
780         } else if (DISPLAY_VER(dev_priv) >= 9) {
781                 val |= EDP_PSR2_IO_BUFFER_WAKE(intel_dp->psr.io_wake_lines);
782                 val |= EDP_PSR2_FAST_WAKE(intel_dp->psr.fast_wake_lines);
783         }
784
785         if (intel_dp->psr.req_psr2_sdp_prior_scanline)
786                 val |= EDP_PSR2_SU_SDP_SCANLINE;
787
788         if (intel_dp->psr.psr2_sel_fetch_enabled) {
789                 u32 tmp;
790
791                 tmp = intel_de_read(dev_priv, PSR2_MAN_TRK_CTL(cpu_transcoder));
792                 drm_WARN_ON(&dev_priv->drm, !(tmp & PSR2_MAN_TRK_CTL_ENABLE));
793         } else if (HAS_PSR2_SEL_FETCH(dev_priv)) {
794                 intel_de_write(dev_priv, PSR2_MAN_TRK_CTL(cpu_transcoder), 0);
795         }
796
797         /*
798          * PSR2 HW is incorrectly using EDP_PSR_TP1_TP3_SEL and BSpec
799          * recommends keeping this bit unset while PSR2 is enabled.
800          */
801         intel_de_write(dev_priv, psr_ctl_reg(dev_priv, cpu_transcoder), 0);
802
803         intel_de_write(dev_priv, EDP_PSR2_CTL(cpu_transcoder), val);
804 }
805
806 static bool
807 transcoder_has_psr2(struct drm_i915_private *dev_priv, enum transcoder cpu_transcoder)
808 {
809         if (IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(dev_priv) >= 14)
810                 return cpu_transcoder == TRANSCODER_A || cpu_transcoder == TRANSCODER_B;
811         else if (DISPLAY_VER(dev_priv) >= 12)
812                 return cpu_transcoder == TRANSCODER_A;
813         else if (DISPLAY_VER(dev_priv) >= 9)
814                 return cpu_transcoder == TRANSCODER_EDP;
815         else
816                 return false;
817 }
818
819 static u32 intel_get_frame_time_us(const struct intel_crtc_state *cstate)
820 {
821         if (!cstate || !cstate->hw.active)
822                 return 0;
823
824         return DIV_ROUND_UP(1000 * 1000,
825                             drm_mode_vrefresh(&cstate->hw.adjusted_mode));
826 }
827
828 static void psr2_program_idle_frames(struct intel_dp *intel_dp,
829                                      u32 idle_frames)
830 {
831         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
832         enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
833
834         intel_de_rmw(dev_priv, EDP_PSR2_CTL(cpu_transcoder),
835                      EDP_PSR2_IDLE_FRAMES_MASK,
836                      EDP_PSR2_IDLE_FRAMES(idle_frames));
837 }
838
839 static void tgl_psr2_enable_dc3co(struct intel_dp *intel_dp)
840 {
841         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
842
843         psr2_program_idle_frames(intel_dp, 0);
844         intel_display_power_set_target_dc_state(dev_priv, DC_STATE_EN_DC3CO);
845 }
846
847 static void tgl_psr2_disable_dc3co(struct intel_dp *intel_dp)
848 {
849         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
850
851         intel_display_power_set_target_dc_state(dev_priv, DC_STATE_EN_UPTO_DC6);
852         psr2_program_idle_frames(intel_dp, psr_compute_idle_frames(intel_dp));
853 }
854
855 static void tgl_dc3co_disable_work(struct work_struct *work)
856 {
857         struct intel_dp *intel_dp =
858                 container_of(work, typeof(*intel_dp), psr.dc3co_work.work);
859
860         mutex_lock(&intel_dp->psr.lock);
861         /* If delayed work is pending, it is not idle */
862         if (delayed_work_pending(&intel_dp->psr.dc3co_work))
863                 goto unlock;
864
865         tgl_psr2_disable_dc3co(intel_dp);
866 unlock:
867         mutex_unlock(&intel_dp->psr.lock);
868 }
869
870 static void tgl_disallow_dc3co_on_psr2_exit(struct intel_dp *intel_dp)
871 {
872         if (!intel_dp->psr.dc3co_exitline)
873                 return;
874
875         cancel_delayed_work(&intel_dp->psr.dc3co_work);
876         /* Before PSR2 exit disallow dc3co */
877         tgl_psr2_disable_dc3co(intel_dp);
878 }
879
880 static bool
881 dc3co_is_pipe_port_compatible(struct intel_dp *intel_dp,
882                               struct intel_crtc_state *crtc_state)
883 {
884         struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
885         enum pipe pipe = to_intel_crtc(crtc_state->uapi.crtc)->pipe;
886         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
887         enum port port = dig_port->base.port;
888
889         if (IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(dev_priv) >= 14)
890                 return pipe <= PIPE_B && port <= PORT_B;
891         else
892                 return pipe == PIPE_A && port == PORT_A;
893 }
894
895 static void
896 tgl_dc3co_exitline_compute_config(struct intel_dp *intel_dp,
897                                   struct intel_crtc_state *crtc_state)
898 {
899         const u32 crtc_vdisplay = crtc_state->uapi.adjusted_mode.crtc_vdisplay;
900         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
901         struct i915_power_domains *power_domains = &dev_priv->display.power.domains;
902         u32 exit_scanlines;
903
904         /*
905          * FIXME: Due to the changed sequence of activating/deactivating DC3CO,
906          * disable DC3CO until the changed dc3co activating/deactivating sequence
907          * is applied. B.Specs:49196
908          */
909         return;
910
911         /*
912          * DMC's DC3CO exit mechanism has an issue with Selective Fetch
913          * TODO: when the issue is addressed, this restriction should be removed.
914          */
915         if (crtc_state->enable_psr2_sel_fetch)
916                 return;
917
918         if (!(power_domains->allowed_dc_mask & DC_STATE_EN_DC3CO))
919                 return;
920
921         if (!dc3co_is_pipe_port_compatible(intel_dp, crtc_state))
922                 return;
923
924         /* Wa_16011303918:adl-p */
925         if (IS_ALDERLAKE_P(dev_priv) && IS_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0))
926                 return;
927
928         /*
929          * DC3CO Exit time 200us B.Spec 49196
930          * PSR2 transcoder Early Exit scanlines = ROUNDUP(200 / line time) + 1
931          */
932         exit_scanlines =
933                 intel_usecs_to_scanlines(&crtc_state->uapi.adjusted_mode, 200) + 1;
934
935         if (drm_WARN_ON(&dev_priv->drm, exit_scanlines > crtc_vdisplay))
936                 return;
937
938         crtc_state->dc3co_exitline = crtc_vdisplay - exit_scanlines;
939 }
940
941 static bool intel_psr2_sel_fetch_config_valid(struct intel_dp *intel_dp,
942                                               struct intel_crtc_state *crtc_state)
943 {
944         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
945
946         if (!dev_priv->params.enable_psr2_sel_fetch &&
947             intel_dp->psr.debug != I915_PSR_DEBUG_ENABLE_SEL_FETCH) {
948                 drm_dbg_kms(&dev_priv->drm,
949                             "PSR2 sel fetch not enabled, disabled by parameter\n");
950                 return false;
951         }
952
953         if (crtc_state->uapi.async_flip) {
954                 drm_dbg_kms(&dev_priv->drm,
955                             "PSR2 sel fetch not enabled, async flip enabled\n");
956                 return false;
957         }
958
959         return crtc_state->enable_psr2_sel_fetch = true;
960 }
961
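/*
 * Validate that the mode (and DSC slice height, if enabled) lines up with
 * the selective update granularity the sink requires, and derive the Y
 * granularity the driver will use.
 */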
962 static bool psr2_granularity_check(struct intel_dp *intel_dp,
963                                    struct intel_crtc_state *crtc_state)
964 {
965         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
966         const struct drm_dsc_config *vdsc_cfg = &crtc_state->dsc.config;
967         const int crtc_hdisplay = crtc_state->hw.adjusted_mode.crtc_hdisplay;
968         const int crtc_vdisplay = crtc_state->hw.adjusted_mode.crtc_vdisplay;
969         u16 y_granularity = 0;
970
971         /* PSR2 HW only sends full lines, so we only need to validate the width */
972         if (crtc_hdisplay % intel_dp->psr.su_w_granularity)
973                 return false;
974
975         if (crtc_vdisplay % intel_dp->psr.su_y_granularity)
976                 return false;
977
978         /* HW tracking is only aligned to 4 lines */
979         if (!crtc_state->enable_psr2_sel_fetch)
980                 return intel_dp->psr.su_y_granularity == 4;
981
982         /*
983          * adl_p and mtl platforms have 1 line granularity.
984          * For other platforms with SW tracking we can adjust the y coordinates
985          * to match the sink requirement if it is a multiple of 4.
986          */
987         if (IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(dev_priv) >= 14)
988                 y_granularity = intel_dp->psr.su_y_granularity;
989         else if (intel_dp->psr.su_y_granularity <= 2)
990                 y_granularity = 4;
991         else if ((intel_dp->psr.su_y_granularity % 4) == 0)
992                 y_granularity = intel_dp->psr.su_y_granularity;
993
994         if (y_granularity == 0 || crtc_vdisplay % y_granularity)
995                 return false;
996
997         if (crtc_state->dsc.compression_enable &&
998             vdsc_cfg->slice_height % y_granularity)
999                 return false;
1000
1001         crtc_state->su_y_granularity = y_granularity;
1002         return true;
1003 }
1004
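/*
 * Check whether the PSR2 SU region SDP can be transmitted within the
 * hblank period; if not, and the platform/panel allow it, request that
 * the SDP be sent one scanline earlier instead.
 */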
1005 static bool _compute_psr2_sdp_prior_scanline_indication(struct intel_dp *intel_dp,
1006                                                         struct intel_crtc_state *crtc_state)
1007 {
1008         const struct drm_display_mode *adjusted_mode = &crtc_state->uapi.adjusted_mode;
1009         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1010         u32 hblank_total, hblank_ns, req_ns;
1011
1012         hblank_total = adjusted_mode->crtc_hblank_end - adjusted_mode->crtc_hblank_start;
1013         hblank_ns = div_u64(1000000ULL * hblank_total, adjusted_mode->crtc_clock);
1014
1015         /* From spec: ((60 / number of lanes) + 11) * 1000 / symbol clock frequency MHz */
1016         req_ns = ((60 / crtc_state->lane_count) + 11) * 1000 / (crtc_state->port_clock / 1000);
1017
1018         if ((hblank_ns - req_ns) > 100)
1019                 return true;
1020
1021         /* Not supported <13 / Wa_22012279113:adl-p */
1022         if (DISPLAY_VER(dev_priv) <= 13 || intel_dp->edp_dpcd[0] < DP_EDP_14b)
1023                 return false;
1024
1025         crtc_state->req_psr2_sdp_prior_scanline = true;
1026         return true;
1027 }
1028
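/*
 * Convert the IO buffer and fast wake times (in us, per Bspec plus a
 * tested fudge for fast wake on display 12+) into scanlines and reject
 * PSR2 if they exceed what the hardware fields can express.
 */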
1029 static bool _compute_psr2_wake_times(struct intel_dp *intel_dp,
1030                                      struct intel_crtc_state *crtc_state)
1031 {
1032         struct drm_i915_private *i915 = dp_to_i915(intel_dp);
1033         int io_wake_lines, io_wake_time, fast_wake_lines, fast_wake_time;
1034         u8 max_wake_lines;
1035
1036         if (DISPLAY_VER(i915) >= 12) {
1037                 io_wake_time = 42;
1038                 /*
1039                  * According to Bspec it's 42us, but based on testing
1040                  * it is not enough -> use 45 us.
1041                  */
1042                 fast_wake_time = 45;
1043                 max_wake_lines = 12;
1044         } else {
1045                 io_wake_time = 50;
1046                 fast_wake_time = 32;
1047                 max_wake_lines = 8;
1048         }
1049
1050         io_wake_lines = intel_usecs_to_scanlines(
1051                 &crtc_state->hw.adjusted_mode, io_wake_time);
1052         fast_wake_lines = intel_usecs_to_scanlines(
1053                 &crtc_state->hw.adjusted_mode, fast_wake_time);
1054
1055         if (io_wake_lines > max_wake_lines ||
1056             fast_wake_lines > max_wake_lines)
1057                 return false;
1058
1059         if (i915->params.psr_safest_params)
1060                 io_wake_lines = fast_wake_lines = max_wake_lines;
1061
1062         /* According to Bspec lower limit should be set as 7 lines. */
1063         intel_dp->psr.io_wake_lines = max(io_wake_lines, 7);
1064         intel_dp->psr.fast_wake_lines = max(fast_wake_lines, 7);
1065
1066         return true;
1067 }
1068
1069 static bool intel_psr2_config_valid(struct intel_dp *intel_dp,
1070                                     struct intel_crtc_state *crtc_state)
1071 {
1072         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1073         int crtc_hdisplay = crtc_state->hw.adjusted_mode.crtc_hdisplay;
1074         int crtc_vdisplay = crtc_state->hw.adjusted_mode.crtc_vdisplay;
1075         int psr_max_h = 0, psr_max_v = 0, max_bpp = 0;
1076
1077         if (!intel_dp->psr.sink_psr2_support)
1078                 return false;
1079
1080         /* JSL and EHL only support eDP 1.3 */
1081         if (IS_JASPERLAKE(dev_priv) || IS_ELKHARTLAKE(dev_priv)) {
1082                 drm_dbg_kms(&dev_priv->drm, "PSR2 not supported by phy\n");
1083                 return false;
1084         }
1085
1086         /* Wa_16011181250 */
1087         if (IS_ROCKETLAKE(dev_priv) || IS_ALDERLAKE_S(dev_priv) ||
1088             IS_DG2(dev_priv)) {
1089                 drm_dbg_kms(&dev_priv->drm, "PSR2 is defeatured for this platform\n");
1090                 return false;
1091         }
1092
1093         if (IS_ALDERLAKE_P(dev_priv) && IS_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0)) {
1094                 drm_dbg_kms(&dev_priv->drm, "PSR2 not completely functional in this stepping\n");
1095                 return false;
1096         }
1097
1098         if (!transcoder_has_psr2(dev_priv, crtc_state->cpu_transcoder)) {
1099                 drm_dbg_kms(&dev_priv->drm,
1100                             "PSR2 not supported in transcoder %s\n",
1101                             transcoder_name(crtc_state->cpu_transcoder));
1102                 return false;
1103         }
1104
1105         if (!psr2_global_enabled(intel_dp)) {
1106                 drm_dbg_kms(&dev_priv->drm, "PSR2 disabled by flag\n");
1107                 return false;
1108         }
1109
1110         /*
1111          * DSC and PSR2 cannot be enabled simultaneously. If a requested
1112          * resolution requires DSC to be enabled, priority is given to DSC
1113          * over PSR2.
1114          */
1115         if (crtc_state->dsc.compression_enable &&
1116             (DISPLAY_VER(dev_priv) <= 13 && !IS_ALDERLAKE_P(dev_priv))) {
1117                 drm_dbg_kms(&dev_priv->drm,
1118                             "PSR2 cannot be enabled since DSC is enabled\n");
1119                 return false;
1120         }
1121
1122         if (crtc_state->crc_enabled) {
1123                 drm_dbg_kms(&dev_priv->drm,
1124                             "PSR2 not enabled because it would inhibit pipe CRC calculation\n");
1125                 return false;
1126         }
1127
1128         if (DISPLAY_VER(dev_priv) >= 12) {
1129                 psr_max_h = 5120;
1130                 psr_max_v = 3200;
1131                 max_bpp = 30;
1132         } else if (DISPLAY_VER(dev_priv) >= 10) {
1133                 psr_max_h = 4096;
1134                 psr_max_v = 2304;
1135                 max_bpp = 24;
1136         } else if (DISPLAY_VER(dev_priv) == 9) {
1137                 psr_max_h = 3640;
1138                 psr_max_v = 2304;
1139                 max_bpp = 24;
1140         }
1141
1142         if (crtc_state->pipe_bpp > max_bpp) {
1143                 drm_dbg_kms(&dev_priv->drm,
1144                             "PSR2 not enabled, pipe bpp %d > max supported %d\n",
1145                             crtc_state->pipe_bpp, max_bpp);
1146                 return false;
1147         }
1148
1149         /* Wa_16011303918:adl-p */
1150         if (crtc_state->vrr.enable &&
1151             IS_ALDERLAKE_P(dev_priv) && IS_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0)) {
1152                 drm_dbg_kms(&dev_priv->drm,
1153                             "PSR2 not enabled, not compatible with HW stepping + VRR\n");
1154                 return false;
1155         }
1156
1157         if (!_compute_psr2_sdp_prior_scanline_indication(intel_dp, crtc_state)) {
1158                 drm_dbg_kms(&dev_priv->drm,
1159                             "PSR2 not enabled, PSR2 SDP indication does not fit in hblank\n");
1160                 return false;
1161         }
1162
1163         if (!_compute_psr2_wake_times(intel_dp, crtc_state)) {
1164                 drm_dbg_kms(&dev_priv->drm,
1165                             "PSR2 not enabled, Unable to use long enough wake times\n");
1166                 return false;
1167         }
1168
1169         /* Vblank >= PSR2_CTL Block Count Number maximum line count */
1170         if (crtc_state->hw.adjusted_mode.crtc_vblank_end -
1171             crtc_state->hw.adjusted_mode.crtc_vblank_start <
1172             psr2_block_count_lines(intel_dp)) {
1173                 drm_dbg_kms(&dev_priv->drm,
1174                             "PSR2 not enabled, too short vblank time\n");
1175                 return false;
1176         }
1177
1178         if (HAS_PSR2_SEL_FETCH(dev_priv)) {
1179                 if (!intel_psr2_sel_fetch_config_valid(intel_dp, crtc_state) &&
1180                     !HAS_PSR_HW_TRACKING(dev_priv)) {
1181                         drm_dbg_kms(&dev_priv->drm,
1182                                     "PSR2 not enabled, selective fetch not valid and no HW tracking available\n");
1183                         return false;
1184                 }
1185         }
1186
1187         if (!psr2_granularity_check(intel_dp, crtc_state)) {
1188                 drm_dbg_kms(&dev_priv->drm, "PSR2 not enabled, SU granularity not compatible\n");
1189                 goto unsupported;
1190         }
1191
1192         if (!crtc_state->enable_psr2_sel_fetch &&
1193             (crtc_hdisplay > psr_max_h || crtc_vdisplay > psr_max_v)) {
1194                 drm_dbg_kms(&dev_priv->drm,
1195                             "PSR2 not enabled, resolution %dx%d > max supported %dx%d\n",
1196                             crtc_hdisplay, crtc_vdisplay,
1197                             psr_max_h, psr_max_v);
1198                 goto unsupported;
1199         }
1200
1201         tgl_dc3co_exitline_compute_config(intel_dp, crtc_state);
1202         return true;
1203
1204 unsupported:
1205         crtc_state->enable_psr2_sel_fetch = false;
1206         return false;
1207 }
1208
1209 void intel_psr_compute_config(struct intel_dp *intel_dp,
1210                               struct intel_crtc_state *crtc_state,
1211                               struct drm_connector_state *conn_state)
1212 {
1213         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1214         const struct drm_display_mode *adjusted_mode =
1215                 &crtc_state->hw.adjusted_mode;
1216         int psr_setup_time;
1217
1218         /*
1219          * Current PSR panels don't work reliably with VRR enabled.
1220          * So if VRR is enabled, do not enable PSR.
1221          */
1222         if (crtc_state->vrr.enable)
1223                 return;
1224
1225         if (!CAN_PSR(intel_dp))
1226                 return;
1227
1228         if (!psr_global_enabled(intel_dp)) {
1229                 drm_dbg_kms(&dev_priv->drm, "PSR disabled by flag\n");
1230                 return;
1231         }
1232
1233         if (intel_dp->psr.sink_not_reliable) {
1234                 drm_dbg_kms(&dev_priv->drm,
1235                             "PSR sink implementation is not reliable\n");
1236                 return;
1237         }
1238
1239         if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
1240                 drm_dbg_kms(&dev_priv->drm,
1241                             "PSR condition failed: Interlaced mode enabled\n");
1242                 return;
1243         }
1244
1245         psr_setup_time = drm_dp_psr_setup_time(intel_dp->psr_dpcd);
1246         if (psr_setup_time < 0) {
1247                 drm_dbg_kms(&dev_priv->drm,
1248                             "PSR condition failed: Invalid PSR setup time (0x%02x)\n",
1249                             intel_dp->psr_dpcd[1]);
1250                 return;
1251         }
1252
1253         if (intel_usecs_to_scanlines(adjusted_mode, psr_setup_time) >
1254             adjusted_mode->crtc_vtotal - adjusted_mode->crtc_vdisplay - 1) {
1255                 drm_dbg_kms(&dev_priv->drm,
1256                             "PSR condition failed: PSR setup time (%d us) too long\n",
1257                             psr_setup_time);
1258                 return;
1259         }
1260
1261         crtc_state->has_psr = true;
1262         crtc_state->has_psr2 = intel_psr2_config_valid(intel_dp, crtc_state);
1263
1264         crtc_state->infoframes.enable |= intel_hdmi_infoframe_enable(DP_SDP_VSC);
1265         intel_dp_compute_psr_vsc_sdp(intel_dp, crtc_state, conn_state,
1266                                      &crtc_state->psr_vsc);
1267 }
1268
1269 void intel_psr_get_config(struct intel_encoder *encoder,
1270                           struct intel_crtc_state *pipe_config)
1271 {
1272         struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
1273         struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
1274         enum transcoder cpu_transcoder = pipe_config->cpu_transcoder;
1275         struct intel_dp *intel_dp;
1276         u32 val;
1277
1278         if (!dig_port)
1279                 return;
1280
1281         intel_dp = &dig_port->dp;
1282         if (!CAN_PSR(intel_dp))
1283                 return;
1284
1285         mutex_lock(&intel_dp->psr.lock);
1286         if (!intel_dp->psr.enabled)
1287                 goto unlock;
1288
1289         /*
1290          * Not possible to read back the EDP_PSR/PSR2_CTL registers, as they
1291          * are enabled/disabled on the fly by frontbuffer tracking and others.
1292          */
1293         pipe_config->has_psr = true;
1294         pipe_config->has_psr2 = intel_dp->psr.psr2_enabled;
1295         pipe_config->infoframes.enable |= intel_hdmi_infoframe_enable(DP_SDP_VSC);
1296
1297         if (!intel_dp->psr.psr2_enabled)
1298                 goto unlock;
1299
1300         if (HAS_PSR2_SEL_FETCH(dev_priv)) {
1301                 val = intel_de_read(dev_priv, PSR2_MAN_TRK_CTL(cpu_transcoder));
1302                 if (val & PSR2_MAN_TRK_CTL_ENABLE)
1303                         pipe_config->enable_psr2_sel_fetch = true;
1304         }
1305
1306         if (DISPLAY_VER(dev_priv) >= 12) {
1307                 val = intel_de_read(dev_priv, TRANS_EXITLINE(cpu_transcoder));
1308                 pipe_config->dc3co_exitline = REG_FIELD_GET(EXITLINE_MASK, val);
1309         }
1310 unlock:
1311         mutex_unlock(&intel_dp->psr.lock);
1312 }
1313
1314 static void intel_psr_activate(struct intel_dp *intel_dp)
1315 {
1316         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1317         enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
1318
1319         drm_WARN_ON(&dev_priv->drm,
1320                     transcoder_has_psr2(dev_priv, cpu_transcoder) &&
1321                     intel_de_read(dev_priv, EDP_PSR2_CTL(cpu_transcoder)) & EDP_PSR2_ENABLE);
1322
1323         drm_WARN_ON(&dev_priv->drm,
1324                     intel_de_read(dev_priv, psr_ctl_reg(dev_priv, cpu_transcoder)) & EDP_PSR_ENABLE);
1325
1326         drm_WARN_ON(&dev_priv->drm, intel_dp->psr.active);
1327
1328         lockdep_assert_held(&intel_dp->psr.lock);
1329
1330         /* PSR1 and PSR2 are mutually exclusive. */
1331         if (intel_dp->psr.psr2_enabled)
1332                 hsw_activate_psr2(intel_dp);
1333         else
1334                 hsw_activate_psr1(intel_dp);
1335
1336         intel_dp->psr.active = true;
1337 }
1338
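/*
 * Pick the per-pipe LATENCY_REPORTING_REMOVED bit used by the
 * Wa_16013835468 / Wa_14015648006 handling below.
 */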
1339 static u32 wa_16013835468_bit_get(struct intel_dp *intel_dp)
1340 {
1341         switch (intel_dp->psr.pipe) {
1342         case PIPE_A:
1343                 return LATENCY_REPORTING_REMOVED_PIPE_A;
1344         case PIPE_B:
1345                 return LATENCY_REPORTING_REMOVED_PIPE_B;
1346         case PIPE_C:
1347                 return LATENCY_REPORTING_REMOVED_PIPE_C;
1348         case PIPE_D:
1349                 return LATENCY_REPORTING_REMOVED_PIPE_D;
1350         default:
1351                 MISSING_CASE(intel_dp->psr.pipe);
1352                 return 0;
1353         }
1354 }
1355
1356 /*
1357  * Wa_16013835468
1358  * Wa_14015648006
1359  */
1360 static void wm_optimization_wa(struct intel_dp *intel_dp,
1361                                const struct intel_crtc_state *crtc_state)
1362 {
1363         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1364         bool set_wa_bit = false;
1365
1366         /* Wa_14015648006 */
1367         if (IS_DISPLAY_VER(dev_priv, 11, 14))
1368                 set_wa_bit |= crtc_state->wm_level_disabled;
1369
1370         /* Wa_16013835468 */
1371         if (DISPLAY_VER(dev_priv) == 12)
1372                 set_wa_bit |= crtc_state->hw.adjusted_mode.crtc_vblank_start !=
1373                         crtc_state->hw.adjusted_mode.crtc_vdisplay;
1374
1375         if (set_wa_bit)
1376                 intel_de_rmw(dev_priv, GEN8_CHICKEN_DCPR_1,
1377                              0, wa_16013835468_bit_get(intel_dp));
1378         else
1379                 intel_de_rmw(dev_priv, GEN8_CHICKEN_DCPR_1,
1380                              wa_16013835468_bit_get(intel_dp), 0);
1381 }
1382
1383 static void intel_psr_enable_source(struct intel_dp *intel_dp,
1384                                     const struct intel_crtc_state *crtc_state)
1385 {
1386         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1387         enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
1388         u32 mask;
1389
1390         /*
1391          * Only HSW and BDW have PSR AUX registers that need to be set up.
1392          * SKL+ use hardcoded values for PSR AUX transactions.
1393          */
1394         if (DISPLAY_VER(dev_priv) < 9)
1395                 hsw_psr_setup_aux(intel_dp);
1396
1397         /*
1398          * Per spec: avoid continuous PSR exit by masking MEMUP and HPD. Also
1399          * mask LPSP to avoid a dependency on other drivers that might block
1400          * runtime_pm, and to prevent other HW tracking issues, now that we
1401          * can rely on frontbuffer tracking.
1402          */
1403         mask = EDP_PSR_DEBUG_MASK_MEMUP |
1404                EDP_PSR_DEBUG_MASK_HPD |
1405                EDP_PSR_DEBUG_MASK_LPSP;
1406
1407         if (DISPLAY_VER(dev_priv) < 20)
1408                 mask |= EDP_PSR_DEBUG_MASK_MAX_SLEEP;
1409
1410         /*
1411          * No separate pipe reg write mask on hsw/bdw, so we have to unmask all
1412          * registers in order to keep the CURSURFLIVE tricks working :(
1413          */
1414         if (IS_DISPLAY_VER(dev_priv, 9, 10))
1415                 mask |= EDP_PSR_DEBUG_MASK_DISP_REG_WRITE;
1416
1417         /* allow PSR with sprite enabled */
1418         if (IS_HASWELL(dev_priv))
1419                 mask |= EDP_PSR_DEBUG_MASK_SPRITE_ENABLE;
1420
1421         intel_de_write(dev_priv, psr_debug_reg(dev_priv, cpu_transcoder), mask);
1422
1423         psr_irq_control(intel_dp);
1424
1425         /*
1426          * TODO: if future platforms support DC3CO in more than one
1427          * transcoder, EXITLINE will need to be unset when disabling PSR
1428          */
1429         if (intel_dp->psr.dc3co_exitline)
1430                 intel_de_rmw(dev_priv, TRANS_EXITLINE(cpu_transcoder), EXITLINE_MASK,
1431                              intel_dp->psr.dc3co_exitline << EXITLINE_SHIFT | EXITLINE_ENABLE);
1432
1433         if (HAS_PSR_HW_TRACKING(dev_priv) && HAS_PSR2_SEL_FETCH(dev_priv))
1434                 intel_de_rmw(dev_priv, CHICKEN_PAR1_1, IGNORE_PSR2_HW_TRACKING,
1435                              intel_dp->psr.psr2_sel_fetch_enabled ?
1436                              IGNORE_PSR2_HW_TRACKING : 0);
1437
1438         /*
1439          * Wa_16013835468
1440          * Wa_14015648006
1441          */
1442         wm_optimization_wa(intel_dp, crtc_state);
1443
1444         if (intel_dp->psr.psr2_enabled) {
1445                 if (DISPLAY_VER(dev_priv) == 9)
1446                         intel_de_rmw(dev_priv, CHICKEN_TRANS(cpu_transcoder), 0,
1447                                      PSR2_VSC_ENABLE_PROG_HEADER |
1448                                      PSR2_ADD_VERTICAL_LINE_COUNT);
1449
1450                 /*
1451                  * Wa_16014451276:adlp,mtl[a0,b0]
1452                  * All supported adlp panels have 1-based X granularity; this may
1453                  * cause issues if unsupported panels are used.
1454                  */
1455                 if (IS_DISPLAY_IP_STEP(dev_priv, IP_VER(14, 0), STEP_A0, STEP_B0))
1456                         intel_de_rmw(dev_priv, MTL_CHICKEN_TRANS(cpu_transcoder), 0,
1457                                      ADLP_1_BASED_X_GRANULARITY);
1458                 else if (IS_ALDERLAKE_P(dev_priv))
1459                         intel_de_rmw(dev_priv, CHICKEN_TRANS(cpu_transcoder), 0,
1460                                      ADLP_1_BASED_X_GRANULARITY);
1461
1462                 /* Wa_16012604467:adlp,mtl[a0,b0] */
1463                 if (IS_DISPLAY_IP_STEP(dev_priv, IP_VER(14, 0), STEP_A0, STEP_B0))
1464                         intel_de_rmw(dev_priv,
1465                                      MTL_CLKGATE_DIS_TRANS(cpu_transcoder), 0,
1466                                      MTL_CLKGATE_DIS_TRANS_DMASC_GATING_DIS);
1467                 else if (IS_ALDERLAKE_P(dev_priv))
1468                         intel_de_rmw(dev_priv, CLKGATE_DIS_MISC, 0,
1469                                      CLKGATE_DIS_MISC_DMASC_GATING_DIS);
1470         }
1471 }
1472
1473 static bool psr_interrupt_error_check(struct intel_dp *intel_dp)
1474 {
1475         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1476         enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
1477         u32 val;
1478
1479         /*
1480          * If a PSR error happened and the driver is reloaded, the EDP_PSR_IIR
1481          * will still keep the error set even after the reset done in the
1482          * irq_preinstall and irq_uninstall hooks.
1483          * Enabling PSR in this situation causes the screen to freeze the
1484          * first time the PSR HW tries to activate, so let's keep PSR disabled
1485          * to avoid any rendering problems.
1486          */
1487         val = intel_de_read(dev_priv, psr_iir_reg(dev_priv, cpu_transcoder));
1488         val &= psr_irq_psr_error_bit_get(intel_dp);
1489         if (val) {
1490                 intel_dp->psr.sink_not_reliable = true;
1491                 drm_dbg_kms(&dev_priv->drm,
1492                             "PSR interruption error set, not enabling PSR\n");
1493                 return false;
1494         }
1495
1496         return true;
1497 }
1498
1499 static void intel_psr_enable_locked(struct intel_dp *intel_dp,
1500                                     const struct intel_crtc_state *crtc_state)
1501 {
1502         struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
1503         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1504         enum phy phy = intel_port_to_phy(dev_priv, dig_port->base.port);
1505         struct intel_encoder *encoder = &dig_port->base;
1506         u32 val;
1507
1508         drm_WARN_ON(&dev_priv->drm, intel_dp->psr.enabled);
1509
1510         intel_dp->psr.psr2_enabled = crtc_state->has_psr2;
1511         intel_dp->psr.busy_frontbuffer_bits = 0;
1512         intel_dp->psr.pipe = to_intel_crtc(crtc_state->uapi.crtc)->pipe;
1513         intel_dp->psr.transcoder = crtc_state->cpu_transcoder;
1514         /* DC5/DC6 requires at least 6 idle frames */
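        /* e.g. (illustrative) at a 60 Hz refresh rate this works out to roughly 6 * 16.7 ms = ~100 ms */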
1515         val = usecs_to_jiffies(intel_get_frame_time_us(crtc_state) * 6);
1516         intel_dp->psr.dc3co_exit_delay = val;
1517         intel_dp->psr.dc3co_exitline = crtc_state->dc3co_exitline;
1518         intel_dp->psr.psr2_sel_fetch_enabled = crtc_state->enable_psr2_sel_fetch;
1519         intel_dp->psr.psr2_sel_fetch_cff_enabled = false;
1520         intel_dp->psr.req_psr2_sdp_prior_scanline =
1521                 crtc_state->req_psr2_sdp_prior_scanline;
1522
1523         if (!psr_interrupt_error_check(intel_dp))
1524                 return;
1525
1526         drm_dbg_kms(&dev_priv->drm, "Enabling PSR%s\n",
1527                     intel_dp->psr.psr2_enabled ? "2" : "1");
1528         intel_write_dp_vsc_sdp(encoder, crtc_state, &crtc_state->psr_vsc);
1529         intel_snps_phy_update_psr_power_state(dev_priv, phy, true);
1530         intel_psr_enable_sink(intel_dp);
1531         intel_psr_enable_source(intel_dp, crtc_state);
1532         intel_dp->psr.enabled = true;
1533         intel_dp->psr.paused = false;
1534
1535         intel_psr_activate(intel_dp);
1536 }
1537
1538 static void intel_psr_exit(struct intel_dp *intel_dp)
1539 {
1540         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1541         enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
1542         u32 val;
1543
1544         if (!intel_dp->psr.active) {
1545                 if (transcoder_has_psr2(dev_priv, cpu_transcoder)) {
1546                         val = intel_de_read(dev_priv, EDP_PSR2_CTL(cpu_transcoder));
1547                         drm_WARN_ON(&dev_priv->drm, val & EDP_PSR2_ENABLE);
1548                 }
1549
1550                 val = intel_de_read(dev_priv, psr_ctl_reg(dev_priv, cpu_transcoder));
1551                 drm_WARN_ON(&dev_priv->drm, val & EDP_PSR_ENABLE);
1552
1553                 return;
1554         }
1555
1556         if (intel_dp->psr.psr2_enabled) {
1557                 tgl_disallow_dc3co_on_psr2_exit(intel_dp);
1558
1559                 val = intel_de_rmw(dev_priv, EDP_PSR2_CTL(cpu_transcoder),
1560                                    EDP_PSR2_ENABLE, 0);
1561
1562                 drm_WARN_ON(&dev_priv->drm, !(val & EDP_PSR2_ENABLE));
1563         } else {
1564                 val = intel_de_rmw(dev_priv, psr_ctl_reg(dev_priv, cpu_transcoder),
1565                                    EDP_PSR_ENABLE, 0);
1566
1567                 drm_WARN_ON(&dev_priv->drm, !(val & EDP_PSR_ENABLE));
1568         }
1569         intel_dp->psr.active = false;
1570 }
1571
1572 static void intel_psr_wait_exit_locked(struct intel_dp *intel_dp)
1573 {
1574         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1575         enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
1576         i915_reg_t psr_status;
1577         u32 psr_status_mask;
1578
1579         if (intel_dp->psr.psr2_enabled) {
1580                 psr_status = EDP_PSR2_STATUS(cpu_transcoder);
1581                 psr_status_mask = EDP_PSR2_STATUS_STATE_MASK;
1582         } else {
1583                 psr_status = psr_status_reg(dev_priv, cpu_transcoder);
1584                 psr_status_mask = EDP_PSR_STATUS_STATE_MASK;
1585         }
1586
1587         /* Wait till PSR is idle */
1588         if (intel_de_wait_for_clear(dev_priv, psr_status,
1589                                     psr_status_mask, 2000))
1590                 drm_err(&dev_priv->drm, "Timed out waiting PSR idle state\n");
1591 }
1592
1593 static void intel_psr_disable_locked(struct intel_dp *intel_dp)
1594 {
1595         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1596         enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
1597         enum phy phy = intel_port_to_phy(dev_priv,
1598                                          dp_to_dig_port(intel_dp)->base.port);
1599
1600         lockdep_assert_held(&intel_dp->psr.lock);
1601
1602         if (!intel_dp->psr.enabled)
1603                 return;
1604
1605         drm_dbg_kms(&dev_priv->drm, "Disabling PSR%s\n",
1606                     intel_dp->psr.psr2_enabled ? "2" : "1");
1607
1608         intel_psr_exit(intel_dp);
1609         intel_psr_wait_exit_locked(intel_dp);
1610
1611         /*
1612          * Wa_16013835468
1613          * Wa_14015648006
1614          */
1615         if (DISPLAY_VER(dev_priv) >= 11)
1616                 intel_de_rmw(dev_priv, GEN8_CHICKEN_DCPR_1,
1617                              wa_16013835468_bit_get(intel_dp), 0);
1618
1619         if (intel_dp->psr.psr2_enabled) {
1620                 /* Wa_16012604467:adlp,mtl[a0,b0] */
1621                 if (IS_DISPLAY_IP_STEP(dev_priv, IP_VER(14, 0), STEP_A0, STEP_B0))
1622                         intel_de_rmw(dev_priv,
1623                                      MTL_CLKGATE_DIS_TRANS(cpu_transcoder),
1624                                      MTL_CLKGATE_DIS_TRANS_DMASC_GATING_DIS, 0);
1625                 else if (IS_ALDERLAKE_P(dev_priv))
1626                         intel_de_rmw(dev_priv, CLKGATE_DIS_MISC,
1627                                      CLKGATE_DIS_MISC_DMASC_GATING_DIS, 0);
1628         }
1629
1630         intel_snps_phy_update_psr_power_state(dev_priv, phy, false);
1631
1632         /* Disable PSR on Sink */
1633         drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG, 0);
1634
1635         if (intel_dp->psr.psr2_enabled)
1636                 drm_dp_dpcd_writeb(&intel_dp->aux, DP_RECEIVER_ALPM_CONFIG, 0);
1637
1638         intel_dp->psr.enabled = false;
1639         intel_dp->psr.psr2_enabled = false;
1640         intel_dp->psr.psr2_sel_fetch_enabled = false;
1641         intel_dp->psr.psr2_sel_fetch_cff_enabled = false;
1642 }
1643
1644 /**
1645  * intel_psr_disable - Disable PSR
1646  * @intel_dp: Intel DP
1647  * @old_crtc_state: old CRTC state
1648  *
1649  * This function needs to be called before disabling the pipe.
1650  */
1651 void intel_psr_disable(struct intel_dp *intel_dp,
1652                        const struct intel_crtc_state *old_crtc_state)
1653 {
1654         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1655
1656         if (!old_crtc_state->has_psr)
1657                 return;
1658
1659         if (drm_WARN_ON(&dev_priv->drm, !CAN_PSR(intel_dp)))
1660                 return;
1661
1662         mutex_lock(&intel_dp->psr.lock);
1663
1664         intel_psr_disable_locked(intel_dp);
1665
1666         mutex_unlock(&intel_dp->psr.lock);
1667         cancel_work_sync(&intel_dp->psr.work);
1668         cancel_delayed_work_sync(&intel_dp->psr.dc3co_work);
1669 }
1670
1671 /**
1672  * intel_psr_pause - Pause PSR
1673  * @intel_dp: Intel DP
1674  *
1675  * This function needs to be called after enabling PSR.
1676  */
1677 void intel_psr_pause(struct intel_dp *intel_dp)
1678 {
1679         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1680         struct intel_psr *psr = &intel_dp->psr;
1681
1682         if (!CAN_PSR(intel_dp))
1683                 return;
1684
1685         mutex_lock(&psr->lock);
1686
1687         if (!psr->enabled) {
1688                 mutex_unlock(&psr->lock);
1689                 return;
1690         }
1691
1692         /* If we ever hit this, we will need to add refcount to pause/resume */
1693         drm_WARN_ON(&dev_priv->drm, psr->paused);
1694
1695         intel_psr_exit(intel_dp);
1696         intel_psr_wait_exit_locked(intel_dp);
1697         psr->paused = true;
1698
1699         mutex_unlock(&psr->lock);
1700
1701         cancel_work_sync(&psr->work);
1702         cancel_delayed_work_sync(&psr->dc3co_work);
1703 }
1704
1705 /**
1706  * intel_psr_resume - Resume PSR
1707  * @intel_dp: Intel DP
1708  *
1709  * This function needs to be called after pausing PSR.
1710  */
1711 void intel_psr_resume(struct intel_dp *intel_dp)
1712 {
1713         struct intel_psr *psr = &intel_dp->psr;
1714
1715         if (!CAN_PSR(intel_dp))
1716                 return;
1717
1718         mutex_lock(&psr->lock);
1719
1720         if (!psr->paused)
1721                 goto unlock;
1722
1723         psr->paused = false;
1724         intel_psr_activate(intel_dp);
1725
1726 unlock:
1727         mutex_unlock(&psr->lock);
1728 }
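
/*
 * Minimal usage sketch (illustrative only, not lifted from an actual caller):
 * code that must not race with PSR entry can bracket its critical work with
 * the pause/resume pair above, e.g.:
 *
 *        intel_psr_pause(intel_dp);
 *        ... poke hardware that PSR must stay out of ...
 *        intel_psr_resume(intel_dp);
 *
 * Note that intel_psr_pause() also cancels the PSR work items synchronously,
 * so it must not be called from those work functions themselves.
 */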
1729
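/*
 * The PSR2_MAN_TRK_CTL bit layout differs between platforms: ADLP and
 * display 14+ use the ADLP_* single/continuous/partial frame bits and have
 * no separate enable bit, while older platforms use the PSR2_MAN_TRK_CTL_*
 * variants. The helpers below hide that difference.
 */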
1730 static u32 man_trk_ctl_enable_bit_get(struct drm_i915_private *dev_priv)
1731 {
1732         return IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(dev_priv) >= 14 ? 0 :
1733                 PSR2_MAN_TRK_CTL_ENABLE;
1734 }
1735
1736 static u32 man_trk_ctl_single_full_frame_bit_get(struct drm_i915_private *dev_priv)
1737 {
1738         return IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(dev_priv) >= 14 ?
1739                ADLP_PSR2_MAN_TRK_CTL_SF_SINGLE_FULL_FRAME :
1740                PSR2_MAN_TRK_CTL_SF_SINGLE_FULL_FRAME;
1741 }
1742
1743 static u32 man_trk_ctl_partial_frame_bit_get(struct drm_i915_private *dev_priv)
1744 {
1745         return IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(dev_priv) >= 14 ?
1746                ADLP_PSR2_MAN_TRK_CTL_SF_PARTIAL_FRAME_UPDATE :
1747                PSR2_MAN_TRK_CTL_SF_PARTIAL_FRAME_UPDATE;
1748 }
1749
1750 static u32 man_trk_ctl_continuos_full_frame(struct drm_i915_private *dev_priv)
1751 {
1752         return IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(dev_priv) >= 14 ?
1753                ADLP_PSR2_MAN_TRK_CTL_SF_CONTINUOS_FULL_FRAME :
1754                PSR2_MAN_TRK_CTL_SF_CONTINUOS_FULL_FRAME;
1755 }
1756
1757 static void psr_force_hw_tracking_exit(struct intel_dp *intel_dp)
1758 {
1759         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1760         enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
1761
1762         if (intel_dp->psr.psr2_sel_fetch_enabled)
1763                 intel_de_write(dev_priv,
1764                                PSR2_MAN_TRK_CTL(cpu_transcoder),
1765                                man_trk_ctl_enable_bit_get(dev_priv) |
1766                                man_trk_ctl_partial_frame_bit_get(dev_priv) |
1767                                man_trk_ctl_single_full_frame_bit_get(dev_priv) |
1768                                man_trk_ctl_continuos_full_frame(dev_priv));
1769
1770         /*
1771          * Display WA #0884: skl+
1772          * This documented WA for bxt can be safely applied
1773          * broadly so we can force HW tracking to exit PSR
1774          * instead of disabling and re-enabling.
1775          * The workaround tells us to write 0 to CUR_SURFLIVE_A,
1776          * but it makes more sense to write to the currently
1777          * active pipe.
1778          *
1779          * This workaround is not listed for platforms with display 10 or
1780          * newer, but testing proved that it works up to display 13; for
1781          * anything newer, further testing will be needed.
1782          */
1783         intel_de_write(dev_priv, CURSURFLIVE(intel_dp->psr.pipe), 0);
1784 }
1785
1786 void intel_psr2_disable_plane_sel_fetch_arm(struct intel_plane *plane,
1787                                             const struct intel_crtc_state *crtc_state)
1788 {
1789         struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
1790         enum pipe pipe = plane->pipe;
1791
1792         if (!crtc_state->enable_psr2_sel_fetch)
1793                 return;
1794
1795         intel_de_write_fw(dev_priv, PLANE_SEL_FETCH_CTL(pipe, plane->id), 0);
1796 }
1797
1798 void intel_psr2_program_plane_sel_fetch_arm(struct intel_plane *plane,
1799                                             const struct intel_crtc_state *crtc_state,
1800                                             const struct intel_plane_state *plane_state)
1801 {
1802         struct drm_i915_private *i915 = to_i915(plane->base.dev);
1803         enum pipe pipe = plane->pipe;
1804
1805         if (!crtc_state->enable_psr2_sel_fetch)
1806                 return;
1807
1808         if (plane->id == PLANE_CURSOR)
1809                 intel_de_write_fw(i915, PLANE_SEL_FETCH_CTL(pipe, plane->id),
1810                                   plane_state->ctl);
1811         else
1812                 intel_de_write_fw(i915, PLANE_SEL_FETCH_CTL(pipe, plane->id),
1813                                   PLANE_SEL_FETCH_CTL_ENABLE);
1814 }
1815
1816 void intel_psr2_program_plane_sel_fetch_noarm(struct intel_plane *plane,
1817                                               const struct intel_crtc_state *crtc_state,
1818                                               const struct intel_plane_state *plane_state,
1819                                               int color_plane)
1820 {
1821         struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
1822         enum pipe pipe = plane->pipe;
1823         const struct drm_rect *clip;
1824         u32 val;
1825         int x, y;
1826
1827         if (!crtc_state->enable_psr2_sel_fetch)
1828                 return;
1829
1830         if (plane->id == PLANE_CURSOR)
1831                 return;
1832
1833         clip = &plane_state->psr2_sel_fetch_area;
1834
1835         val = (clip->y1 + plane_state->uapi.dst.y1) << 16;
1836         val |= plane_state->uapi.dst.x1;
1837         intel_de_write_fw(dev_priv, PLANE_SEL_FETCH_POS(pipe, plane->id), val);
1838
1839         x = plane_state->view.color_plane[color_plane].x;
1840
1841         /*
1842          * From Bspec: UV surface Start Y Position = half of Y plane Y
1843          * start position.
1844          */
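        /*
         * Illustrative example: for a planar (e.g. NV12) framebuffer with
         * clip->y1 == 8, the Y plane fetch offset below advances by 8 lines
         * while the half-height UV plane advances by clip->y1 / 2 == 4 lines.
         */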
1845         if (!color_plane)
1846                 y = plane_state->view.color_plane[color_plane].y + clip->y1;
1847         else
1848                 y = plane_state->view.color_plane[color_plane].y + clip->y1 / 2;
1849
1850         val = y << 16 | x;
1851
1852         intel_de_write_fw(dev_priv, PLANE_SEL_FETCH_OFFSET(pipe, plane->id),
1853                           val);
1854
1855         /* Sizes are 0 based */
1856         val = (drm_rect_height(clip) - 1) << 16;
1857         val |= (drm_rect_width(&plane_state->uapi.src) >> 16) - 1;
1858         intel_de_write_fw(dev_priv, PLANE_SEL_FETCH_SIZE(pipe, plane->id), val);
1859 }
1860
1861 void intel_psr2_program_trans_man_trk_ctl(const struct intel_crtc_state *crtc_state)
1862 {
1863         struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
1864         enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
1865         struct intel_encoder *encoder;
1866
1867         if (!crtc_state->enable_psr2_sel_fetch)
1868                 return;
1869
1870         for_each_intel_encoder_mask_with_psr(&dev_priv->drm, encoder,
1871                                              crtc_state->uapi.encoder_mask) {
1872                 struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
1873
1874                 lockdep_assert_held(&intel_dp->psr.lock);
1875                 if (intel_dp->psr.psr2_sel_fetch_cff_enabled)
1876                         return;
1877                 break;
1878         }
1879
1880         intel_de_write(dev_priv, PSR2_MAN_TRK_CTL(cpu_transcoder),
1881                        crtc_state->psr2_man_track_ctl);
1882 }
1883
1884 static void psr2_man_trk_ctl_calc(struct intel_crtc_state *crtc_state,
1885                                   struct drm_rect *clip, bool full_update)
1886 {
1887         struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
1888         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1889         u32 val = man_trk_ctl_enable_bit_get(dev_priv);
1890
1891         /* SF partial frame enable has to be set even on full update */
1892         val |= man_trk_ctl_partial_frame_bit_get(dev_priv);
1893
1894         if (full_update) {
1895                 val |= man_trk_ctl_single_full_frame_bit_get(dev_priv);
1896                 val |= man_trk_ctl_continuos_full_frame(dev_priv);
1897                 goto exit;
1898         }
1899
1900         if (clip->y1 == -1)
1901                 goto exit;
1902
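        /*
         * Illustrative example: with clip->y1 == 40 and clip->y2 == 120, ADLP
         * and display 14+ program a start/end of 40/119 below, while older
         * platforms program the region in 4-line blocks as 11/31.
         */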
1903         if (IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(dev_priv) >= 14) {
1904                 val |= ADLP_PSR2_MAN_TRK_CTL_SU_REGION_START_ADDR(clip->y1);
1905                 val |= ADLP_PSR2_MAN_TRK_CTL_SU_REGION_END_ADDR(clip->y2 - 1);
1906         } else {
1907                 drm_WARN_ON(crtc_state->uapi.crtc->dev, clip->y1 % 4 || clip->y2 % 4);
1908
1909                 val |= PSR2_MAN_TRK_CTL_SU_REGION_START_ADDR(clip->y1 / 4 + 1);
1910                 val |= PSR2_MAN_TRK_CTL_SU_REGION_END_ADDR(clip->y2 / 4 + 1);
1911         }
1912 exit:
1913         crtc_state->psr2_man_track_ctl = val;
1914 }
1915
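/*
 * Merge @damage_area (clamped to @pipe_src) into the vertical range tracked
 * by @overlap_damage_area; a y1 of -1 means no damage has been accumulated yet.
 */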
1916 static void clip_area_update(struct drm_rect *overlap_damage_area,
1917                              struct drm_rect *damage_area,
1918                              struct drm_rect *pipe_src)
1919 {
1920         if (!drm_rect_intersect(damage_area, pipe_src))
1921                 return;
1922
1923         if (overlap_damage_area->y1 == -1) {
1924                 overlap_damage_area->y1 = damage_area->y1;
1925                 overlap_damage_area->y2 = damage_area->y2;
1926                 return;
1927         }
1928
1929         if (damage_area->y1 < overlap_damage_area->y1)
1930                 overlap_damage_area->y1 = damage_area->y1;
1931
1932         if (damage_area->y2 > overlap_damage_area->y2)
1933                 overlap_damage_area->y2 = damage_area->y2;
1934 }
1935
1936 static void intel_psr2_sel_fetch_pipe_alignment(const struct intel_crtc_state *crtc_state,
1937                                                 struct drm_rect *pipe_clip)
1938 {
1939         struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
1940         const struct drm_dsc_config *vdsc_cfg = &crtc_state->dsc.config;
1941         u16 y_alignment;
1942
1943         /* ADLP aligns the SU region to vdsc slice height in case dsc is enabled */
1944         if (crtc_state->dsc.compression_enable &&
1945             (IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(dev_priv) >= 14))
1946                 y_alignment = vdsc_cfg->slice_height;
1947         else
1948                 y_alignment = crtc_state->su_y_granularity;
1949
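        /*
         * Round the SU region out to the alignment, e.g. (illustrative) with a
         * y_alignment of 4 and a clip of y1 == 6, y2 == 10 the region becomes
         * y1 == 4, y2 == 12.
         */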
1950         pipe_clip->y1 -= pipe_clip->y1 % y_alignment;
1951         if (pipe_clip->y2 % y_alignment)
1952                 pipe_clip->y2 = ((pipe_clip->y2 / y_alignment) + 1) * y_alignment;
1953 }
1954
1955 /*
1956  * TODO: It is not clear how to handle planes with a negative position;
1957  * also, planes are not updated if they have a negative X
1958  * position, so for now do a full update in these cases.
1959  *
1960  * Plane scaling and rotation are not supported by selective fetch, and both
1961  * properties can change without a modeset, so they need to be checked at
1962  * every atomic commit.
1963  */
1964 static bool psr2_sel_fetch_plane_state_supported(const struct intel_plane_state *plane_state)
1965 {
1966         if (plane_state->uapi.dst.y1 < 0 ||
1967             plane_state->uapi.dst.x1 < 0 ||
1968             plane_state->scaler_id >= 0 ||
1969             plane_state->uapi.rotation != DRM_MODE_ROTATE_0)
1970                 return false;
1971
1972         return true;
1973 }
1974
1975 /*
1976  * Check for pipe properties that are not supported by selective fetch.
1977  *
1978  * TODO: pipe scaling causes a modeset but skl_update_scaler_crtc() is executed
1979  * after intel_psr_compute_config(), so for now keeping PSR2 selective fetch
1980  * enabled and going to the full update path.
1981  */
1982 static bool psr2_sel_fetch_pipe_state_supported(const struct intel_crtc_state *crtc_state)
1983 {
1984         if (crtc_state->scaler_state.scaler_id >= 0)
1985                 return false;
1986
1987         return true;
1988 }
1989
1990 int intel_psr2_sel_fetch_update(struct intel_atomic_state *state,
1991                                 struct intel_crtc *crtc)
1992 {
1993         struct drm_i915_private *dev_priv = to_i915(state->base.dev);
1994         struct intel_crtc_state *crtc_state = intel_atomic_get_new_crtc_state(state, crtc);
1995         struct drm_rect pipe_clip = { .x1 = 0, .y1 = -1, .x2 = INT_MAX, .y2 = -1 };
1996         struct intel_plane_state *new_plane_state, *old_plane_state;
1997         struct intel_plane *plane;
1998         bool full_update = false;
1999         int i, ret;
2000
2001         if (!crtc_state->enable_psr2_sel_fetch)
2002                 return 0;
2003
2004         if (!psr2_sel_fetch_pipe_state_supported(crtc_state)) {
2005                 full_update = true;
2006                 goto skip_sel_fetch_set_loop;
2007         }
2008
2009         /*
2010          * Calculate the minimal selective fetch area of each plane and the
2011          * resulting pipe damaged area.
2012          * In the next loop the plane selective fetch area will actually be set
2013          * using the whole pipe damaged area.
2014          */
2015         for_each_oldnew_intel_plane_in_state(state, plane, old_plane_state,
2016                                              new_plane_state, i) {
2017                 struct drm_rect src, damaged_area = { .x1 = 0, .y1 = -1,
2018                                                       .x2 = INT_MAX };
2019
2020                 if (new_plane_state->uapi.crtc != crtc_state->uapi.crtc)
2021                         continue;
2022
2023                 if (!new_plane_state->uapi.visible &&
2024                     !old_plane_state->uapi.visible)
2025                         continue;
2026
2027                 if (!psr2_sel_fetch_plane_state_supported(new_plane_state)) {
2028                         full_update = true;
2029                         break;
2030                 }
2031
2032                 /*
2033                  * If visibility changed or the plane moved, mark the whole plane
2034                  * area as damaged as it needs a complete redraw in both the new
2035                  * and old position.
2036                  */
2037                 if (new_plane_state->uapi.visible != old_plane_state->uapi.visible ||
2038                     !drm_rect_equals(&new_plane_state->uapi.dst,
2039                                      &old_plane_state->uapi.dst)) {
2040                         if (old_plane_state->uapi.visible) {
2041                                 damaged_area.y1 = old_plane_state->uapi.dst.y1;
2042                                 damaged_area.y2 = old_plane_state->uapi.dst.y2;
2043                                 clip_area_update(&pipe_clip, &damaged_area,
2044                                                  &crtc_state->pipe_src);
2045                         }
2046
2047                         if (new_plane_state->uapi.visible) {
2048                                 damaged_area.y1 = new_plane_state->uapi.dst.y1;
2049                                 damaged_area.y2 = new_plane_state->uapi.dst.y2;
2050                                 clip_area_update(&pipe_clip, &damaged_area,
2051                                                  &crtc_state->pipe_src);
2052                         }
2053                         continue;
2054                 } else if (new_plane_state->uapi.alpha != old_plane_state->uapi.alpha) {
2055                         /* If alpha changed mark the whole plane area as damaged */
2056                         damaged_area.y1 = new_plane_state->uapi.dst.y1;
2057                         damaged_area.y2 = new_plane_state->uapi.dst.y2;
2058                         clip_area_update(&pipe_clip, &damaged_area,
2059                                          &crtc_state->pipe_src);
2060                         continue;
2061                 }
2062
2063                 src = drm_plane_state_src(&new_plane_state->uapi);
2064                 drm_rect_fp_to_int(&src, &src);
2065
2066                 if (!drm_atomic_helper_damage_merged(&old_plane_state->uapi,
2067                                                      &new_plane_state->uapi, &damaged_area))
2068                         continue;
2069
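                /* Translate the merged damage from framebuffer to pipe coordinates */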
2070                 damaged_area.y1 += new_plane_state->uapi.dst.y1 - src.y1;
2071                 damaged_area.y2 += new_plane_state->uapi.dst.y1 - src.y1;
2072                 damaged_area.x1 += new_plane_state->uapi.dst.x1 - src.x1;
2073                 damaged_area.x2 += new_plane_state->uapi.dst.x1 - src.x1;
2074
2075                 clip_area_update(&pipe_clip, &damaged_area, &crtc_state->pipe_src);
2076         }
2077
2078         /*
2079          * TODO: For now we are just using full update in case
2080          * selective fetch area calculation fails. To optimize this we
2081          * should identify cases where this happens and fix the area
2082          * calculation for those.
2083          */
2084         if (pipe_clip.y1 == -1) {
2085                 drm_info_once(&dev_priv->drm,
2086                               "Selective fetch area calculation failed in pipe %c\n",
2087                               pipe_name(crtc->pipe));
2088                 full_update = true;
2089         }
2090
2091         if (full_update)
2092                 goto skip_sel_fetch_set_loop;
2093
2094         /* Wa_14014971492 */
2095         if ((IS_DISPLAY_IP_STEP(dev_priv, IP_VER(14, 0), STEP_A0, STEP_B0) ||
2096              IS_ALDERLAKE_P(dev_priv) || IS_TIGERLAKE(dev_priv)) &&
2097             crtc_state->splitter.enable)
2098                 pipe_clip.y1 = 0;
2099
2100         ret = drm_atomic_add_affected_planes(&state->base, &crtc->base);
2101         if (ret)
2102                 return ret;
2103
2104         intel_psr2_sel_fetch_pipe_alignment(crtc_state, &pipe_clip);
2105
2106         /*
2107          * Now that we have the pipe damaged area, check if it intersects with
2108          * each plane; if it does, set that plane's selective fetch area.
2109          */
2110         for_each_oldnew_intel_plane_in_state(state, plane, old_plane_state,
2111                                              new_plane_state, i) {
2112                 struct drm_rect *sel_fetch_area, inter;
2113                 struct intel_plane *linked = new_plane_state->planar_linked_plane;
2114
2115                 if (new_plane_state->uapi.crtc != crtc_state->uapi.crtc ||
2116                     !new_plane_state->uapi.visible)
2117                         continue;
2118
2119                 inter = pipe_clip;
2120                 if (!drm_rect_intersect(&inter, &new_plane_state->uapi.dst))
2121                         continue;
2122
2123                 if (!psr2_sel_fetch_plane_state_supported(new_plane_state)) {
2124                         full_update = true;
2125                         break;
2126                 }
2127
2128                 sel_fetch_area = &new_plane_state->psr2_sel_fetch_area;
2129                 sel_fetch_area->y1 = inter.y1 - new_plane_state->uapi.dst.y1;
2130                 sel_fetch_area->y2 = inter.y2 - new_plane_state->uapi.dst.y1;
2131                 crtc_state->update_planes |= BIT(plane->id);
2132
2133                 /*
2134                  * sel_fetch_area is calculated for the UV plane. Use the
2135                  * same area for the Y plane as well.
2136                  */
2137                 if (linked) {
2138                         struct intel_plane_state *linked_new_plane_state;
2139                         struct drm_rect *linked_sel_fetch_area;
2140
2141                         linked_new_plane_state = intel_atomic_get_plane_state(state, linked);
2142                         if (IS_ERR(linked_new_plane_state))
2143                                 return PTR_ERR(linked_new_plane_state);
2144
2145                         linked_sel_fetch_area = &linked_new_plane_state->psr2_sel_fetch_area;
2146                         linked_sel_fetch_area->y1 = sel_fetch_area->y1;
2147                         linked_sel_fetch_area->y2 = sel_fetch_area->y2;
2148                         crtc_state->update_planes |= BIT(linked->id);
2149                 }
2150         }
2151
2152 skip_sel_fetch_set_loop:
2153         psr2_man_trk_ctl_calc(crtc_state, &pipe_clip, full_update);
2154         return 0;
2155 }
2156
2157 void intel_psr_pre_plane_update(struct intel_atomic_state *state,
2158                                 struct intel_crtc *crtc)
2159 {
2160         struct drm_i915_private *i915 = to_i915(state->base.dev);
2161         const struct intel_crtc_state *old_crtc_state =
2162                 intel_atomic_get_old_crtc_state(state, crtc);
2163         const struct intel_crtc_state *new_crtc_state =
2164                 intel_atomic_get_new_crtc_state(state, crtc);
2165         struct intel_encoder *encoder;
2166
2167         if (!HAS_PSR(i915))
2168                 return;
2169
2170         for_each_intel_encoder_mask_with_psr(state->base.dev, encoder,
2171                                              old_crtc_state->uapi.encoder_mask) {
2172                 struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
2173                 struct intel_psr *psr = &intel_dp->psr;
2174                 bool needs_to_disable = false;
2175
2176                 mutex_lock(&psr->lock);
2177
2178                 /*
2179                  * Reasons to disable:
2180                  * - PSR disabled in new state
2181                  * - All planes will go inactive
2182                  * - Changing between PSR versions
2183                  * - Display WA #1136: skl, bxt
2184                  */
2185                 needs_to_disable |= intel_crtc_needs_modeset(new_crtc_state);
2186                 needs_to_disable |= !new_crtc_state->has_psr;
2187                 needs_to_disable |= !new_crtc_state->active_planes;
2188                 needs_to_disable |= new_crtc_state->has_psr2 != psr->psr2_enabled;
2189                 needs_to_disable |= DISPLAY_VER(i915) < 11 &&
2190                         new_crtc_state->wm_level_disabled;
2191
2192                 if (psr->enabled && needs_to_disable)
2193                         intel_psr_disable_locked(intel_dp);
2194                 else if (psr->enabled && new_crtc_state->wm_level_disabled)
2195                         /* Wa_14015648006 */
2196                         wm_optimization_wa(intel_dp, new_crtc_state);
2197
2198                 mutex_unlock(&psr->lock);
2199         }
2200 }
2201
2202 void intel_psr_post_plane_update(struct intel_atomic_state *state,
2203                                  struct intel_crtc *crtc)
2204 {
2205         struct drm_i915_private *dev_priv = to_i915(state->base.dev);
2206         const struct intel_crtc_state *crtc_state =
2207                 intel_atomic_get_new_crtc_state(state, crtc);
2208         struct intel_encoder *encoder;
2209
2210         if (!crtc_state->has_psr)
2211                 return;
2212
2213         for_each_intel_encoder_mask_with_psr(state->base.dev, encoder,
2214                                              crtc_state->uapi.encoder_mask) {
2215                 struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
2216                 struct intel_psr *psr = &intel_dp->psr;
2217                 bool keep_disabled = false;
2218
2219                 mutex_lock(&psr->lock);
2220
2221                 drm_WARN_ON(&dev_priv->drm, psr->enabled && !crtc_state->active_planes);
2222
2223                 keep_disabled |= psr->sink_not_reliable;
2224                 keep_disabled |= !crtc_state->active_planes;
2225
2226                 /* Display WA #1136: skl, bxt */
2227                 keep_disabled |= DISPLAY_VER(dev_priv) < 11 &&
2228                         crtc_state->wm_level_disabled;
2229
2230                 if (!psr->enabled && !keep_disabled)
2231                         intel_psr_enable_locked(intel_dp, crtc_state);
2232                 else if (psr->enabled && !crtc_state->wm_level_disabled)
2233                         /* Wa_14015648006 */
2234                         wm_optimization_wa(intel_dp, crtc_state);
2235
2236                 /* Force a PSR exit when enabling CRC to avoid CRC timeouts */
2237                 if (crtc_state->crc_enabled && psr->enabled)
2238                         psr_force_hw_tracking_exit(intel_dp);
2239
2240                 /*
2241                  * Clear possible busy bits in case we have
2242                  * invalidate -> flip -> flush sequence.
2243                  */
2244                 intel_dp->psr.busy_frontbuffer_bits = 0;
2245
2246                 mutex_unlock(&psr->lock);
2247         }
2248 }
2249
2250 static int _psr2_ready_for_pipe_update_locked(struct intel_dp *intel_dp)
2251 {
2252         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
2253         enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
2254
2255         /*
2256          * Any state lower than EDP_PSR2_STATUS_STATE_DEEP_SLEEP is enough.
2257          * As all higher states have bit 4 of the PSR2 state set, we can just
2258          * wait for EDP_PSR2_STATUS_STATE_DEEP_SLEEP to be cleared.
2259          */
2260         return intel_de_wait_for_clear(dev_priv,
2261                                        EDP_PSR2_STATUS(cpu_transcoder),
2262                                        EDP_PSR2_STATUS_STATE_DEEP_SLEEP, 50);
2263 }
2264
2265 static int _psr1_ready_for_pipe_update_locked(struct intel_dp *intel_dp)
2266 {
2267         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
2268         enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
2269
2270         /*
2271          * From bspec: Panel Self Refresh (BDW+)
2272          * Max. time for PSR to idle = Inverse of the refresh rate + 6 ms of
2273          * exit training time + 1.5 ms of aux channel handshake. 50 ms is
2274          * defensive enough to cover everything.
2275          */
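        /*
         * Illustrative example: at a 60 Hz refresh rate that is roughly
         * 16.7 + 6 + 1.5 = ~24 ms, comfortably within the 50 ms timeout below.
         */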
2276         return intel_de_wait_for_clear(dev_priv,
2277                                        psr_status_reg(dev_priv, cpu_transcoder),
2278                                        EDP_PSR_STATUS_STATE_MASK, 50);
2279 }
2280
2281 /**
2282  * intel_psr_wait_for_idle_locked - wait for PSR to be ready for a pipe update
2283  * @new_crtc_state: new CRTC state
2284  *
2285  * This function is expected to be called from pipe_update_start() where it is
2286  * not expected to race with PSR enable or disable.
2287  */
2288 void intel_psr_wait_for_idle_locked(const struct intel_crtc_state *new_crtc_state)
2289 {
2290         struct drm_i915_private *dev_priv = to_i915(new_crtc_state->uapi.crtc->dev);
2291         struct intel_encoder *encoder;
2292
2293         if (!new_crtc_state->has_psr)
2294                 return;
2295
2296         for_each_intel_encoder_mask_with_psr(&dev_priv->drm, encoder,
2297                                              new_crtc_state->uapi.encoder_mask) {
2298                 struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
2299                 int ret;
2300
2301                 lockdep_assert_held(&intel_dp->psr.lock);
2302
2303                 if (!intel_dp->psr.enabled)
2304                         continue;
2305
2306                 if (intel_dp->psr.psr2_enabled)
2307                         ret = _psr2_ready_for_pipe_update_locked(intel_dp);
2308                 else
2309                         ret = _psr1_ready_for_pipe_update_locked(intel_dp);
2310
2311                 if (ret)
2312                         drm_err(&dev_priv->drm, "PSR wait timed out, atomic update may fail\n");
2313         }
2314 }
2315
2316 static bool __psr_wait_for_idle_locked(struct intel_dp *intel_dp)
2317 {
2318         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
2319         enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
2320         i915_reg_t reg;
2321         u32 mask;
2322         int err;
2323
2324         if (!intel_dp->psr.enabled)
2325                 return false;
2326
2327         if (intel_dp->psr.psr2_enabled) {
2328                 reg = EDP_PSR2_STATUS(cpu_transcoder);
2329                 mask = EDP_PSR2_STATUS_STATE_MASK;
2330         } else {
2331                 reg = psr_status_reg(dev_priv, cpu_transcoder);
2332                 mask = EDP_PSR_STATUS_STATE_MASK;
2333         }
2334
2335         mutex_unlock(&intel_dp->psr.lock);
2336
2337         err = intel_de_wait_for_clear(dev_priv, reg, mask, 50);
2338         if (err)
2339                 drm_err(&dev_priv->drm,
2340                         "Timed out waiting for PSR Idle for re-enable\n");
2341
2342         /* After the unlocked wait, verify that PSR is still wanted! */
2343         mutex_lock(&intel_dp->psr.lock);
2344         return err == 0 && intel_dp->psr.enabled;
2345 }
2346
2347 static int intel_psr_fastset_force(struct drm_i915_private *dev_priv)
2348 {
2349         struct drm_connector_list_iter conn_iter;
2350         struct drm_modeset_acquire_ctx ctx;
2351         struct drm_atomic_state *state;
2352         struct drm_connector *conn;
2353         int err = 0;
2354
2355         state = drm_atomic_state_alloc(&dev_priv->drm);
2356         if (!state)
2357                 return -ENOMEM;
2358
2359         drm_modeset_acquire_init(&ctx, DRM_MODESET_ACQUIRE_INTERRUPTIBLE);
2360
2361         state->acquire_ctx = &ctx;
2362         to_intel_atomic_state(state)->internal = true;
2363
2364 retry:
2365         drm_connector_list_iter_begin(&dev_priv->drm, &conn_iter);
2366         drm_for_each_connector_iter(conn, &conn_iter) {
2367                 struct drm_connector_state *conn_state;
2368                 struct drm_crtc_state *crtc_state;
2369
2370                 if (conn->connector_type != DRM_MODE_CONNECTOR_eDP)
2371                         continue;
2372
2373                 conn_state = drm_atomic_get_connector_state(state, conn);
2374                 if (IS_ERR(conn_state)) {
2375                         err = PTR_ERR(conn_state);
2376                         break;
2377                 }
2378
2379                 if (!conn_state->crtc)
2380                         continue;
2381
2382                 crtc_state = drm_atomic_get_crtc_state(state, conn_state->crtc);
2383                 if (IS_ERR(crtc_state)) {
2384                         err = PTR_ERR(crtc_state);
2385                         break;
2386                 }
2387
2388                 /* Mark mode as changed to trigger a pipe->update() */
2389                 crtc_state->mode_changed = true;
2390         }
2391         drm_connector_list_iter_end(&conn_iter);
2392
2393         if (err == 0)
2394                 err = drm_atomic_commit(state);
2395
2396         if (err == -EDEADLK) {
2397                 drm_atomic_state_clear(state);
2398                 err = drm_modeset_backoff(&ctx);
2399                 if (!err)
2400                         goto retry;
2401         }
2402
2403         drm_modeset_drop_locks(&ctx);
2404         drm_modeset_acquire_fini(&ctx);
2405         drm_atomic_state_put(state);
2406
2407         return err;
2408 }
2409
2410 int intel_psr_debug_set(struct intel_dp *intel_dp, u64 val)
2411 {
2412         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
2413         const u32 mode = val & I915_PSR_DEBUG_MODE_MASK;
2414         u32 old_mode;
2415         int ret;
2416
2417         if (val & ~(I915_PSR_DEBUG_IRQ | I915_PSR_DEBUG_MODE_MASK) ||
2418             mode > I915_PSR_DEBUG_ENABLE_SEL_FETCH) {
2419                 drm_dbg_kms(&dev_priv->drm, "Invalid debug mask %llx\n", val);
2420                 return -EINVAL;
2421         }
2422
2423         ret = mutex_lock_interruptible(&intel_dp->psr.lock);
2424         if (ret)
2425                 return ret;
2426
2427         old_mode = intel_dp->psr.debug & I915_PSR_DEBUG_MODE_MASK;
2428         intel_dp->psr.debug = val;
2429
2430         /*
2431          * Do it right away if it's already enabled, otherwise it will be done
2432          * when enabling the source.
2433          */
2434         if (intel_dp->psr.enabled)
2435                 psr_irq_control(intel_dp);
2436
2437         mutex_unlock(&intel_dp->psr.lock);
2438
2439         if (old_mode != mode)
2440                 ret = intel_psr_fastset_force(dev_priv);
2441
2442         return ret;
2443 }
2444
2445 static void intel_psr_handle_irq(struct intel_dp *intel_dp)
2446 {
2447         struct intel_psr *psr = &intel_dp->psr;
2448
2449         intel_psr_disable_locked(intel_dp);
2450         psr->sink_not_reliable = true;
2451         /* let's make sure that the sink is awake */
2452         drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER, DP_SET_POWER_D0);
2453 }
2454
2455 static void intel_psr_work(struct work_struct *work)
2456 {
2457         struct intel_dp *intel_dp =
2458                 container_of(work, typeof(*intel_dp), psr.work);
2459
2460         mutex_lock(&intel_dp->psr.lock);
2461
2462         if (!intel_dp->psr.enabled)
2463                 goto unlock;
2464
2465         if (READ_ONCE(intel_dp->psr.irq_aux_error))
2466                 intel_psr_handle_irq(intel_dp);
2467
2468         /*
2469          * We have to make sure PSR is ready for re-enable,
2470          * otherwise it stays disabled until the next full enable/disable cycle.
2471          * PSR might take some time to get fully disabled
2472          * and be ready for re-enable.
2473          */
2474         if (!__psr_wait_for_idle_locked(intel_dp))
2475                 goto unlock;
2476
2477         /*
2478          * The delayed work can race with an invalidate hence we need to
2479          * recheck. Since psr_flush first clears this and then reschedules we
2480          * won't ever miss a flush when bailing out here.
2481          */
2482         if (intel_dp->psr.busy_frontbuffer_bits || intel_dp->psr.active)
2483                 goto unlock;
2484
2485         intel_psr_activate(intel_dp);
2486 unlock:
2487         mutex_unlock(&intel_dp->psr.lock);
2488 }
2489
2490 static void _psr_invalidate_handle(struct intel_dp *intel_dp)
2491 {
2492         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
2493         enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
2494
2495         if (intel_dp->psr.psr2_sel_fetch_enabled) {
2496                 u32 val;
2497
2498                 if (intel_dp->psr.psr2_sel_fetch_cff_enabled) {
2499                         /* Send one update, otherwise lag is observed on screen */
2500                         intel_de_write(dev_priv, CURSURFLIVE(intel_dp->psr.pipe), 0);
2501                         return;
2502                 }
2503
2504                 val = man_trk_ctl_enable_bit_get(dev_priv) |
2505                       man_trk_ctl_partial_frame_bit_get(dev_priv) |
2506                       man_trk_ctl_continuos_full_frame(dev_priv);
2507                 intel_de_write(dev_priv, PSR2_MAN_TRK_CTL(cpu_transcoder), val);
2508                 intel_de_write(dev_priv, CURSURFLIVE(intel_dp->psr.pipe), 0);
2509                 intel_dp->psr.psr2_sel_fetch_cff_enabled = true;
2510         } else {
2511                 intel_psr_exit(intel_dp);
2512         }
2513 }
2514
2515 /**
2516  * intel_psr_invalidate - Invalidate PSR
2517  * @dev_priv: i915 device
2518  * @frontbuffer_bits: frontbuffer plane tracking bits
2519  * @origin: which operation caused the invalidate
2520  *
2521  * Since the hardware frontbuffer tracking has gaps we need to integrate
2522  * with the software frontbuffer tracking. This function gets called every
2523  * time frontbuffer rendering starts and a buffer gets dirtied. PSR must be
2524  * disabled if the frontbuffer mask contains a buffer relevant to PSR.
2525  *
2526  * Dirty frontbuffers relevant to PSR are tracked in busy_frontbuffer_bits.
2527  */
2528 void intel_psr_invalidate(struct drm_i915_private *dev_priv,
2529                           unsigned frontbuffer_bits, enum fb_op_origin origin)
2530 {
2531         struct intel_encoder *encoder;
2532
2533         if (origin == ORIGIN_FLIP)
2534                 return;
2535
2536         for_each_intel_encoder_with_psr(&dev_priv->drm, encoder) {
2537                 unsigned int pipe_frontbuffer_bits = frontbuffer_bits;
2538                 struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
2539
2540                 mutex_lock(&intel_dp->psr.lock);
2541                 if (!intel_dp->psr.enabled) {
2542                         mutex_unlock(&intel_dp->psr.lock);
2543                         continue;
2544                 }
2545
2546                 pipe_frontbuffer_bits &=
2547                         INTEL_FRONTBUFFER_ALL_MASK(intel_dp->psr.pipe);
2548                 intel_dp->psr.busy_frontbuffer_bits |= pipe_frontbuffer_bits;
2549
2550                 if (pipe_frontbuffer_bits)
2551                         _psr_invalidate_handle(intel_dp);
2552
2553                 mutex_unlock(&intel_dp->psr.lock);
2554         }
2555 }
2556 /*
2557  * When we completely rely on PSR2 S/W tracking in the future,
2558  * intel_psr_flush() will invalidate and flush the PSR for the ORIGIN_FLIP
2559  * event as well, therefore tgl_dc3co_flush_locked() will need to be changed
2560  * accordingly.
2561  */
2562 static void
2563 tgl_dc3co_flush_locked(struct intel_dp *intel_dp, unsigned int frontbuffer_bits,
2564                        enum fb_op_origin origin)
2565 {
2566         struct drm_i915_private *i915 = dp_to_i915(intel_dp);
2567
2568         if (!intel_dp->psr.dc3co_exitline || !intel_dp->psr.psr2_enabled ||
2569             !intel_dp->psr.active)
2570                 return;
2571
2572         /*
2573          * Every frontbuffer flush flip event refreshes the delay of the delayed
2574          * work; when the delayed work finally runs, the display has been idle.
2575          */
2576         if (!(frontbuffer_bits &
2577             INTEL_FRONTBUFFER_ALL_MASK(intel_dp->psr.pipe)))
2578                 return;
2579
2580         tgl_psr2_enable_dc3co(intel_dp);
2581         mod_delayed_work(i915->unordered_wq, &intel_dp->psr.dc3co_work,
2582                          intel_dp->psr.dc3co_exit_delay);
2583 }
2584
2585 static void _psr_flush_handle(struct intel_dp *intel_dp)
2586 {
2587         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
2588         enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
2589
2590         if (intel_dp->psr.psr2_sel_fetch_enabled) {
2591                 if (intel_dp->psr.psr2_sel_fetch_cff_enabled) {
2592                         /* can we turn CFF off? */
2593                         if (intel_dp->psr.busy_frontbuffer_bits == 0) {
2594                                 u32 val = man_trk_ctl_enable_bit_get(dev_priv) |
2595                                         man_trk_ctl_partial_frame_bit_get(dev_priv) |
2596                                         man_trk_ctl_single_full_frame_bit_get(dev_priv) |
2597                                         man_trk_ctl_continuos_full_frame(dev_priv);
2598
2599                                 /*
2600                                  * Set psr2_sel_fetch_cff_enabled to false to allow selective
2601                                  * updates. Still keep the CFF bit enabled, as we don't have a
2602                                  * proper SU configuration in case an update is sent for any
2603                                  * reason after the SFF bit gets cleared by the HW on the next vblank.
2604                                  */
2605                                 intel_de_write(dev_priv, PSR2_MAN_TRK_CTL(cpu_transcoder),
2606                                                val);
2607                                 intel_de_write(dev_priv, CURSURFLIVE(intel_dp->psr.pipe), 0);
2608                                 intel_dp->psr.psr2_sel_fetch_cff_enabled = false;
2609                         }
2610                 } else {
2611                         /*
2612                          * continuous full frame is disabled, only a single full
2613                          * frame is required
2614                          */
2615                         psr_force_hw_tracking_exit(intel_dp);
2616                 }
2617         } else {
2618                 psr_force_hw_tracking_exit(intel_dp);
2619
2620                 if (!intel_dp->psr.active && !intel_dp->psr.busy_frontbuffer_bits)
2621                         queue_work(dev_priv->unordered_wq, &intel_dp->psr.work);
2622         }
2623 }
2624
2625 /**
2626  * intel_psr_flush - Flush PSR
2627  * @dev_priv: i915 device
2628  * @frontbuffer_bits: frontbuffer plane tracking bits
2629  * @origin: which operation caused the flush
2630  *
2631  * Since the hardware frontbuffer tracking has gaps we need to integrate
2632  * with the software frontbuffer tracking. This function gets called every
2633  * time frontbuffer rendering has completed and flushed out to memory. PSR
2634  * can be enabled again if no other frontbuffer relevant to PSR is dirty.
2635  *
2636  * Dirty frontbuffers relevant to PSR are tracked in busy_frontbuffer_bits.
2637  */
2638 void intel_psr_flush(struct drm_i915_private *dev_priv,
2639                      unsigned frontbuffer_bits, enum fb_op_origin origin)
2640 {
2641         struct intel_encoder *encoder;
2642
2643         for_each_intel_encoder_with_psr(&dev_priv->drm, encoder) {
2644                 unsigned int pipe_frontbuffer_bits = frontbuffer_bits;
2645                 struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
2646
2647                 mutex_lock(&intel_dp->psr.lock);
2648                 if (!intel_dp->psr.enabled) {
2649                         mutex_unlock(&intel_dp->psr.lock);
2650                         continue;
2651                 }
2652
2653                 pipe_frontbuffer_bits &=
2654                         INTEL_FRONTBUFFER_ALL_MASK(intel_dp->psr.pipe);
2655                 intel_dp->psr.busy_frontbuffer_bits &= ~pipe_frontbuffer_bits;
2656
2657                 /*
2658                  * If PSR is paused by an explicit intel_psr_pause() call, we
2659                  * have to ensure that PSR is not activated until
2660                  * intel_psr_resume() is called.
2661                  */
2662                 if (intel_dp->psr.paused)
2663                         goto unlock;
2664
2665                 if (origin == ORIGIN_FLIP ||
2666                     (origin == ORIGIN_CURSOR_UPDATE &&
2667                      !intel_dp->psr.psr2_sel_fetch_enabled)) {
2668                         tgl_dc3co_flush_locked(intel_dp, frontbuffer_bits, origin);
2669                         goto unlock;
2670                 }
2671
2672                 if (pipe_frontbuffer_bits == 0)
2673                         goto unlock;
2674
2675                 /* By definition flush = invalidate + flush */
2676                 _psr_flush_handle(intel_dp);
2677 unlock:
2678                 mutex_unlock(&intel_dp->psr.lock);
2679         }
2680 }
2681
2682 /**
2683  * intel_psr_init - Init basic PSR work and mutex.
2684  * @intel_dp: Intel DP
2685  *
2686  * This function is called after connector initialization (which handles
2687  * the connector capabilities) and it initializes the basic PSR state for
2688  * each DP encoder.
2689  */
2690 void intel_psr_init(struct intel_dp *intel_dp)
2691 {
2692         struct intel_connector *connector = intel_dp->attached_connector;
2693         struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
2694         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
2695
2696         if (!HAS_PSR(dev_priv))
2697                 return;
2698
2699         /*
2700          * HSW spec explicitly says PSR is tied to port A.
2701          * BDW+ platforms have an instance of the PSR registers per transcoder,
2702          * but on BDW, GEN9 and GEN11 only the eDP transcoder has been
2703          * validated by the HW team.
2704          * For now only one instance of PSR is supported on BDW, GEN9 and GEN11,
2705          * so let's keep it hardcoded to PORT_A there.
2706          * GEN12 supports an instance of the PSR registers per transcoder.
2707          */
2708         if (DISPLAY_VER(dev_priv) < 12 && dig_port->base.port != PORT_A) {
2709                 drm_dbg_kms(&dev_priv->drm,
2710                             "PSR condition failed: Port not supported\n");
2711                 return;
2712         }
2713
2714         intel_dp->psr.source_support = true;
2715
2716         /* Set link_standby / link_off defaults */
2717         if (DISPLAY_VER(dev_priv) < 12)
2718                 /* On pre-TGL platforms respect the VBT setting again */
2719                 intel_dp->psr.link_standby = connector->panel.vbt.psr.full_link;
2720
2721         INIT_WORK(&intel_dp->psr.work, intel_psr_work);
2722         INIT_DELAYED_WORK(&intel_dp->psr.dc3co_work, tgl_dc3co_disable_work);
2723         mutex_init(&intel_dp->psr.lock);
2724 }
2725
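/*
 * Read DP_PSR_STATUS and DP_PSR_ERROR_STATUS from the sink over AUX.
 * On success returns 0 with the sink state (masked with
 * DP_PSR_SINK_STATE_MASK) in @status and the raw error bits in
 * @error_status, otherwise returns the result of the failing DPCD read.
 */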
2726 static int psr_get_status_and_error_status(struct intel_dp *intel_dp,
2727                                            u8 *status, u8 *error_status)
2728 {
2729         struct drm_dp_aux *aux = &intel_dp->aux;
2730         int ret;
2731
2732         ret = drm_dp_dpcd_readb(aux, DP_PSR_STATUS, status);
2733         if (ret != 1)
2734                 return ret;
2735
2736         ret = drm_dp_dpcd_readb(aux, DP_PSR_ERROR_STATUS, error_status);
2737         if (ret != 1)
2738                 return ret;
2739
2740         *status = *status & DP_PSR_SINK_STATE_MASK;
2741
2742         return 0;
2743 }
2744
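/*
 * Check the sink ALPM status (PSR2 only) and disable PSR, marking the sink
 * as not reliable, if an ALPM lock timeout error is reported.
 */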
2745 static void psr_alpm_check(struct intel_dp *intel_dp)
2746 {
2747         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
2748         struct drm_dp_aux *aux = &intel_dp->aux;
2749         struct intel_psr *psr = &intel_dp->psr;
2750         u8 val;
2751         int r;
2752
2753         if (!psr->psr2_enabled)
2754                 return;
2755
2756         r = drm_dp_dpcd_readb(aux, DP_RECEIVER_ALPM_STATUS, &val);
2757         if (r != 1) {
2758                 drm_err(&dev_priv->drm, "Error reading ALPM status\n");
2759                 return;
2760         }
2761
2762         if (val & DP_ALPM_LOCK_TIMEOUT_ERROR) {
2763                 intel_psr_disable_locked(intel_dp);
2764                 psr->sink_not_reliable = true;
2765                 drm_dbg_kms(&dev_priv->drm,
2766                             "ALPM lock timeout error, disabling PSR\n");
2767
2768                 /* Clearing error */
2769                 drm_dp_dpcd_writeb(aux, DP_RECEIVER_ALPM_STATUS, val);
2770         }
2771 }
2772
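/*
 * Check the sink PSR event status (ESI) and disable PSR, marking the sink
 * as not reliable, if the sink reports that its PSR capabilities changed.
 */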
2773 static void psr_capability_changed_check(struct intel_dp *intel_dp)
2774 {
2775         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
2776         struct intel_psr *psr = &intel_dp->psr;
2777         u8 val;
2778         int r;
2779
2780         r = drm_dp_dpcd_readb(&intel_dp->aux, DP_PSR_ESI, &val);
2781         if (r != 1) {
2782                 drm_err(&dev_priv->drm, "Error reading DP_PSR_ESI\n");
2783                 return;
2784         }
2785
2786         if (val & DP_PSR_CAPS_CHANGE) {
2787                 intel_psr_disable_locked(intel_dp);
2788                 psr->sink_not_reliable = true;
2789                 drm_dbg_kms(&dev_priv->drm,
2790                             "Sink PSR capability changed, disabling PSR\n");
2791
2792                 /* Clearing it */
2793                 drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_ESI, val);
2794         }
2795 }
2796
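/**
 * intel_psr_short_pulse - handle PSR sink events signalled via a short HPD pulse
 * @intel_dp: Intel DP
 *
 * Read the sink PSR status and error status and disable PSR, marking the
 * sink as not reliable, when the sink reports an internal error or an RFB
 * storage, VSC SDP uncorrectable or link CRC error. Also checks for ALPM
 * errors and sink PSR capability changes.
 */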
2797 void intel_psr_short_pulse(struct intel_dp *intel_dp)
2798 {
2799         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
2800         struct intel_psr *psr = &intel_dp->psr;
2801         u8 status, error_status;
2802         const u8 errors = DP_PSR_RFB_STORAGE_ERROR |
2803                           DP_PSR_VSC_SDP_UNCORRECTABLE_ERROR |
2804                           DP_PSR_LINK_CRC_ERROR;
2805
2806         if (!CAN_PSR(intel_dp))
2807                 return;
2808
2809         mutex_lock(&psr->lock);
2810
2811         if (!psr->enabled)
2812                 goto exit;
2813
2814         if (psr_get_status_and_error_status(intel_dp, &status, &error_status)) {
2815                 drm_err(&dev_priv->drm,
2816                         "Error reading PSR status or error status\n");
2817                 goto exit;
2818         }
2819
2820         if (status == DP_PSR_SINK_INTERNAL_ERROR || (error_status & errors)) {
2821                 intel_psr_disable_locked(intel_dp);
2822                 psr->sink_not_reliable = true;
2823         }
2824
2825         if (status == DP_PSR_SINK_INTERNAL_ERROR && !error_status)
2826                 drm_dbg_kms(&dev_priv->drm,
2827                             "PSR sink internal error, disabling PSR\n");
2828         if (error_status & DP_PSR_RFB_STORAGE_ERROR)
2829                 drm_dbg_kms(&dev_priv->drm,
2830                             "PSR RFB storage error, disabling PSR\n");
2831         if (error_status & DP_PSR_VSC_SDP_UNCORRECTABLE_ERROR)
2832                 drm_dbg_kms(&dev_priv->drm,
2833                             "PSR VSC SDP uncorrectable error, disabling PSR\n");
2834         if (error_status & DP_PSR_LINK_CRC_ERROR)
2835                 drm_dbg_kms(&dev_priv->drm,
2836                             "PSR Link CRC error, disabling PSR\n");
2837
2838         if (error_status & ~errors)
2839                 drm_err(&dev_priv->drm,
2840                         "PSR_ERROR_STATUS unhandled errors %x\n",
2841                         error_status & ~errors);
2842         /* clear status register */
2843         drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_ERROR_STATUS, error_status);
2844
2845         psr_alpm_check(intel_dp);
2846         psr_capability_changed_check(intel_dp);
2847
2848 exit:
2849         mutex_unlock(&psr->lock);
2850 }
2851
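/**
 * intel_psr_enabled - check whether PSR is currently enabled
 * @intel_dp: Intel DP
 *
 * Returns true if PSR is enabled on this encoder, false otherwise.
 */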
2852 bool intel_psr_enabled(struct intel_dp *intel_dp)
2853 {
2854         bool ret;
2855
2856         if (!CAN_PSR(intel_dp))
2857                 return false;
2858
2859         mutex_lock(&intel_dp->psr.lock);
2860         ret = intel_dp->psr.enabled;
2861         mutex_unlock(&intel_dp->psr.lock);
2862
2863         return ret;
2864 }
2865
2866 /**
2867  * intel_psr_lock - grab PSR lock
2868  * @crtc_state: the crtc state
2869  *
2870  * This is meant to be used around the CRTC update, when vblank-sensitive
2871  * registers are updated and we need to grab the lock before the update
2872  * starts to avoid colliding with the vblank evasion.
2873  */
2874 void intel_psr_lock(const struct intel_crtc_state *crtc_state)
2875 {
2876         struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
2877         struct intel_encoder *encoder;
2878
2879         if (!crtc_state->has_psr)
2880                 return;
2881
2882         for_each_intel_encoder_mask_with_psr(&i915->drm, encoder,
2883                                              crtc_state->uapi.encoder_mask) {
2884                 struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
2885
2886                 mutex_lock(&intel_dp->psr.lock);
2887                 break;
2888         }
2889 }
2890
2891 /**
2892  * intel_psr_unlock - release PSR lock
2893  * @crtc_state: the crtc state
2894  *
2895  * Release the PSR lock that was held during pipe update.
2896  */
2897 void intel_psr_unlock(const struct intel_crtc_state *crtc_state)
2898 {
2899         struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
2900         struct intel_encoder *encoder;
2901
2902         if (!crtc_state->has_psr)
2903                 return;
2904
2905         for_each_intel_encoder_mask_with_psr(&i915->drm, encoder,
2906                                              crtc_state->uapi.encoder_mask) {
2907                 struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
2908
2909                 mutex_unlock(&intel_dp->psr.lock);
2910                 break;
2911         }
2912 }
2913
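/*
 * Translate the live PSR1/PSR2 hardware status register state into a human
 * readable string and print it to the debugfs seq_file.
 */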
2914 static void
2915 psr_source_status(struct intel_dp *intel_dp, struct seq_file *m)
2916 {
2917         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
2918         enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
2919         const char *status = "unknown";
2920         u32 val, status_val;
2921
2922         if (intel_dp->psr.psr2_enabled) {
2923                 static const char * const live_status[] = {
2924                         "IDLE",
2925                         "CAPTURE",
2926                         "CAPTURE_FS",
2927                         "SLEEP",
2928                         "BUFON_FW",
2929                         "ML_UP",
2930                         "SU_STANDBY",
2931                         "FAST_SLEEP",
2932                         "DEEP_SLEEP",
2933                         "BUF_ON",
2934                         "TG_ON"
2935                 };
2936                 val = intel_de_read(dev_priv, EDP_PSR2_STATUS(cpu_transcoder));
2937                 status_val = REG_FIELD_GET(EDP_PSR2_STATUS_STATE_MASK, val);
2938                 if (status_val < ARRAY_SIZE(live_status))
2939                         status = live_status[status_val];
2940         } else {
2941                 static const char * const live_status[] = {
2942                         "IDLE",
2943                         "SRDONACK",
2944                         "SRDENT",
2945                         "BUFOFF",
2946                         "BUFON",
2947                         "AUXACK",
2948                         "SRDOFFACK",
2949                         "SRDENT_ON",
2950                 };
2951                 val = intel_de_read(dev_priv, psr_status_reg(dev_priv, cpu_transcoder));
2952                 status_val = REG_FIELD_GET(EDP_PSR_STATUS_STATE_MASK, val);
2953                 if (status_val < ARRAY_SIZE(live_status))
2954                         status = live_status[status_val];
2955         }
2956
2957         seq_printf(m, "Source PSR status: %s [0x%08x]\n", status, val);
2958 }
2959
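/*
 * Print the full PSR state for this encoder (sink support, enabled mode,
 * source control and status registers, busy frontbuffer bits, performance
 * counter and PSR2 SU block counts) to the debugfs seq_file.
 */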
2960 static int intel_psr_status(struct seq_file *m, struct intel_dp *intel_dp)
2961 {
2962         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
2963         enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
2964         struct intel_psr *psr = &intel_dp->psr;
2965         intel_wakeref_t wakeref;
2966         const char *status;
2967         bool enabled;
2968         u32 val;
2969
2970         seq_printf(m, "Sink support: %s", str_yes_no(psr->sink_support));
2971         if (psr->sink_support)
2972                 seq_printf(m, " [0x%02x]", intel_dp->psr_dpcd[0]);
2973         seq_puts(m, "\n");
2974
2975         if (!psr->sink_support)
2976                 return 0;
2977
2978         wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
2979         mutex_lock(&psr->lock);
2980
2981         if (psr->enabled)
2982                 status = psr->psr2_enabled ? "PSR2 enabled" : "PSR1 enabled";
2983         else
2984                 status = "disabled";
2985         seq_printf(m, "PSR mode: %s\n", status);
2986
2987         if (!psr->enabled) {
2988                 seq_printf(m, "PSR sink not reliable: %s\n",
2989                            str_yes_no(psr->sink_not_reliable));
2990
2991                 goto unlock;
2992         }
2993
2994         if (psr->psr2_enabled) {
2995                 val = intel_de_read(dev_priv, EDP_PSR2_CTL(cpu_transcoder));
2996                 enabled = val & EDP_PSR2_ENABLE;
2997         } else {
2998                 val = intel_de_read(dev_priv, psr_ctl_reg(dev_priv, cpu_transcoder));
2999                 enabled = val & EDP_PSR_ENABLE;
3000         }
3001         seq_printf(m, "Source PSR ctl: %s [0x%08x]\n",
3002                    str_enabled_disabled(enabled), val);
3003         psr_source_status(intel_dp, m);
3004         seq_printf(m, "Busy frontbuffer bits: 0x%08x\n",
3005                    psr->busy_frontbuffer_bits);
3006
3007         /*
3008          * The SKL+ perf counter is reset to 0 every time a DC state is entered
3009          */
3010         val = intel_de_read(dev_priv, psr_perf_cnt_reg(dev_priv, cpu_transcoder));
3011         seq_printf(m, "Performance counter: %u\n",
3012                    REG_FIELD_GET(EDP_PSR_PERF_CNT_MASK, val));
3013
3014         if (psr->debug & I915_PSR_DEBUG_IRQ) {
3015                 seq_printf(m, "Last attempted entry at: %lld\n",
3016                            psr->last_entry_attempt);
3017                 seq_printf(m, "Last exit at: %lld\n", psr->last_exit);
3018         }
3019
3020         if (psr->psr2_enabled) {
3021                 u32 su_frames_val[3];
3022                 int frame;
3023
3024                 /*
3025                  * Read all 3 registers beforehand to minimize the chance of
3026                  * crossing a frame boundary between the register reads
3027                  */
3028                 for (frame = 0; frame < PSR2_SU_STATUS_FRAMES; frame += 3) {
3029                         val = intel_de_read(dev_priv, PSR2_SU_STATUS(cpu_transcoder, frame));
3030                         su_frames_val[frame / 3] = val;
3031                 }
3032
3033                 seq_puts(m, "Frame:\tPSR2 SU blocks:\n");
3034
3035                 for (frame = 0; frame < PSR2_SU_STATUS_FRAMES; frame++) {
3036                         u32 su_blocks;
3037
3038                         su_blocks = su_frames_val[frame / 3] &
3039                                     PSR2_SU_STATUS_MASK(frame);
3040                         su_blocks = su_blocks >> PSR2_SU_STATUS_SHIFT(frame);
3041                         seq_printf(m, "%d\t%d\n", frame, su_blocks);
3042                 }
3043
3044                 seq_printf(m, "PSR2 selective fetch: %s\n",
3045                            str_enabled_disabled(psr->psr2_sel_fetch_enabled));
3046         }
3047
3048 unlock:
3049         mutex_unlock(&psr->lock);
3050         intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
3051
3052         return 0;
3053 }
3054
3055 static int i915_edp_psr_status_show(struct seq_file *m, void *data)
3056 {
3057         struct drm_i915_private *dev_priv = m->private;
3058         struct intel_dp *intel_dp = NULL;
3059         struct intel_encoder *encoder;
3060
3061         if (!HAS_PSR(dev_priv))
3062                 return -ENODEV;
3063
3064         /* Find the first eDP output which supports PSR */
3065         for_each_intel_encoder_with_psr(&dev_priv->drm, encoder) {
3066                 intel_dp = enc_to_intel_dp(encoder);
3067                 break;
3068         }
3069
3070         if (!intel_dp)
3071                 return -ENODEV;
3072
3073         return intel_psr_status(m, intel_dp);
3074 }
3075 DEFINE_SHOW_ATTRIBUTE(i915_edp_psr_status);
3076
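/*
 * debugfs write handler: apply the given PSR debug value to every
 * PSR-capable encoder (currently the same value is set on all of them).
 */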
3077 static int
3078 i915_edp_psr_debug_set(void *data, u64 val)
3079 {
3080         struct drm_i915_private *dev_priv = data;
3081         struct intel_encoder *encoder;
3082         intel_wakeref_t wakeref;
3083         int ret = -ENODEV;
3084
3085         if (!HAS_PSR(dev_priv))
3086                 return ret;
3087
3088         for_each_intel_encoder_with_psr(&dev_priv->drm, encoder) {
3089                 struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
3090
3091                 drm_dbg_kms(&dev_priv->drm, "Setting PSR debug to %llx\n", val);
3092
3093                 wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
3094
3095                 // TODO: split to each transcoder's PSR debug state
3096                 ret = intel_psr_debug_set(intel_dp, val);
3097
3098                 intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
3099         }
3100
3101         return ret;
3102 }
3103
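/*
 * debugfs read handler: report the PSR debug value of the first PSR-capable
 * encoder.
 */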
3104 static int
3105 i915_edp_psr_debug_get(void *data, u64 *val)
3106 {
3107         struct drm_i915_private *dev_priv = data;
3108         struct intel_encoder *encoder;
3109
3110         if (!HAS_PSR(dev_priv))
3111                 return -ENODEV;
3112
3113         for_each_intel_encoder_with_psr(&dev_priv->drm, encoder) {
3114                 struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
3115
3116                 // TODO: split to each transcoder's PSR debug state
3117                 *val = READ_ONCE(intel_dp->psr.debug);
3118                 return 0;
3119         }
3120
3121         return -ENODEV;
3122 }
3123
3124 DEFINE_SIMPLE_ATTRIBUTE(i915_edp_psr_debug_fops,
3125                         i915_edp_psr_debug_get, i915_edp_psr_debug_set,
3126                         "%llu\n");
3127
3128 void intel_psr_debugfs_register(struct drm_i915_private *i915)
3129 {
3130         struct drm_minor *minor = i915->drm.primary;
3131
3132         debugfs_create_file("i915_edp_psr_debug", 0644, minor->debugfs_root,
3133                             i915, &i915_edp_psr_debug_fops);
3134
3135         debugfs_create_file("i915_edp_psr_status", 0444, minor->debugfs_root,
3136                             i915, &i915_edp_psr_status_fops);
3137 }
3138
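/*
 * debugfs: dump the sink-side PSR status and error status read over DPCD
 * for this connector.
 */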
3139 static int i915_psr_sink_status_show(struct seq_file *m, void *data)
3140 {
3141         struct intel_connector *connector = m->private;
3142         struct intel_dp *intel_dp = intel_attached_dp(connector);
3143         static const char * const sink_status[] = {
3144                 "inactive",
3145                 "transition to active, capture and display",
3146                 "active, display from RFB",
3147                 "active, capture and display on sink device timings",
3148                 "transition to inactive, capture and display, timing re-sync",
3149                 "reserved",
3150                 "reserved",
3151                 "sink internal error",
3152         };
3153         const char *str;
3154         int ret;
3155         u8 status, error_status;
3156
3157         if (!CAN_PSR(intel_dp)) {
3158                 seq_puts(m, "PSR Unsupported\n");
3159                 return -ENODEV;
3160         }
3161
3162         if (connector->base.status != connector_status_connected)
3163                 return -ENODEV;
3164
3165         ret = psr_get_status_and_error_status(intel_dp, &status, &error_status);
3166         if (ret)
3167                 return ret;
3168
3169         status &= DP_PSR_SINK_STATE_MASK;
3170         if (status < ARRAY_SIZE(sink_status))
3171                 str = sink_status[status];
3172         else
3173                 str = "unknown";
3174
3175         seq_printf(m, "Sink PSR status: 0x%x [%s]\n", status, str);
3176
3177         seq_printf(m, "Sink PSR error status: 0x%x", error_status);
3178
3179         if (error_status & (DP_PSR_RFB_STORAGE_ERROR |
3180                             DP_PSR_VSC_SDP_UNCORRECTABLE_ERROR |
3181                             DP_PSR_LINK_CRC_ERROR))
3182                 seq_puts(m, ":\n");
3183         else
3184                 seq_puts(m, "\n");
3185         if (error_status & DP_PSR_RFB_STORAGE_ERROR)
3186                 seq_puts(m, "\tPSR RFB storage error\n");
3187         if (error_status & DP_PSR_VSC_SDP_UNCORRECTABLE_ERROR)
3188                 seq_puts(m, "\tPSR VSC SDP uncorrectable error\n");
3189         if (error_status & DP_PSR_LINK_CRC_ERROR)
3190                 seq_puts(m, "\tPSR Link CRC error\n");
3191
3192         return ret;
3193 }
3194 DEFINE_SHOW_ATTRIBUTE(i915_psr_sink_status);
3195
3196 static int i915_psr_status_show(struct seq_file *m, void *data)
3197 {
3198         struct intel_connector *connector = m->private;
3199         struct intel_dp *intel_dp = intel_attached_dp(connector);
3200
3201         return intel_psr_status(m, intel_dp);
3202 }
3203 DEFINE_SHOW_ATTRIBUTE(i915_psr_status);
3204
3205 void intel_psr_connector_debugfs_add(struct intel_connector *connector)
3206 {
3207         struct drm_i915_private *i915 = to_i915(connector->base.dev);
3208         struct dentry *root = connector->base.debugfs_entry;
3209
3210         if (connector->base.connector_type != DRM_MODE_CONNECTOR_eDP)
3211                 return;
3212
3213         debugfs_create_file("i915_psr_sink_status", 0444, root,
3214                             connector, &i915_psr_sink_status_fops);
3215
3216         if (HAS_PSR(i915))
3217                 debugfs_create_file("i915_psr_status", 0444, root,
3218                                     connector, &i915_psr_status_fops);
3219 }