1 /*
2  * Copyright © 2014 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21  * DEALINGS IN THE SOFTWARE.
22  */
23
24 #include <drm/drm_atomic_helper.h>
25 #include <drm/drm_damage_helper.h>
26
27 #include "i915_drv.h"
28 #include "i915_reg.h"
29 #include "intel_atomic.h"
30 #include "intel_crtc.h"
31 #include "intel_de.h"
32 #include "intel_display_types.h"
33 #include "intel_dp.h"
34 #include "intel_dp_aux.h"
35 #include "intel_hdmi.h"
36 #include "intel_psr.h"
37 #include "intel_psr_regs.h"
38 #include "intel_snps_phy.h"
39 #include "skl_universal_plane.h"
40
41 /**
42  * DOC: Panel Self Refresh (PSR/SRD)
43  *
44  * Since Haswell, the display controller supports Panel Self-Refresh on
45  * display panels which have a remote frame buffer (RFB) implemented
46  * according to the PSR spec in eDP 1.3. The PSR feature allows the display
47  * to go to lower standby states when the system is idle but the display is
48  * on, as it eliminates display refresh requests to DDR memory completely as
49  * long as the frame buffer for that display is unchanged.
50  *
51  * Panel Self Refresh must be supported by both Hardware (source) and
52  * Panel (sink).
53  *
54  * PSR saves power by caching the framebuffer in the panel RFB, which allows us
55  * to power down the link and memory controller. For DSI panels the same idea
56  * is called "manual mode".
57  *
58  * The implementation uses the hardware-based PSR support which automatically
59  * enters/exits self-refresh mode. The hardware takes care of sending the
60  * required DP aux message and could even retrain the link (that part isn't
61  * enabled yet though). The hardware also keeps track of any frontbuffer
62  * changes to know when to exit self-refresh mode again. Unfortunately that
63  * part doesn't work too well, which is why the i915 PSR support uses the
64  * software frontbuffer tracking to make sure it doesn't miss a screen
65  * update. For this integration intel_psr_invalidate() and intel_psr_flush()
66  * get called by the frontbuffer tracking code. Note that because of locking
67  * issues the self-refresh re-enable code is done from a work queue, which
68  * must be correctly synchronized/cancelled when shutting down the pipe.
69  *
70  * DC3CO (DC3 clock off)
71  *
72  * On top of PSR2, GEN12 adds an intermediate power savings state that turns
73  * the clock off automatically during the PSR2 idle state.
74  * The smaller overhead of DC3co entry/exit vs. the overhead of PSR2 deep sleep
75  * entry/exit allows the HW to enter a low-power state even when page flipping
76  * periodically (for instance a 30fps video playback scenario).
77  *
78  * Every time a flip occurs PSR2 will get out of deep sleep state (if it was),
79  * so DC3CO is enabled and tgl_dc3co_disable_work is scheduled to run after 6
80  * frames. If no other flip occurs and that work function is executed, DC3CO is
81  * disabled and PSR2 is configured to enter deep sleep, resetting again in case
82  * of another flip.
83  * Front buffer modifications do not trigger DC3CO activation on purpose as it
84  * would bring a lot of complexity and most modern systems will only
85  * use page flips.
86  */
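/*
 * Illustrative DC3CO flow (a sketch of the description above, not a
 * normative sequence from bspec):
 *
 *   flip -> PSR2 leaves deep sleep, DC3CO is enabled and
 *           tgl_dc3co_disable_work is scheduled ~6 frames out
 *   no further flip -> the work runs, DC3CO is disabled and PSR2 deep
 *           sleep is re-armed
 *   another flip -> the cycle restarts
 */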
87
88 static bool psr_global_enabled(struct intel_dp *intel_dp)
89 {
90         struct intel_connector *connector = intel_dp->attached_connector;
91         struct drm_i915_private *i915 = dp_to_i915(intel_dp);
92
93         switch (intel_dp->psr.debug & I915_PSR_DEBUG_MODE_MASK) {
94         case I915_PSR_DEBUG_DEFAULT:
95                 if (i915->params.enable_psr == -1)
96                         return connector->panel.vbt.psr.enable;
97                 return i915->params.enable_psr;
98         case I915_PSR_DEBUG_DISABLE:
99                 return false;
100         default:
101                 return true;
102         }
103 }
104
105 static bool psr2_global_enabled(struct intel_dp *intel_dp)
106 {
107         struct drm_i915_private *i915 = dp_to_i915(intel_dp);
108
109         switch (intel_dp->psr.debug & I915_PSR_DEBUG_MODE_MASK) {
110         case I915_PSR_DEBUG_DISABLE:
111         case I915_PSR_DEBUG_FORCE_PSR1:
112                 return false;
113         default:
114                 if (i915->params.enable_psr == 1)
115                         return false;
116                 return true;
117         }
118 }
119
120 static u32 psr_irq_psr_error_bit_get(struct intel_dp *intel_dp)
121 {
122         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
123
124         return DISPLAY_VER(dev_priv) >= 12 ? TGL_PSR_ERROR :
125                 EDP_PSR_ERROR(intel_dp->psr.transcoder);
126 }
127
128 static u32 psr_irq_post_exit_bit_get(struct intel_dp *intel_dp)
129 {
130         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
131
132         return DISPLAY_VER(dev_priv) >= 12 ? TGL_PSR_POST_EXIT :
133                 EDP_PSR_POST_EXIT(intel_dp->psr.transcoder);
134 }
135
136 static u32 psr_irq_pre_entry_bit_get(struct intel_dp *intel_dp)
137 {
138         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
139
140         return DISPLAY_VER(dev_priv) >= 12 ? TGL_PSR_PRE_ENTRY :
141                 EDP_PSR_PRE_ENTRY(intel_dp->psr.transcoder);
142 }
143
144 static u32 psr_irq_mask_get(struct intel_dp *intel_dp)
145 {
146         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
147
148         return DISPLAY_VER(dev_priv) >= 12 ? TGL_PSR_MASK :
149                 EDP_PSR_MASK(intel_dp->psr.transcoder);
150 }
151
152 static void psr_irq_control(struct intel_dp *intel_dp)
153 {
154         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
155         i915_reg_t imr_reg;
156         u32 mask;
157
158         if (DISPLAY_VER(dev_priv) >= 12)
159                 imr_reg = TRANS_PSR_IMR(intel_dp->psr.transcoder);
160         else
161                 imr_reg = EDP_PSR_IMR;
162
163         mask = psr_irq_psr_error_bit_get(intel_dp);
164         if (intel_dp->psr.debug & I915_PSR_DEBUG_IRQ)
165                 mask |= psr_irq_post_exit_bit_get(intel_dp) |
166                         psr_irq_pre_entry_bit_get(intel_dp);
167
168         intel_de_rmw(dev_priv, imr_reg, psr_irq_mask_get(intel_dp), ~mask);
169 }
170
171 static void psr_event_print(struct drm_i915_private *i915,
172                             u32 val, bool psr2_enabled)
173 {
174         drm_dbg_kms(&i915->drm, "PSR exit events: 0x%x\n", val);
175         if (val & PSR_EVENT_PSR2_WD_TIMER_EXPIRE)
176                 drm_dbg_kms(&i915->drm, "\tPSR2 watchdog timer expired\n");
177         if ((val & PSR_EVENT_PSR2_DISABLED) && psr2_enabled)
178                 drm_dbg_kms(&i915->drm, "\tPSR2 disabled\n");
179         if (val & PSR_EVENT_SU_DIRTY_FIFO_UNDERRUN)
180                 drm_dbg_kms(&i915->drm, "\tSU dirty FIFO underrun\n");
181         if (val & PSR_EVENT_SU_CRC_FIFO_UNDERRUN)
182                 drm_dbg_kms(&i915->drm, "\tSU CRC FIFO underrun\n");
183         if (val & PSR_EVENT_GRAPHICS_RESET)
184                 drm_dbg_kms(&i915->drm, "\tGraphics reset\n");
185         if (val & PSR_EVENT_PCH_INTERRUPT)
186                 drm_dbg_kms(&i915->drm, "\tPCH interrupt\n");
187         if (val & PSR_EVENT_MEMORY_UP)
188                 drm_dbg_kms(&i915->drm, "\tMemory up\n");
189         if (val & PSR_EVENT_FRONT_BUFFER_MODIFY)
190                 drm_dbg_kms(&i915->drm, "\tFront buffer modification\n");
191         if (val & PSR_EVENT_WD_TIMER_EXPIRE)
192                 drm_dbg_kms(&i915->drm, "\tPSR watchdog timer expired\n");
193         if (val & PSR_EVENT_PIPE_REGISTERS_UPDATE)
194                 drm_dbg_kms(&i915->drm, "\tPIPE registers updated\n");
195         if (val & PSR_EVENT_REGISTER_UPDATE)
196                 drm_dbg_kms(&i915->drm, "\tRegister updated\n");
197         if (val & PSR_EVENT_HDCP_ENABLE)
198                 drm_dbg_kms(&i915->drm, "\tHDCP enabled\n");
199         if (val & PSR_EVENT_KVMR_SESSION_ENABLE)
200                 drm_dbg_kms(&i915->drm, "\tKVMR session enabled\n");
201         if (val & PSR_EVENT_VBI_ENABLE)
202                 drm_dbg_kms(&i915->drm, "\tVBI enabled\n");
203         if (val & PSR_EVENT_LPSP_MODE_EXIT)
204                 drm_dbg_kms(&i915->drm, "\tLPSP mode exited\n");
205         if ((val & PSR_EVENT_PSR_DISABLE) && !psr2_enabled)
206                 drm_dbg_kms(&i915->drm, "\tPSR disabled\n");
207 }
208
209 void intel_psr_irq_handler(struct intel_dp *intel_dp, u32 psr_iir)
210 {
211         enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
212         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
213         ktime_t time_ns =  ktime_get();
214         i915_reg_t imr_reg;
215
216         if (DISPLAY_VER(dev_priv) >= 12)
217                 imr_reg = TRANS_PSR_IMR(intel_dp->psr.transcoder);
218         else
219                 imr_reg = EDP_PSR_IMR;
220
221         if (psr_iir & psr_irq_pre_entry_bit_get(intel_dp)) {
222                 intel_dp->psr.last_entry_attempt = time_ns;
223                 drm_dbg_kms(&dev_priv->drm,
224                             "[transcoder %s] PSR entry attempt in 2 vblanks\n",
225                             transcoder_name(cpu_transcoder));
226         }
227
228         if (psr_iir & psr_irq_post_exit_bit_get(intel_dp)) {
229                 intel_dp->psr.last_exit = time_ns;
230                 drm_dbg_kms(&dev_priv->drm,
231                             "[transcoder %s] PSR exit completed\n",
232                             transcoder_name(cpu_transcoder));
233
234                 if (DISPLAY_VER(dev_priv) >= 9) {
235                         u32 val = intel_de_read(dev_priv,
236                                                 PSR_EVENT(cpu_transcoder));
237                         bool psr2_enabled = intel_dp->psr.psr2_enabled;
238
239                         intel_de_write(dev_priv, PSR_EVENT(cpu_transcoder),
240                                        val);
241                         psr_event_print(dev_priv, val, psr2_enabled);
242                 }
243         }
244
245         if (psr_iir & psr_irq_psr_error_bit_get(intel_dp)) {
246                 drm_warn(&dev_priv->drm, "[transcoder %s] PSR aux error\n",
247                          transcoder_name(cpu_transcoder));
248
249                 intel_dp->psr.irq_aux_error = true;
250
251                 /*
252                  * If this interrupt is not masked it will keep
253                  * firing so fast that it prevents the scheduled
254                  * work from running.
255                  * Also, after a PSR error we don't want to arm PSR
256                  * again, so we don't care about unmasking the interrupt
257                  * or clearing irq_aux_error.
258                  */
259                 intel_de_rmw(dev_priv, imr_reg, 0, psr_irq_psr_error_bit_get(intel_dp));
260
261                 schedule_work(&intel_dp->psr.work);
262         }
263 }
264
265 static bool intel_dp_get_alpm_status(struct intel_dp *intel_dp)
266 {
267         u8 alpm_caps = 0;
268
269         if (drm_dp_dpcd_readb(&intel_dp->aux, DP_RECEIVER_ALPM_CAP,
270                               &alpm_caps) != 1)
271                 return false;
272         return alpm_caps & DP_ALPM_CAP;
273 }
274
275 static u8 intel_dp_get_sink_sync_latency(struct intel_dp *intel_dp)
276 {
277         struct drm_i915_private *i915 = dp_to_i915(intel_dp);
278         u8 val = 8; /* assume the worst if we can't read the value */
279
280         if (drm_dp_dpcd_readb(&intel_dp->aux,
281                               DP_SYNCHRONIZATION_LATENCY_IN_SINK, &val) == 1)
282                 val &= DP_MAX_RESYNC_FRAME_COUNT_MASK;
283         else
284                 drm_dbg_kms(&i915->drm,
285                             "Unable to get sink synchronization latency, assuming 8 frames\n");
286         return val;
287 }
288
289 static void intel_dp_get_su_granularity(struct intel_dp *intel_dp)
290 {
291         struct drm_i915_private *i915 = dp_to_i915(intel_dp);
292         ssize_t r;
293         u16 w;
294         u8 y;
295
296         /* If the sink doesn't have specific granularity requirements, set legacy ones */
297         if (!(intel_dp->psr_dpcd[1] & DP_PSR2_SU_GRANULARITY_REQUIRED)) {
298                 /* As PSR2 HW sends full lines, we do not care about x granularity */
299                 w = 4;
300                 y = 4;
301                 goto exit;
302         }
303
304         r = drm_dp_dpcd_read(&intel_dp->aux, DP_PSR2_SU_X_GRANULARITY, &w, 2);
305         if (r != 2)
306                 drm_dbg_kms(&i915->drm,
307                             "Unable to read DP_PSR2_SU_X_GRANULARITY\n");
308         /*
309          * Spec says that if the value read is 0 the default granularity should
310          * be used instead.
311          */
312         if (r != 2 || w == 0)
313                 w = 4;
314
315         r = drm_dp_dpcd_read(&intel_dp->aux, DP_PSR2_SU_Y_GRANULARITY, &y, 1);
316         if (r != 1) {
317                 drm_dbg_kms(&i915->drm,
318                             "Unable to read DP_PSR2_SU_Y_GRANULARITY\n");
319                 y = 4;
320         }
321         if (y == 0)
322                 y = 1;
323
324 exit:
325         intel_dp->psr.su_w_granularity = w;
326         intel_dp->psr.su_y_granularity = y;
327 }
328
329 void intel_psr_init_dpcd(struct intel_dp *intel_dp)
330 {
331         struct drm_i915_private *dev_priv =
332                 to_i915(dp_to_dig_port(intel_dp)->base.base.dev);
333
334         drm_dp_dpcd_read(&intel_dp->aux, DP_PSR_SUPPORT, intel_dp->psr_dpcd,
335                          sizeof(intel_dp->psr_dpcd));
336
337         if (!intel_dp->psr_dpcd[0])
338                 return;
339         drm_dbg_kms(&dev_priv->drm, "eDP panel supports PSR version %x\n",
340                     intel_dp->psr_dpcd[0]);
341
342         if (drm_dp_has_quirk(&intel_dp->desc, DP_DPCD_QUIRK_NO_PSR)) {
343                 drm_dbg_kms(&dev_priv->drm,
344                             "PSR support not currently available for this panel\n");
345                 return;
346         }
347
348         if (!(intel_dp->edp_dpcd[1] & DP_EDP_SET_POWER_CAP)) {
349                 drm_dbg_kms(&dev_priv->drm,
350                             "Panel lacks power state control, PSR cannot be enabled\n");
351                 return;
352         }
353
354         intel_dp->psr.sink_support = true;
355         intel_dp->psr.sink_sync_latency =
356                 intel_dp_get_sink_sync_latency(intel_dp);
357
358         if (DISPLAY_VER(dev_priv) >= 9 &&
359             (intel_dp->psr_dpcd[0] == DP_PSR2_WITH_Y_COORD_IS_SUPPORTED)) {
360                 bool y_req = intel_dp->psr_dpcd[1] &
361                              DP_PSR2_SU_Y_COORDINATE_REQUIRED;
362                 bool alpm = intel_dp_get_alpm_status(intel_dp);
363
364                 /*
365                  * All panels that support PSR version 03h (PSR2 +
366                  * Y-coordinate) can handle Y-coordinates in the VSC, but we are
367                  * only sure that it is going to be used when required by the
368                  * panel. This way the panel is capable of doing selective updates
369                  * without an aux frame sync.
370                  *
371                  * To support PSR version 02h and PSR version 03h panels without
372                  * the Y-coordinate requirement we would need to enable
373                  * GTC first.
374                  */
375                 intel_dp->psr.sink_psr2_support = y_req && alpm;
376                 drm_dbg_kms(&dev_priv->drm, "PSR2 %ssupported\n",
377                             intel_dp->psr.sink_psr2_support ? "" : "not ");
378
379                 if (intel_dp->psr.sink_psr2_support) {
380                         intel_dp->psr.colorimetry_support =
381                                 intel_dp_get_colorimetry_status(intel_dp);
382                         intel_dp_get_su_granularity(intel_dp);
383                 }
384         }
385 }
386
387 static void intel_psr_enable_sink(struct intel_dp *intel_dp)
388 {
389         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
390         u8 dpcd_val = DP_PSR_ENABLE;
391
392         /* Enable ALPM at sink for psr2 */
393         if (intel_dp->psr.psr2_enabled) {
394                 drm_dp_dpcd_writeb(&intel_dp->aux, DP_RECEIVER_ALPM_CONFIG,
395                                    DP_ALPM_ENABLE |
396                                    DP_ALPM_LOCK_ERROR_IRQ_HPD_ENABLE);
397
398                 dpcd_val |= DP_PSR_ENABLE_PSR2 | DP_PSR_IRQ_HPD_WITH_CRC_ERRORS;
399         } else {
400                 if (intel_dp->psr.link_standby)
401                         dpcd_val |= DP_PSR_MAIN_LINK_ACTIVE;
402
403                 if (DISPLAY_VER(dev_priv) >= 8)
404                         dpcd_val |= DP_PSR_CRC_VERIFICATION;
405         }
406
407         if (intel_dp->psr.req_psr2_sdp_prior_scanline)
408                 dpcd_val |= DP_PSR_SU_REGION_SCANLINE_CAPTURE;
409
410         drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG, dpcd_val);
411
412         drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER, DP_SET_POWER_D0);
413 }
414
415 static u32 intel_psr1_get_tp_time(struct intel_dp *intel_dp)
416 {
417         struct intel_connector *connector = intel_dp->attached_connector;
418         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
419         u32 val = 0;
420
421         if (DISPLAY_VER(dev_priv) >= 11)
422                 val |= EDP_PSR_TP4_TIME_0US;
423
424         if (dev_priv->params.psr_safest_params) {
425                 val |= EDP_PSR_TP1_TIME_2500us;
426                 val |= EDP_PSR_TP2_TP3_TIME_2500us;
427                 goto check_tp3_sel;
428         }
429
430         if (connector->panel.vbt.psr.tp1_wakeup_time_us == 0)
431                 val |= EDP_PSR_TP1_TIME_0us;
432         else if (connector->panel.vbt.psr.tp1_wakeup_time_us <= 100)
433                 val |= EDP_PSR_TP1_TIME_100us;
434         else if (connector->panel.vbt.psr.tp1_wakeup_time_us <= 500)
435                 val |= EDP_PSR_TP1_TIME_500us;
436         else
437                 val |= EDP_PSR_TP1_TIME_2500us;
438
439         if (connector->panel.vbt.psr.tp2_tp3_wakeup_time_us == 0)
440                 val |= EDP_PSR_TP2_TP3_TIME_0us;
441         else if (connector->panel.vbt.psr.tp2_tp3_wakeup_time_us <= 100)
442                 val |= EDP_PSR_TP2_TP3_TIME_100us;
443         else if (connector->panel.vbt.psr.tp2_tp3_wakeup_time_us <= 500)
444                 val |= EDP_PSR_TP2_TP3_TIME_500us;
445         else
446                 val |= EDP_PSR_TP2_TP3_TIME_2500us;
447
448 check_tp3_sel:
449         if (intel_dp_source_supports_tps3(dev_priv) &&
450             drm_dp_tps3_supported(intel_dp->dpcd))
451                 val |= EDP_PSR_TP1_TP3_SEL;
452         else
453                 val |= EDP_PSR_TP1_TP2_SEL;
454
455         return val;
456 }
457
458 static u8 psr_compute_idle_frames(struct intel_dp *intel_dp)
459 {
460         struct intel_connector *connector = intel_dp->attached_connector;
461         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
462         int idle_frames;
463
464         /* Let's use 6 as the minimum to cover all known cases including the
465          * off-by-one issue that HW has in some cases.
466          */
467         idle_frames = max(6, connector->panel.vbt.psr.idle_frames);
468         idle_frames = max(idle_frames, intel_dp->psr.sink_sync_latency + 1);
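        /*
         * Example (illustrative numbers): with a VBT value of 2 and a sink
         * sync latency of 8 frames this resolves to max(6, 2, 8 + 1) = 9
         * idle frames.
         */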
469
470         if (drm_WARN_ON(&dev_priv->drm, idle_frames > 0xf))
471                 idle_frames = 0xf;
472
473         return idle_frames;
474 }
475
476 static void hsw_activate_psr1(struct intel_dp *intel_dp)
477 {
478         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
479         u32 max_sleep_time = 0x1f;
480         u32 val = EDP_PSR_ENABLE;
481
482         val |= psr_compute_idle_frames(intel_dp) << EDP_PSR_IDLE_FRAME_SHIFT;
483
484         val |= max_sleep_time << EDP_PSR_MAX_SLEEP_TIME_SHIFT;
485         if (IS_HASWELL(dev_priv))
486                 val |= EDP_PSR_MIN_LINK_ENTRY_TIME_8_LINES;
487
488         if (intel_dp->psr.link_standby)
489                 val |= EDP_PSR_LINK_STANDBY;
490
491         val |= intel_psr1_get_tp_time(intel_dp);
492
493         if (DISPLAY_VER(dev_priv) >= 8)
494                 val |= EDP_PSR_CRC_ENABLE;
495
496         val |= (intel_de_read(dev_priv, EDP_PSR_CTL(intel_dp->psr.transcoder)) &
497                 EDP_PSR_RESTORE_PSR_ACTIVE_CTX_MASK);
498         intel_de_write(dev_priv, EDP_PSR_CTL(intel_dp->psr.transcoder), val);
499 }
500
501 static u32 intel_psr2_get_tp_time(struct intel_dp *intel_dp)
502 {
503         struct intel_connector *connector = intel_dp->attached_connector;
504         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
505         u32 val = 0;
506
507         if (dev_priv->params.psr_safest_params)
508                 return EDP_PSR2_TP2_TIME_2500us;
509
510         if (connector->panel.vbt.psr.psr2_tp2_tp3_wakeup_time_us >= 0 &&
511             connector->panel.vbt.psr.psr2_tp2_tp3_wakeup_time_us <= 50)
512                 val |= EDP_PSR2_TP2_TIME_50us;
513         else if (connector->panel.vbt.psr.psr2_tp2_tp3_wakeup_time_us <= 100)
514                 val |= EDP_PSR2_TP2_TIME_100us;
515         else if (connector->panel.vbt.psr.psr2_tp2_tp3_wakeup_time_us <= 500)
516                 val |= EDP_PSR2_TP2_TIME_500us;
517         else
518                 val |= EDP_PSR2_TP2_TIME_2500us;
519
520         return val;
521 }
522
523 static int psr2_block_count_lines(struct intel_dp *intel_dp)
524 {
525         return intel_dp->psr.io_wake_lines < 9 &&
526                 intel_dp->psr.fast_wake_lines < 9 ? 8 : 12;
527 }
528
529 static int psr2_block_count(struct intel_dp *intel_dp)
530 {
531         return psr2_block_count_lines(intel_dp) / 4;
532 }
533
534 static void hsw_activate_psr2(struct intel_dp *intel_dp)
535 {
536         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
537         u32 val = EDP_PSR2_ENABLE;
538
539         val |= psr_compute_idle_frames(intel_dp) << EDP_PSR2_IDLE_FRAME_SHIFT;
540
541         if (DISPLAY_VER(dev_priv) <= 13 && !IS_ALDERLAKE_P(dev_priv))
542                 val |= EDP_SU_TRACK_ENABLE;
543
544         if (DISPLAY_VER(dev_priv) >= 10 && DISPLAY_VER(dev_priv) <= 12)
545                 val |= EDP_Y_COORDINATE_ENABLE;
546
547         val |= EDP_PSR2_FRAME_BEFORE_SU(max_t(u8, intel_dp->psr.sink_sync_latency + 1, 2));
548         val |= intel_psr2_get_tp_time(intel_dp);
549
550         if (DISPLAY_VER(dev_priv) >= 12) {
551                 if (psr2_block_count(intel_dp) > 2)
552                         val |= TGL_EDP_PSR2_BLOCK_COUNT_NUM_3;
553                 else
554                         val |= TGL_EDP_PSR2_BLOCK_COUNT_NUM_2;
555         }
556
557         /* Wa_22012278275:adl-p */
558         if (IS_ADLP_DISPLAY_STEP(dev_priv, STEP_A0, STEP_E0)) {
559                 static const u8 map[] = {
560                         2, /* 5 lines */
561                         1, /* 6 lines */
562                         0, /* 7 lines */
563                         3, /* 8 lines */
564                         6, /* 9 lines */
565                         5, /* 10 lines */
566                         4, /* 11 lines */
567                         7, /* 12 lines */
568                 };
569                 /*
570                  * Still using the default IO_BUFFER_WAKE and FAST_WAKE, see
571                  * comments below for more information.
572                  */
573                 u32 tmp;
574
575                 tmp = map[intel_dp->psr.io_wake_lines - TGL_EDP_PSR2_IO_BUFFER_WAKE_MIN_LINES];
576                 tmp = tmp << TGL_EDP_PSR2_IO_BUFFER_WAKE_SHIFT;
577                 val |= tmp;
578
579                 tmp = map[intel_dp->psr.fast_wake_lines - TGL_EDP_PSR2_FAST_WAKE_MIN_LINES];
580                 tmp = tmp << TGL_EDP_PSR2_FAST_WAKE_MIN_SHIFT;
581                 val |= tmp;
582         } else if (DISPLAY_VER(dev_priv) >= 12) {
583                 val |= TGL_EDP_PSR2_IO_BUFFER_WAKE(intel_dp->psr.io_wake_lines);
584                 val |= TGL_EDP_PSR2_FAST_WAKE(intel_dp->psr.fast_wake_lines);
585         } else if (DISPLAY_VER(dev_priv) >= 9) {
586                 val |= EDP_PSR2_IO_BUFFER_WAKE(intel_dp->psr.io_wake_lines);
587                 val |= EDP_PSR2_FAST_WAKE(intel_dp->psr.fast_wake_lines);
588         }
589
590         if (intel_dp->psr.req_psr2_sdp_prior_scanline)
591                 val |= EDP_PSR2_SU_SDP_SCANLINE;
592
593         if (intel_dp->psr.psr2_sel_fetch_enabled) {
594                 u32 tmp;
595
596                 tmp = intel_de_read(dev_priv, PSR2_MAN_TRK_CTL(intel_dp->psr.transcoder));
597                 drm_WARN_ON(&dev_priv->drm, !(tmp & PSR2_MAN_TRK_CTL_ENABLE));
598         } else if (HAS_PSR2_SEL_FETCH(dev_priv)) {
599                 intel_de_write(dev_priv,
600                                PSR2_MAN_TRK_CTL(intel_dp->psr.transcoder), 0);
601         }
602
603         /*
604          * PSR2 HW is incorrectly using EDP_PSR_TP1_TP3_SEL and BSpec
605          * recommends keeping this bit unset while PSR2 is enabled.
606          */
607         intel_de_write(dev_priv, EDP_PSR_CTL(intel_dp->psr.transcoder), 0);
608
609         intel_de_write(dev_priv, EDP_PSR2_CTL(intel_dp->psr.transcoder), val);
610 }
611
612 static bool
613 transcoder_has_psr2(struct drm_i915_private *dev_priv, enum transcoder trans)
614 {
615         if (IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(dev_priv) >= 14)
616                 return trans == TRANSCODER_A || trans == TRANSCODER_B;
617         else if (DISPLAY_VER(dev_priv) >= 12)
618                 return trans == TRANSCODER_A;
619         else
620                 return trans == TRANSCODER_EDP;
621 }
622
623 static u32 intel_get_frame_time_us(const struct intel_crtc_state *cstate)
624 {
625         if (!cstate || !cstate->hw.active)
626                 return 0;
627
628         return DIV_ROUND_UP(1000 * 1000,
629                             drm_mode_vrefresh(&cstate->hw.adjusted_mode));
630 }
631
632 static void psr2_program_idle_frames(struct intel_dp *intel_dp,
633                                      u32 idle_frames)
634 {
635         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
636
637         idle_frames <<=  EDP_PSR2_IDLE_FRAME_SHIFT;
638         intel_de_rmw(dev_priv, EDP_PSR2_CTL(intel_dp->psr.transcoder),
639                      EDP_PSR2_IDLE_FRAME_MASK, idle_frames);
640 }
641
642 static void tgl_psr2_enable_dc3co(struct intel_dp *intel_dp)
643 {
644         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
645
646         psr2_program_idle_frames(intel_dp, 0);
647         intel_display_power_set_target_dc_state(dev_priv, DC_STATE_EN_DC3CO);
648 }
649
650 static void tgl_psr2_disable_dc3co(struct intel_dp *intel_dp)
651 {
652         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
653
654         intel_display_power_set_target_dc_state(dev_priv, DC_STATE_EN_UPTO_DC6);
655         psr2_program_idle_frames(intel_dp, psr_compute_idle_frames(intel_dp));
656 }
657
658 static void tgl_dc3co_disable_work(struct work_struct *work)
659 {
660         struct intel_dp *intel_dp =
661                 container_of(work, typeof(*intel_dp), psr.dc3co_work.work);
662
663         mutex_lock(&intel_dp->psr.lock);
664         /* If delayed work is pending, it is not idle */
665         if (delayed_work_pending(&intel_dp->psr.dc3co_work))
666                 goto unlock;
667
668         tgl_psr2_disable_dc3co(intel_dp);
669 unlock:
670         mutex_unlock(&intel_dp->psr.lock);
671 }
672
673 static void tgl_disallow_dc3co_on_psr2_exit(struct intel_dp *intel_dp)
674 {
675         if (!intel_dp->psr.dc3co_exitline)
676                 return;
677
678         cancel_delayed_work(&intel_dp->psr.dc3co_work);
679         /* Before PSR2 exit, disallow DC3CO */
680         tgl_psr2_disable_dc3co(intel_dp);
681 }
682
683 static bool
684 dc3co_is_pipe_port_compatible(struct intel_dp *intel_dp,
685                               struct intel_crtc_state *crtc_state)
686 {
687         struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
688         enum pipe pipe = to_intel_crtc(crtc_state->uapi.crtc)->pipe;
689         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
690         enum port port = dig_port->base.port;
691
692         if (IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(dev_priv) >= 14)
693                 return pipe <= PIPE_B && port <= PORT_B;
694         else
695                 return pipe == PIPE_A && port == PORT_A;
696 }
697
698 static void
699 tgl_dc3co_exitline_compute_config(struct intel_dp *intel_dp,
700                                   struct intel_crtc_state *crtc_state)
701 {
702         const u32 crtc_vdisplay = crtc_state->uapi.adjusted_mode.crtc_vdisplay;
703         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
704         struct i915_power_domains *power_domains = &dev_priv->display.power.domains;
705         u32 exit_scanlines;
706
707         /*
708          * FIXME: Due to the changed sequence of activating/deactivating DC3CO,
709          * disable DC3CO until the changed dc3co activating/deactivating sequence
710          * is applied. B.Specs:49196
711          */
712         return;
713
714         /*
715          * DMC's DC3CO exit mechanism has an issue with Selective Fetch.
716          * TODO: when the issue is addressed, this restriction should be removed.
717          */
718         if (crtc_state->enable_psr2_sel_fetch)
719                 return;
720
721         if (!(power_domains->allowed_dc_mask & DC_STATE_EN_DC3CO))
722                 return;
723
724         if (!dc3co_is_pipe_port_compatible(intel_dp, crtc_state))
725                 return;
726
727         /* Wa_16011303918:adl-p */
728         if (IS_ADLP_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0))
729                 return;
730
731         /*
732          * DC3CO Exit time 200us B.Spec 49196
733          * PSR2 transcoder Early Exit scanlines = ROUNDUP(200 / line time) + 1
734          */
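        /*
         * Illustrative numbers (an example, not from bspec): with a
         * hypothetical ~14.8us line time (1920x1080@60, htotal 2200,
         * 148.5 MHz pixel clock) this is ROUNDUP(200 / 14.8) + 1 = 15
         * scanlines, i.e. dc3co_exitline = vdisplay - 15.
         */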
735         exit_scanlines =
736                 intel_usecs_to_scanlines(&crtc_state->uapi.adjusted_mode, 200) + 1;
737
738         if (drm_WARN_ON(&dev_priv->drm, exit_scanlines > crtc_vdisplay))
739                 return;
740
741         crtc_state->dc3co_exitline = crtc_vdisplay - exit_scanlines;
742 }
743
744 static bool intel_psr2_sel_fetch_config_valid(struct intel_dp *intel_dp,
745                                               struct intel_crtc_state *crtc_state)
746 {
747         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
748
749         if (!dev_priv->params.enable_psr2_sel_fetch &&
750             intel_dp->psr.debug != I915_PSR_DEBUG_ENABLE_SEL_FETCH) {
751                 drm_dbg_kms(&dev_priv->drm,
752                             "PSR2 sel fetch not enabled, disabled by parameter\n");
753                 return false;
754         }
755
756         if (crtc_state->uapi.async_flip) {
757                 drm_dbg_kms(&dev_priv->drm,
758                             "PSR2 sel fetch not enabled, async flip enabled\n");
759                 return false;
760         }
761
762         return crtc_state->enable_psr2_sel_fetch = true;
763 }
764
765 static bool psr2_granularity_check(struct intel_dp *intel_dp,
766                                    struct intel_crtc_state *crtc_state)
767 {
768         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
769         const struct drm_dsc_config *vdsc_cfg = &crtc_state->dsc.config;
770         const int crtc_hdisplay = crtc_state->hw.adjusted_mode.crtc_hdisplay;
771         const int crtc_vdisplay = crtc_state->hw.adjusted_mode.crtc_vdisplay;
772         u16 y_granularity = 0;
773
774         /* PSR2 HW only sends full lines so we only need to validate the width */
775         if (crtc_hdisplay % intel_dp->psr.su_w_granularity)
776                 return false;
777
778         if (crtc_vdisplay % intel_dp->psr.su_y_granularity)
779                 return false;
780
781         /* HW tracking is only aligned to 4 lines */
782         if (!crtc_state->enable_psr2_sel_fetch)
783                 return intel_dp->psr.su_y_granularity == 4;
784
785         /*
786          * adl_p and mtl platforms have 1 line granularity.
787          * For other platforms with SW tracking we can adjust the y coordinates
788          * to match the sink requirement if it is a multiple of 4.
789          */
790         if (IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(dev_priv) >= 14)
791                 y_granularity = intel_dp->psr.su_y_granularity;
792         else if (intel_dp->psr.su_y_granularity <= 2)
793                 y_granularity = 4;
794         else if ((intel_dp->psr.su_y_granularity % 4) == 0)
795                 y_granularity = intel_dp->psr.su_y_granularity;
796
797         if (y_granularity == 0 || crtc_vdisplay % y_granularity)
798                 return false;
799
800         if (crtc_state->dsc.compression_enable &&
801             vdsc_cfg->slice_height % y_granularity)
802                 return false;
803
804         crtc_state->su_y_granularity = y_granularity;
805         return true;
806 }
807
808 static bool _compute_psr2_sdp_prior_scanline_indication(struct intel_dp *intel_dp,
809                                                         struct intel_crtc_state *crtc_state)
810 {
811         const struct drm_display_mode *adjusted_mode = &crtc_state->uapi.adjusted_mode;
812         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
813         u32 hblank_total, hblank_ns, req_ns;
814
815         hblank_total = adjusted_mode->crtc_hblank_end - adjusted_mode->crtc_hblank_start;
816         hblank_ns = div_u64(1000000ULL * hblank_total, adjusted_mode->crtc_clock);
817
818         /* From spec: ((60 / number of lanes) + 11) * 1000 / symbol clock frequency MHz */
819         req_ns = ((60 / crtc_state->lane_count) + 11) * 1000 / (crtc_state->port_clock / 1000);
820
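        /*
         * Worked example (illustrative numbers, not from bspec): with 4 lanes
         * at HBR (port_clock 270000, i.e. a 270 MHz symbol clock) req_ns is
         * ((60 / 4) + 11) * 1000 / 270 ~= 96 ns, while a 1920x1080@60 mode
         * (280 pixel hblank, 148.5 MHz pixel clock) gives hblank_ns ~= 1885 ns,
         * so the margin check below passes comfortably.
         */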
821         if ((hblank_ns - req_ns) > 100)
822                 return true;
823
824         /* Not supported <13 / Wa_22012279113:adl-p */
825         if (DISPLAY_VER(dev_priv) <= 13 || intel_dp->edp_dpcd[0] < DP_EDP_14b)
826                 return false;
827
828         crtc_state->req_psr2_sdp_prior_scanline = true;
829         return true;
830 }
831
832 static bool _compute_psr2_wake_times(struct intel_dp *intel_dp,
833                                      struct intel_crtc_state *crtc_state)
834 {
835         struct drm_i915_private *i915 = dp_to_i915(intel_dp);
836         int io_wake_lines, io_wake_time, fast_wake_lines, fast_wake_time;
837         u8 max_wake_lines;
838
839         if (DISPLAY_VER(i915) >= 12) {
840                 io_wake_time = 42;
841                 /*
842                  * According to Bspec it's 42us, but based on testing
843                  * it is not enough -> use 45 us.
844                  */
845                 fast_wake_time = 45;
846                 max_wake_lines = 12;
847         } else {
848                 io_wake_time = 50;
849                 fast_wake_time = 32;
850                 max_wake_lines = 8;
851         }
852
853         io_wake_lines = intel_usecs_to_scanlines(
854                 &crtc_state->uapi.adjusted_mode, io_wake_time);
855         fast_wake_lines = intel_usecs_to_scanlines(
856                 &crtc_state->uapi.adjusted_mode, fast_wake_time);
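        /*
         * Illustrative numbers (not from bspec): with a hypothetical
         * 1920x1080@60 mode (~14.8us line time) the DISPLAY_VER >= 12 wake
         * times of 42us/45us convert to roughly 3 and 4 lines, which the
         * 7 line lower limit below then raises to 7 in both cases.
         */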
857
858         if (io_wake_lines > max_wake_lines ||
859             fast_wake_lines > max_wake_lines)
860                 return false;
861
862         if (i915->params.psr_safest_params)
863                 io_wake_lines = fast_wake_lines = max_wake_lines;
864
865         /* According to Bspec the lower limit should be set to 7 lines. */
866         intel_dp->psr.io_wake_lines = max(io_wake_lines, 7);
867         intel_dp->psr.fast_wake_lines = max(fast_wake_lines, 7);
868
869         return true;
870 }
871
872 static bool intel_psr2_config_valid(struct intel_dp *intel_dp,
873                                     struct intel_crtc_state *crtc_state)
874 {
875         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
876         int crtc_hdisplay = crtc_state->hw.adjusted_mode.crtc_hdisplay;
877         int crtc_vdisplay = crtc_state->hw.adjusted_mode.crtc_vdisplay;
878         int psr_max_h = 0, psr_max_v = 0, max_bpp = 0;
879
880         if (!intel_dp->psr.sink_psr2_support)
881                 return false;
882
883         /* JSL and EHL only support eDP 1.3 */
884         if (IS_JSL_EHL(dev_priv)) {
885                 drm_dbg_kms(&dev_priv->drm, "PSR2 not supported by phy\n");
886                 return false;
887         }
888
889         /* Wa_16011181250 */
890         if (IS_ROCKETLAKE(dev_priv) || IS_ALDERLAKE_S(dev_priv) ||
891             IS_DG2(dev_priv)) {
892                 drm_dbg_kms(&dev_priv->drm, "PSR2 is defeatured for this platform\n");
893                 return false;
894         }
895
896         if (IS_ADLP_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0)) {
897                 drm_dbg_kms(&dev_priv->drm, "PSR2 not completely functional in this stepping\n");
898                 return false;
899         }
900
901         if (!transcoder_has_psr2(dev_priv, crtc_state->cpu_transcoder)) {
902                 drm_dbg_kms(&dev_priv->drm,
903                             "PSR2 not supported in transcoder %s\n",
904                             transcoder_name(crtc_state->cpu_transcoder));
905                 return false;
906         }
907
908         if (!psr2_global_enabled(intel_dp)) {
909                 drm_dbg_kms(&dev_priv->drm, "PSR2 disabled by flag\n");
910                 return false;
911         }
912
913         /*
914          * DSC and PSR2 cannot be enabled simultaneously. If a requested
915          * resolution requires DSC to be enabled, priority is given to DSC
916          * over PSR2.
917          */
918         if (crtc_state->dsc.compression_enable &&
919             (DISPLAY_VER(dev_priv) <= 13 && !IS_ALDERLAKE_P(dev_priv))) {
920                 drm_dbg_kms(&dev_priv->drm,
921                             "PSR2 cannot be enabled since DSC is enabled\n");
922                 return false;
923         }
924
925         if (crtc_state->crc_enabled) {
926                 drm_dbg_kms(&dev_priv->drm,
927                             "PSR2 not enabled because it would inhibit pipe CRC calculation\n");
928                 return false;
929         }
930
931         if (DISPLAY_VER(dev_priv) >= 12) {
932                 psr_max_h = 5120;
933                 psr_max_v = 3200;
934                 max_bpp = 30;
935         } else if (DISPLAY_VER(dev_priv) >= 10) {
936                 psr_max_h = 4096;
937                 psr_max_v = 2304;
938                 max_bpp = 24;
939         } else if (DISPLAY_VER(dev_priv) == 9) {
940                 psr_max_h = 3640;
941                 psr_max_v = 2304;
942                 max_bpp = 24;
943         }
944
945         if (crtc_state->pipe_bpp > max_bpp) {
946                 drm_dbg_kms(&dev_priv->drm,
947                             "PSR2 not enabled, pipe bpp %d > max supported %d\n",
948                             crtc_state->pipe_bpp, max_bpp);
949                 return false;
950         }
951
952         /* Wa_16011303918:adl-p */
953         if (crtc_state->vrr.enable &&
954             IS_ADLP_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0)) {
955                 drm_dbg_kms(&dev_priv->drm,
956                             "PSR2 not enabled, not compatible with HW stepping + VRR\n");
957                 return false;
958         }
959
960         if (!_compute_psr2_sdp_prior_scanline_indication(intel_dp, crtc_state)) {
961                 drm_dbg_kms(&dev_priv->drm,
962                             "PSR2 not enabled, PSR2 SDP indication do not fit in hblank\n");
963                 return false;
964         }
965
966         if (!_compute_psr2_wake_times(intel_dp, crtc_state)) {
967                 drm_dbg_kms(&dev_priv->drm,
968                             "PSR2 not enabled, Unable to use long enough wake times\n");
969                 return false;
970         }
971
972         /* Vblank >= PSR2_CTL Block Count Number maximum line count */
973         if (crtc_state->hw.adjusted_mode.crtc_vblank_end -
974             crtc_state->hw.adjusted_mode.crtc_vblank_start <
975             psr2_block_count_lines(intel_dp)) {
976                 drm_dbg_kms(&dev_priv->drm,
977                             "PSR2 not enabled, too short vblank time\n");
978                 return false;
979         }
980
981         if (HAS_PSR2_SEL_FETCH(dev_priv)) {
982                 if (!intel_psr2_sel_fetch_config_valid(intel_dp, crtc_state) &&
983                     !HAS_PSR_HW_TRACKING(dev_priv)) {
984                         drm_dbg_kms(&dev_priv->drm,
985                                     "PSR2 not enabled, selective fetch not valid and no HW tracking available\n");
986                         return false;
987                 }
988         }
989
990         if (!psr2_granularity_check(intel_dp, crtc_state)) {
991                 drm_dbg_kms(&dev_priv->drm, "PSR2 not enabled, SU granularity not compatible\n");
992                 goto unsupported;
993         }
994
995         if (!crtc_state->enable_psr2_sel_fetch &&
996             (crtc_hdisplay > psr_max_h || crtc_vdisplay > psr_max_v)) {
997                 drm_dbg_kms(&dev_priv->drm,
998                             "PSR2 not enabled, resolution %dx%d > max supported %dx%d\n",
999                             crtc_hdisplay, crtc_vdisplay,
1000                             psr_max_h, psr_max_v);
1001                 goto unsupported;
1002         }
1003
1004         tgl_dc3co_exitline_compute_config(intel_dp, crtc_state);
1005         return true;
1006
1007 unsupported:
1008         crtc_state->enable_psr2_sel_fetch = false;
1009         return false;
1010 }
1011
1012 void intel_psr_compute_config(struct intel_dp *intel_dp,
1013                               struct intel_crtc_state *crtc_state,
1014                               struct drm_connector_state *conn_state)
1015 {
1016         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1017         const struct drm_display_mode *adjusted_mode =
1018                 &crtc_state->hw.adjusted_mode;
1019         int psr_setup_time;
1020
1021         /*
1022          * Current PSR panels don't work reliably with VRR enabled,
1023          * so if VRR is enabled, do not enable PSR.
1024          */
1025         if (crtc_state->vrr.enable)
1026                 return;
1027
1028         if (!CAN_PSR(intel_dp))
1029                 return;
1030
1031         if (!psr_global_enabled(intel_dp)) {
1032                 drm_dbg_kms(&dev_priv->drm, "PSR disabled by flag\n");
1033                 return;
1034         }
1035
1036         if (intel_dp->psr.sink_not_reliable) {
1037                 drm_dbg_kms(&dev_priv->drm,
1038                             "PSR sink implementation is not reliable\n");
1039                 return;
1040         }
1041
1042         if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
1043                 drm_dbg_kms(&dev_priv->drm,
1044                             "PSR condition failed: Interlaced mode enabled\n");
1045                 return;
1046         }
1047
1048         psr_setup_time = drm_dp_psr_setup_time(intel_dp->psr_dpcd);
1049         if (psr_setup_time < 0) {
1050                 drm_dbg_kms(&dev_priv->drm,
1051                             "PSR condition failed: Invalid PSR setup time (0x%02x)\n",
1052                             intel_dp->psr_dpcd[1]);
1053                 return;
1054         }
1055
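        /*
         * Example (illustrative numbers): a panel reporting a 330us setup
         * time needs ~23 scanlines at a ~14.8us line time, which fits in the
         * 44 line vblank of a typical 1920x1080@60 timing (vtotal 1125).
         */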
1056         if (intel_usecs_to_scanlines(adjusted_mode, psr_setup_time) >
1057             adjusted_mode->crtc_vtotal - adjusted_mode->crtc_vdisplay - 1) {
1058                 drm_dbg_kms(&dev_priv->drm,
1059                             "PSR condition failed: PSR setup time (%d us) too long\n",
1060                             psr_setup_time);
1061                 return;
1062         }
1063
1064         crtc_state->has_psr = true;
1065         crtc_state->has_psr2 = intel_psr2_config_valid(intel_dp, crtc_state);
1066
1067         crtc_state->infoframes.enable |= intel_hdmi_infoframe_enable(DP_SDP_VSC);
1068         intel_dp_compute_psr_vsc_sdp(intel_dp, crtc_state, conn_state,
1069                                      &crtc_state->psr_vsc);
1070 }
1071
1072 void intel_psr_get_config(struct intel_encoder *encoder,
1073                           struct intel_crtc_state *pipe_config)
1074 {
1075         struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
1076         struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
1077         struct intel_dp *intel_dp;
1078         u32 val;
1079
1080         if (!dig_port)
1081                 return;
1082
1083         intel_dp = &dig_port->dp;
1084         if (!CAN_PSR(intel_dp))
1085                 return;
1086
1087         mutex_lock(&intel_dp->psr.lock);
1088         if (!intel_dp->psr.enabled)
1089                 goto unlock;
1090
1091         /*
1092          * Not possible to read the EDP_PSR/PSR2_CTL registers as they get
1093          * enabled/disabled because of frontbuffer tracking and other mechanisms.
1094          */
1095         pipe_config->has_psr = true;
1096         pipe_config->has_psr2 = intel_dp->psr.psr2_enabled;
1097         pipe_config->infoframes.enable |= intel_hdmi_infoframe_enable(DP_SDP_VSC);
1098
1099         if (!intel_dp->psr.psr2_enabled)
1100                 goto unlock;
1101
1102         if (HAS_PSR2_SEL_FETCH(dev_priv)) {
1103                 val = intel_de_read(dev_priv, PSR2_MAN_TRK_CTL(intel_dp->psr.transcoder));
1104                 if (val & PSR2_MAN_TRK_CTL_ENABLE)
1105                         pipe_config->enable_psr2_sel_fetch = true;
1106         }
1107
1108         if (DISPLAY_VER(dev_priv) >= 12) {
1109                 val = intel_de_read(dev_priv, TRANS_EXITLINE(intel_dp->psr.transcoder));
1110                 val &= EXITLINE_MASK;
1111                 pipe_config->dc3co_exitline = val;
1112         }
1113 unlock:
1114         mutex_unlock(&intel_dp->psr.lock);
1115 }
1116
1117 static void intel_psr_activate(struct intel_dp *intel_dp)
1118 {
1119         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1120         enum transcoder transcoder = intel_dp->psr.transcoder;
1121
1122         if (transcoder_has_psr2(dev_priv, transcoder))
1123                 drm_WARN_ON(&dev_priv->drm,
1124                             intel_de_read(dev_priv, EDP_PSR2_CTL(transcoder)) & EDP_PSR2_ENABLE);
1125
1126         drm_WARN_ON(&dev_priv->drm,
1127                     intel_de_read(dev_priv, EDP_PSR_CTL(transcoder)) & EDP_PSR_ENABLE);
1128         drm_WARN_ON(&dev_priv->drm, intel_dp->psr.active);
1129         lockdep_assert_held(&intel_dp->psr.lock);
1130
1131         /* psr1 and psr2 are mutually exclusive. */
1132         if (intel_dp->psr.psr2_enabled)
1133                 hsw_activate_psr2(intel_dp);
1134         else
1135                 hsw_activate_psr1(intel_dp);
1136
1137         intel_dp->psr.active = true;
1138 }
1139
1140 static u32 wa_16013835468_bit_get(struct intel_dp *intel_dp)
1141 {
1142         switch (intel_dp->psr.pipe) {
1143         case PIPE_A:
1144                 return LATENCY_REPORTING_REMOVED_PIPE_A;
1145         case PIPE_B:
1146                 return LATENCY_REPORTING_REMOVED_PIPE_B;
1147         case PIPE_C:
1148                 return LATENCY_REPORTING_REMOVED_PIPE_C;
1149         case PIPE_D:
1150                 return LATENCY_REPORTING_REMOVED_PIPE_D;
1151         default:
1152                 MISSING_CASE(intel_dp->psr.pipe);
1153                 return 0;
1154         }
1155 }
1156
1157 /*
1158  * Wa_16013835468
1159  * Wa_14015648006
1160  */
1161 static void wm_optimization_wa(struct intel_dp *intel_dp,
1162                                const struct intel_crtc_state *crtc_state)
1163 {
1164         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1165         bool set_wa_bit = false;
1166
1167         /* Wa_14015648006 */
1168         if (IS_MTL_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0) ||
1169             IS_DISPLAY_VER(dev_priv, 11, 13))
1170                 set_wa_bit |= crtc_state->wm_level_disabled;
1171
1172         /* Wa_16013835468 */
1173         if (DISPLAY_VER(dev_priv) == 12)
1174                 set_wa_bit |= crtc_state->hw.adjusted_mode.crtc_vblank_start !=
1175                         crtc_state->hw.adjusted_mode.crtc_vdisplay;
1176
1177         if (set_wa_bit)
1178                 intel_de_rmw(dev_priv, GEN8_CHICKEN_DCPR_1,
1179                              0, wa_16013835468_bit_get(intel_dp));
1180         else
1181                 intel_de_rmw(dev_priv, GEN8_CHICKEN_DCPR_1,
1182                              wa_16013835468_bit_get(intel_dp), 0);
1183 }
1184
1185 static void intel_psr_enable_source(struct intel_dp *intel_dp,
1186                                     const struct intel_crtc_state *crtc_state)
1187 {
1188         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1189         enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
1190         u32 mask;
1191
1192         /*
1193          * Per spec: avoid continuous PSR exit by masking MEMUP and HPD. Also
1194          * mask LPSP to avoid a dependency on other drivers that might block
1195          * runtime_pm, besides preventing other HW tracking issues, now that we
1196          * can rely on frontbuffer tracking.
1197          */
1198         mask = EDP_PSR_DEBUG_MASK_MEMUP |
1199                EDP_PSR_DEBUG_MASK_HPD |
1200                EDP_PSR_DEBUG_MASK_LPSP |
1201                EDP_PSR_DEBUG_MASK_MAX_SLEEP;
1202
1203         if (DISPLAY_VER(dev_priv) < 11)
1204                 mask |= EDP_PSR_DEBUG_MASK_DISP_REG_WRITE;
1205
1206         intel_de_write(dev_priv, EDP_PSR_DEBUG(intel_dp->psr.transcoder),
1207                        mask);
1208
1209         psr_irq_control(intel_dp);
1210
1211         /*
1212          * TODO: if future platforms support DC3CO in more than one
1213          * transcoder, EXITLINE will need to be unset when disabling PSR.
1214          */
1215         if (intel_dp->psr.dc3co_exitline)
1216                 intel_de_rmw(dev_priv, TRANS_EXITLINE(cpu_transcoder), EXITLINE_MASK,
1217                              intel_dp->psr.dc3co_exitline << EXITLINE_SHIFT | EXITLINE_ENABLE);
1218
1219         if (HAS_PSR_HW_TRACKING(dev_priv) && HAS_PSR2_SEL_FETCH(dev_priv))
1220                 intel_de_rmw(dev_priv, CHICKEN_PAR1_1, IGNORE_PSR2_HW_TRACKING,
1221                              intel_dp->psr.psr2_sel_fetch_enabled ?
1222                              IGNORE_PSR2_HW_TRACKING : 0);
1223
1224         /*
1225          * Wa_16013835468
1226          * Wa_14015648006
1227          */
1228         wm_optimization_wa(intel_dp, crtc_state);
1229
1230         if (intel_dp->psr.psr2_enabled) {
1231                 if (DISPLAY_VER(dev_priv) == 9)
1232                         intel_de_rmw(dev_priv, CHICKEN_TRANS(cpu_transcoder), 0,
1233                                      PSR2_VSC_ENABLE_PROG_HEADER |
1234                                      PSR2_ADD_VERTICAL_LINE_COUNT);
1235
1236                 /*
1237                  * Wa_16014451276:adlp,mtl[a0,b0]
1238                  * All supported adlp panels have 1-based X granularity; this may
1239                  * cause issues if non-supported panels are used.
1240                  */
1241                 if (IS_MTL_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0))
1242                         intel_de_rmw(dev_priv, MTL_CHICKEN_TRANS(cpu_transcoder), 0,
1243                                      ADLP_1_BASED_X_GRANULARITY);
1244                 else if (IS_ALDERLAKE_P(dev_priv))
1245                         intel_de_rmw(dev_priv, CHICKEN_TRANS(cpu_transcoder), 0,
1246                                      ADLP_1_BASED_X_GRANULARITY);
1247
1248                 /* Wa_16012604467:adlp,mtl[a0,b0] */
1249                 if (IS_MTL_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0))
1250                         intel_de_rmw(dev_priv,
1251                                      MTL_CLKGATE_DIS_TRANS(cpu_transcoder), 0,
1252                                      MTL_CLKGATE_DIS_TRANS_DMASC_GATING_DIS);
1253                 else if (IS_ALDERLAKE_P(dev_priv))
1254                         intel_de_rmw(dev_priv, CLKGATE_DIS_MISC, 0,
1255                                      CLKGATE_DIS_MISC_DMASC_GATING_DIS);
1256         }
1257 }
1258
1259 static bool psr_interrupt_error_check(struct intel_dp *intel_dp)
1260 {
1261         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1262         u32 val;
1263
1264         /*
1265          * If a PSR error happened and the driver is reloaded, the EDP_PSR_IIR
1266          * will still keep the error set even after the reset done in the
1267          * irq_preinstall and irq_uninstall hooks.
1268          * And enabling PSR in this situation causes the screen to freeze the
1269          * first time the PSR HW tries to activate, so let's keep PSR disabled
1270          * to avoid any rendering problems.
1271          */
1272         if (DISPLAY_VER(dev_priv) >= 12)
1273                 val = intel_de_read(dev_priv,
1274                                     TRANS_PSR_IIR(intel_dp->psr.transcoder));
1275         else
1276                 val = intel_de_read(dev_priv, EDP_PSR_IIR);
1277         val &= psr_irq_psr_error_bit_get(intel_dp);
1278         if (val) {
1279                 intel_dp->psr.sink_not_reliable = true;
1280                 drm_dbg_kms(&dev_priv->drm,
1281                             "PSR interruption error set, not enabling PSR\n");
1282                 return false;
1283         }
1284
1285         return true;
1286 }
1287
1288 static void intel_psr_enable_locked(struct intel_dp *intel_dp,
1289                                     const struct intel_crtc_state *crtc_state)
1290 {
1291         struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
1292         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1293         enum phy phy = intel_port_to_phy(dev_priv, dig_port->base.port);
1294         struct intel_encoder *encoder = &dig_port->base;
1295         u32 val;
1296
1297         drm_WARN_ON(&dev_priv->drm, intel_dp->psr.enabled);
1298
1299         intel_dp->psr.psr2_enabled = crtc_state->has_psr2;
1300         intel_dp->psr.busy_frontbuffer_bits = 0;
1301         intel_dp->psr.pipe = to_intel_crtc(crtc_state->uapi.crtc)->pipe;
1302         intel_dp->psr.transcoder = crtc_state->cpu_transcoder;
1303         /* DC5/DC6 requires at least 6 idle frames */
1304         val = usecs_to_jiffies(intel_get_frame_time_us(crtc_state) * 6);
1305         intel_dp->psr.dc3co_exit_delay = val;
1306         intel_dp->psr.dc3co_exitline = crtc_state->dc3co_exitline;
1307         intel_dp->psr.psr2_sel_fetch_enabled = crtc_state->enable_psr2_sel_fetch;
1308         intel_dp->psr.psr2_sel_fetch_cff_enabled = false;
1309         intel_dp->psr.req_psr2_sdp_prior_scanline =
1310                 crtc_state->req_psr2_sdp_prior_scanline;
1311
1312         if (!psr_interrupt_error_check(intel_dp))
1313                 return;
1314
1315         drm_dbg_kms(&dev_priv->drm, "Enabling PSR%s\n",
1316                     intel_dp->psr.psr2_enabled ? "2" : "1");
1317         intel_write_dp_vsc_sdp(encoder, crtc_state, &crtc_state->psr_vsc);
1318         intel_snps_phy_update_psr_power_state(dev_priv, phy, true);
1319         intel_psr_enable_sink(intel_dp);
1320         intel_psr_enable_source(intel_dp, crtc_state);
1321         intel_dp->psr.enabled = true;
1322         intel_dp->psr.paused = false;
1323
1324         intel_psr_activate(intel_dp);
1325 }
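
/*
 * Note on the ordering above (derived from this function, not from bspec):
 * the PSR VSC SDP and the sink side DPCD configuration are programmed first,
 * the source registers afterwards, and only once both sides are configured
 * is the hardware actually armed via intel_psr_activate().
 */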
1326
1327 static void intel_psr_exit(struct intel_dp *intel_dp)
1328 {
1329         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1330         u32 val;
1331
1332         if (!intel_dp->psr.active) {
1333                 if (transcoder_has_psr2(dev_priv, intel_dp->psr.transcoder)) {
1334                         val = intel_de_read(dev_priv,
1335                                             EDP_PSR2_CTL(intel_dp->psr.transcoder));
1336                         drm_WARN_ON(&dev_priv->drm, val & EDP_PSR2_ENABLE);
1337                 }
1338
1339                 val = intel_de_read(dev_priv,
1340                                     EDP_PSR_CTL(intel_dp->psr.transcoder));
1341                 drm_WARN_ON(&dev_priv->drm, val & EDP_PSR_ENABLE);
1342
1343                 return;
1344         }
1345
1346         if (intel_dp->psr.psr2_enabled) {
1347                 tgl_disallow_dc3co_on_psr2_exit(intel_dp);
1348                 val = intel_de_read(dev_priv,
1349                                     EDP_PSR2_CTL(intel_dp->psr.transcoder));
1350                 drm_WARN_ON(&dev_priv->drm, !(val & EDP_PSR2_ENABLE));
1351                 val &= ~EDP_PSR2_ENABLE;
1352                 intel_de_write(dev_priv,
1353                                EDP_PSR2_CTL(intel_dp->psr.transcoder), val);
1354         } else {
1355                 val = intel_de_read(dev_priv,
1356                                     EDP_PSR_CTL(intel_dp->psr.transcoder));
1357                 drm_WARN_ON(&dev_priv->drm, !(val & EDP_PSR_ENABLE));
1358                 val &= ~EDP_PSR_ENABLE;
1359                 intel_de_write(dev_priv,
1360                                EDP_PSR_CTL(intel_dp->psr.transcoder), val);
1361         }
1362         intel_dp->psr.active = false;
1363 }
1364
1365 static void intel_psr_wait_exit_locked(struct intel_dp *intel_dp)
1366 {
1367         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1368         i915_reg_t psr_status;
1369         u32 psr_status_mask;
1370
1371         if (intel_dp->psr.psr2_enabled) {
1372                 psr_status = EDP_PSR2_STATUS(intel_dp->psr.transcoder);
1373                 psr_status_mask = EDP_PSR2_STATUS_STATE_MASK;
1374         } else {
1375                 psr_status = EDP_PSR_STATUS(intel_dp->psr.transcoder);
1376                 psr_status_mask = EDP_PSR_STATUS_STATE_MASK;
1377         }
1378
1379         /* Wait till PSR is idle */
1380         if (intel_de_wait_for_clear(dev_priv, psr_status,
1381                                     psr_status_mask, 2000))
1382                 drm_err(&dev_priv->drm, "Timed out waiting PSR idle state\n");
1383 }
1384
1385 static void intel_psr_disable_locked(struct intel_dp *intel_dp)
1386 {
1387         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1388         enum phy phy = intel_port_to_phy(dev_priv,
1389                                          dp_to_dig_port(intel_dp)->base.port);
1390
1391         lockdep_assert_held(&intel_dp->psr.lock);
1392
1393         if (!intel_dp->psr.enabled)
1394                 return;
1395
1396         drm_dbg_kms(&dev_priv->drm, "Disabling PSR%s\n",
1397                     intel_dp->psr.psr2_enabled ? "2" : "1");
1398
1399         intel_psr_exit(intel_dp);
1400         intel_psr_wait_exit_locked(intel_dp);
1401
1402         /*
1403          * Wa_16013835468
1404          * Wa_14015648006
1405          */
1406         if (DISPLAY_VER(dev_priv) >= 11)
1407                 intel_de_rmw(dev_priv, GEN8_CHICKEN_DCPR_1,
1408                              wa_16013835468_bit_get(intel_dp), 0);
1409
1410         if (intel_dp->psr.psr2_enabled) {
1411                 /* Wa_16012604467:adlp,mtl[a0,b0] */
1412                 if (IS_MTL_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0))
1413                         intel_de_rmw(dev_priv,
1414                                      MTL_CLKGATE_DIS_TRANS(intel_dp->psr.transcoder),
1415                                      MTL_CLKGATE_DIS_TRANS_DMASC_GATING_DIS, 0);
1416                 else if (IS_ALDERLAKE_P(dev_priv))
1417                         intel_de_rmw(dev_priv, CLKGATE_DIS_MISC,
1418                                      CLKGATE_DIS_MISC_DMASC_GATING_DIS, 0);
1419         }
1420
1421         intel_snps_phy_update_psr_power_state(dev_priv, phy, false);
1422
1423         /* Disable PSR on Sink */
1424         drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG, 0);
1425
1426         if (intel_dp->psr.psr2_enabled)
1427                 drm_dp_dpcd_writeb(&intel_dp->aux, DP_RECEIVER_ALPM_CONFIG, 0);
1428
1429         intel_dp->psr.enabled = false;
1430         intel_dp->psr.psr2_enabled = false;
1431         intel_dp->psr.psr2_sel_fetch_enabled = false;
1432         intel_dp->psr.psr2_sel_fetch_cff_enabled = false;
1433 }
1434
1435 /**
1436  * intel_psr_disable - Disable PSR
1437  * @intel_dp: Intel DP
1438  * @old_crtc_state: old CRTC state
1439  *
1440  * This function needs to be called before disabling pipe.
1441  * This function needs to be called before disabling the pipe.
1442 void intel_psr_disable(struct intel_dp *intel_dp,
1443                        const struct intel_crtc_state *old_crtc_state)
1444 {
1445         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1446
1447         if (!old_crtc_state->has_psr)
1448                 return;
1449
1450         if (drm_WARN_ON(&dev_priv->drm, !CAN_PSR(intel_dp)))
1451                 return;
1452
1453         mutex_lock(&intel_dp->psr.lock);
1454
1455         intel_psr_disable_locked(intel_dp);
1456
1457         mutex_unlock(&intel_dp->psr.lock);
1458         cancel_work_sync(&intel_dp->psr.work);
1459         cancel_delayed_work_sync(&intel_dp->psr.dc3co_work);
1460 }
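
/*
 * Note (an observation from this file, not a documented requirement): the
 * work items above are cancelled only after psr.lock has been dropped,
 * because intel_psr_work() itself takes psr.lock and cancelling it
 * synchronously while holding the lock could deadlock.
 */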
1461
1462 /**
1463  * intel_psr_pause - Pause PSR
1464  * @intel_dp: Intel DP
1465  *
1466  * This function needs to be called after enabling PSR.
1467  */
1468 void intel_psr_pause(struct intel_dp *intel_dp)
1469 {
1470         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1471         struct intel_psr *psr = &intel_dp->psr;
1472
1473         if (!CAN_PSR(intel_dp))
1474                 return;
1475
1476         mutex_lock(&psr->lock);
1477
1478         if (!psr->enabled) {
1479                 mutex_unlock(&psr->lock);
1480                 return;
1481         }
1482
1483         /* If we ever hit this, we will need to add refcount to pause/resume */
1484         drm_WARN_ON(&dev_priv->drm, psr->paused);
1485
1486         intel_psr_exit(intel_dp);
1487         intel_psr_wait_exit_locked(intel_dp);
1488         psr->paused = true;
1489
1490         mutex_unlock(&psr->lock);
1491
1492         cancel_work_sync(&psr->work);
1493         cancel_delayed_work_sync(&psr->dc3co_work);
1494 }
1495
1496 /**
1497  * intel_psr_resume - Resume PSR
1498  * @intel_dp: Intel DP
1499  *
1500  * This function needs to be called after pausing PSR.
1501  */
1502 void intel_psr_resume(struct intel_dp *intel_dp)
1503 {
1504         struct intel_psr *psr = &intel_dp->psr;
1505
1506         if (!CAN_PSR(intel_dp))
1507                 return;
1508
1509         mutex_lock(&psr->lock);
1510
1511         if (!psr->paused)
1512                 goto unlock;
1513
1514         psr->paused = false;
1515         intel_psr_activate(intel_dp);
1516
1517 unlock:
1518         mutex_unlock(&psr->lock);
1519 }
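
/*
 * Illustrative pause/resume pairing (a minimal sketch, not lifted from an
 * actual caller): code that must touch PSR-sensitive hardware outside of a
 * full modeset is expected to bracket the access like this:
 *
 *	intel_psr_pause(intel_dp);
 *	... program registers that must not race with PSR entry ...
 *	intel_psr_resume(intel_dp);
 *
 * intel_psr_pause() exits PSR and waits for the idle state, so the resume
 * side only needs to re-activate it.
 */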
1520
1521 static u32 man_trk_ctl_enable_bit_get(struct drm_i915_private *dev_priv)
1522 {
1523         return IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(dev_priv) >= 14 ? 0 :
1524                 PSR2_MAN_TRK_CTL_ENABLE;
1525 }
1526
1527 static u32 man_trk_ctl_single_full_frame_bit_get(struct drm_i915_private *dev_priv)
1528 {
1529         return IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(dev_priv) >= 14 ?
1530                ADLP_PSR2_MAN_TRK_CTL_SF_SINGLE_FULL_FRAME :
1531                PSR2_MAN_TRK_CTL_SF_SINGLE_FULL_FRAME;
1532 }
1533
1534 static u32 man_trk_ctl_partial_frame_bit_get(struct drm_i915_private *dev_priv)
1535 {
1536         return IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(dev_priv) >= 14 ?
1537                ADLP_PSR2_MAN_TRK_CTL_SF_PARTIAL_FRAME_UPDATE :
1538                PSR2_MAN_TRK_CTL_SF_PARTIAL_FRAME_UPDATE;
1539 }
1540
1541 static u32 man_trk_ctl_continuos_full_frame(struct drm_i915_private *dev_priv)
1542 {
1543         return IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(dev_priv) >= 14 ?
1544                ADLP_PSR2_MAN_TRK_CTL_SF_CONTINUOS_FULL_FRAME :
1545                PSR2_MAN_TRK_CTL_SF_CONTINUOS_FULL_FRAME;
1546 }
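
/*
 * The four helpers above only hide the register layout differences: on
 * ADL-P and display 14+ there is no separate enable bit (the getter returns
 * 0) and the SFF/CFF/partial frame fields live at the ADLP_* offsets, so
 * callers build PSR2_MAN_TRK_CTL values through these getters instead of
 * using the raw macros directly.
 */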
1547
1548 static void psr_force_hw_tracking_exit(struct intel_dp *intel_dp)
1549 {
1550         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1551
1552         if (intel_dp->psr.psr2_sel_fetch_enabled)
1553                 intel_de_write(dev_priv,
1554                                PSR2_MAN_TRK_CTL(intel_dp->psr.transcoder),
1555                                man_trk_ctl_enable_bit_get(dev_priv) |
1556                                man_trk_ctl_partial_frame_bit_get(dev_priv) |
1557                                man_trk_ctl_single_full_frame_bit_get(dev_priv) |
1558                                man_trk_ctl_continuos_full_frame(dev_priv));
1559
1560         /*
1561          * Display WA #0884: skl+
1562          * This documented WA for bxt can be safely applied
1563          * broadly, so we can force HW tracking to exit PSR
1564          * instead of disabling and re-enabling it.
1565          * The workaround tells us to write 0 to CUR_SURFLIVE_A,
1566          * but it makes more sense to write to the currently
1567          * active pipe.
1568          *
1569          * This workaround does not exist for platforms with display 10 or
1570          * newer, but testing proved that it works up to display 13; for
1571          * anything newer additional testing will be needed.
1572          */
1573         intel_de_write(dev_priv, CURSURFLIVE(intel_dp->psr.pipe), 0);
1574 }
1575
1576 void intel_psr2_disable_plane_sel_fetch_arm(struct intel_plane *plane,
1577                                             const struct intel_crtc_state *crtc_state)
1578 {
1579         struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
1580         enum pipe pipe = plane->pipe;
1581
1582         if (!crtc_state->enable_psr2_sel_fetch)
1583                 return;
1584
1585         intel_de_write_fw(dev_priv, PLANE_SEL_FETCH_CTL(pipe, plane->id), 0);
1586 }
1587
1588 void intel_psr2_program_plane_sel_fetch_arm(struct intel_plane *plane,
1589                                             const struct intel_crtc_state *crtc_state,
1590                                             const struct intel_plane_state *plane_state)
1591 {
1592         struct drm_i915_private *i915 = to_i915(plane->base.dev);
1593         enum pipe pipe = plane->pipe;
1594
1595         if (!crtc_state->enable_psr2_sel_fetch)
1596                 return;
1597
1598         if (plane->id == PLANE_CURSOR)
1599                 intel_de_write_fw(i915, PLANE_SEL_FETCH_CTL(pipe, plane->id),
1600                                   plane_state->ctl);
1601         else
1602                 intel_de_write_fw(i915, PLANE_SEL_FETCH_CTL(pipe, plane->id),
1603                                   PLANE_SEL_FETCH_CTL_ENABLE);
1604 }
1605
1606 void intel_psr2_program_plane_sel_fetch_noarm(struct intel_plane *plane,
1607                                               const struct intel_crtc_state *crtc_state,
1608                                               const struct intel_plane_state *plane_state,
1609                                               int color_plane)
1610 {
1611         struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
1612         enum pipe pipe = plane->pipe;
1613         const struct drm_rect *clip;
1614         u32 val;
1615         int x, y;
1616
1617         if (!crtc_state->enable_psr2_sel_fetch)
1618                 return;
1619
1620         if (plane->id == PLANE_CURSOR)
1621                 return;
1622
1623         clip = &plane_state->psr2_sel_fetch_area;
1624
1625         val = (clip->y1 + plane_state->uapi.dst.y1) << 16;
1626         val |= plane_state->uapi.dst.x1;
1627         intel_de_write_fw(dev_priv, PLANE_SEL_FETCH_POS(pipe, plane->id), val);
1628
1629         x = plane_state->view.color_plane[color_plane].x;
1630
1631         /*
1632          * From Bspec: UV surface Start Y Position = half of Y plane Y
1633          * start position.
1634          */
1635         if (!color_plane)
1636                 y = plane_state->view.color_plane[color_plane].y + clip->y1;
1637         else
1638                 y = plane_state->view.color_plane[color_plane].y + clip->y1 / 2;
1639
1640         val = y << 16 | x;
1641
1642         intel_de_write_fw(dev_priv, PLANE_SEL_FETCH_OFFSET(pipe, plane->id),
1643                           val);
1644
1645         /* Sizes are 0 based */
1646         val = (drm_rect_height(clip) - 1) << 16;
1647         val |= (drm_rect_width(&plane_state->uapi.src) >> 16) - 1;
1648         intel_de_write_fw(dev_priv, PLANE_SEL_FETCH_SIZE(pipe, plane->id), val);
1649 }
1650
1651 void intel_psr2_program_trans_man_trk_ctl(const struct intel_crtc_state *crtc_state)
1652 {
1653         struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
1654         struct intel_encoder *encoder;
1655
1656         if (!crtc_state->enable_psr2_sel_fetch)
1657                 return;
1658
1659         for_each_intel_encoder_mask_with_psr(&dev_priv->drm, encoder,
1660                                              crtc_state->uapi.encoder_mask) {
1661                 struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
1662
1663                 lockdep_assert_held(&intel_dp->psr.lock);
1664                 if (intel_dp->psr.psr2_sel_fetch_cff_enabled)
1665                         return;
1666                 break;
1667         }
1668
1669         intel_de_write(dev_priv, PSR2_MAN_TRK_CTL(crtc_state->cpu_transcoder),
1670                        crtc_state->psr2_man_track_ctl);
1671 }
1672
1673 static void psr2_man_trk_ctl_calc(struct intel_crtc_state *crtc_state,
1674                                   struct drm_rect *clip, bool full_update)
1675 {
1676         struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
1677         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1678         u32 val = man_trk_ctl_enable_bit_get(dev_priv);
1679
1680         /* SF partial frame enable has to be set even on full update */
1681         val |= man_trk_ctl_partial_frame_bit_get(dev_priv);
1682
1683         if (full_update) {
1684                 val |= man_trk_ctl_single_full_frame_bit_get(dev_priv);
1685                 val |= man_trk_ctl_continuos_full_frame(dev_priv);
1686                 goto exit;
1687         }
1688
1689         if (clip->y1 == -1)
1690                 goto exit;
1691
1692         if (IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(dev_priv) >= 14) {
1693                 val |= ADLP_PSR2_MAN_TRK_CTL_SU_REGION_START_ADDR(clip->y1);
1694                 val |= ADLP_PSR2_MAN_TRK_CTL_SU_REGION_END_ADDR(clip->y2 - 1);
1695         } else {
1696                 drm_WARN_ON(crtc_state->uapi.crtc->dev, clip->y1 % 4 || clip->y2 % 4);
1697
1698                 val |= PSR2_MAN_TRK_CTL_SU_REGION_START_ADDR(clip->y1 / 4 + 1);
1699                 val |= PSR2_MAN_TRK_CTL_SU_REGION_END_ADDR(clip->y2 / 4 + 1);
1700         }
1701 exit:
1702         crtc_state->psr2_man_track_ctl = val;
1703 }
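
/*
 * Worked example for the SU region encoding above (illustrative numbers
 * only): a clip with y1 = 32, y2 = 64 is programmed as start/end addresses
 * 32 and 63 on ADL-P and display 14+, while the older layout counts blocks
 * of 4 lines with 1-based addresses, giving 32 / 4 + 1 = 9 and
 * 64 / 4 + 1 = 17.
 */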
1704
1705 static void clip_area_update(struct drm_rect *overlap_damage_area,
1706                              struct drm_rect *damage_area,
1707                              struct drm_rect *pipe_src)
1708 {
1709         if (!drm_rect_intersect(damage_area, pipe_src))
1710                 return;
1711
1712         if (overlap_damage_area->y1 == -1) {
1713                 overlap_damage_area->y1 = damage_area->y1;
1714                 overlap_damage_area->y2 = damage_area->y2;
1715                 return;
1716         }
1717
1718         if (damage_area->y1 < overlap_damage_area->y1)
1719                 overlap_damage_area->y1 = damage_area->y1;
1720
1721         if (damage_area->y2 > overlap_damage_area->y2)
1722                 overlap_damage_area->y2 = damage_area->y2;
1723 }
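
/*
 * Example (illustrative values): with an existing overlap of y1 = 100,
 * y2 = 200 and a new damage rect of y1 = 50, y2 = 150 that intersects the
 * pipe, the overlap grows to y1 = 50, y2 = 200, i.e. the vertical union of
 * all damaged areas clipped to the pipe.
 */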
1724
1725 static void intel_psr2_sel_fetch_pipe_alignment(const struct intel_crtc_state *crtc_state,
1726                                                 struct drm_rect *pipe_clip)
1727 {
1728         struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
1729         const struct drm_dsc_config *vdsc_cfg = &crtc_state->dsc.config;
1730         u16 y_alignment;
1731
1732         /* ADLP aligns the SU region to vdsc slice height in case dsc is enabled */
1733         if (crtc_state->dsc.compression_enable &&
1734             (IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(dev_priv) >= 14))
1735                 y_alignment = vdsc_cfg->slice_height;
1736         else
1737                 y_alignment = crtc_state->su_y_granularity;
1738
1739         pipe_clip->y1 -= pipe_clip->y1 % y_alignment;
1740         if (pipe_clip->y2 % y_alignment)
1741                 pipe_clip->y2 = ((pipe_clip->y2 / y_alignment) + 1) * y_alignment;
1742 }
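
/*
 * Example of the alignment above (illustrative numbers): with a granularity
 * of 16 lines, a clip of y1 = 35, y2 = 70 is rounded out to y1 = 32,
 * y2 = 80 so the selective update region always covers whole alignment
 * blocks.
 */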
1743
1744 /*
1745  * TODO: Not clear how to handle planes with a negative position;
1746  * also planes are not updated if they have a negative X
1747  * position, so for now do a full update in these cases.
1748  *
1749  * Plane scaling and rotation are not supported by selective fetch and both
1750  * properties can change without a modeset, so they need to be checked at
1751  * every atomic commit.
1752  */
1753 static bool psr2_sel_fetch_plane_state_supported(const struct intel_plane_state *plane_state)
1754 {
1755         if (plane_state->uapi.dst.y1 < 0 ||
1756             plane_state->uapi.dst.x1 < 0 ||
1757             plane_state->scaler_id >= 0 ||
1758             plane_state->uapi.rotation != DRM_MODE_ROTATE_0)
1759                 return false;
1760
1761         return true;
1762 }
1763
1764 /*
1765  * Check for pipe properties that are not supported by selective fetch.
1766  *
1767  * TODO: pipe scaling causes a modeset but skl_update_scaler_crtc() is executed
1768  * after intel_psr_compute_config(), so for now keeping PSR2 selective fetch
1769  * enabled and going to the full update path.
1770  */
1771 static bool psr2_sel_fetch_pipe_state_supported(const struct intel_crtc_state *crtc_state)
1772 {
1773         if (crtc_state->scaler_state.scaler_id >= 0)
1774                 return false;
1775
1776         return true;
1777 }
1778
1779 int intel_psr2_sel_fetch_update(struct intel_atomic_state *state,
1780                                 struct intel_crtc *crtc)
1781 {
1782         struct drm_i915_private *dev_priv = to_i915(state->base.dev);
1783         struct intel_crtc_state *crtc_state = intel_atomic_get_new_crtc_state(state, crtc);
1784         struct drm_rect pipe_clip = { .x1 = 0, .y1 = -1, .x2 = INT_MAX, .y2 = -1 };
1785         struct intel_plane_state *new_plane_state, *old_plane_state;
1786         struct intel_plane *plane;
1787         bool full_update = false;
1788         int i, ret;
1789
1790         if (!crtc_state->enable_psr2_sel_fetch)
1791                 return 0;
1792
1793         if (!psr2_sel_fetch_pipe_state_supported(crtc_state)) {
1794                 full_update = true;
1795                 goto skip_sel_fetch_set_loop;
1796         }
1797
1798         /*
1799          * Calculate the minimal selective fetch area of each plane and
1800          * the pipe damaged area.
1801          * In the next loop the plane selective fetch area will actually be set
1802          * using the whole pipe damaged area.
1803          */
1804         for_each_oldnew_intel_plane_in_state(state, plane, old_plane_state,
1805                                              new_plane_state, i) {
1806                 struct drm_rect src, damaged_area = { .x1 = 0, .y1 = -1,
1807                                                       .x2 = INT_MAX };
1808
1809                 if (new_plane_state->uapi.crtc != crtc_state->uapi.crtc)
1810                         continue;
1811
1812                 if (!new_plane_state->uapi.visible &&
1813                     !old_plane_state->uapi.visible)
1814                         continue;
1815
1816                 if (!psr2_sel_fetch_plane_state_supported(new_plane_state)) {
1817                         full_update = true;
1818                         break;
1819                 }
1820
1821                 /*
1822                  * If the visibility or the plane position changed, mark the
1823                  * whole plane area as damaged as it needs to be completely
1824                  * redrawn in both the old and new positions.
1825                  */
1826                 if (new_plane_state->uapi.visible != old_plane_state->uapi.visible ||
1827                     !drm_rect_equals(&new_plane_state->uapi.dst,
1828                                      &old_plane_state->uapi.dst)) {
1829                         if (old_plane_state->uapi.visible) {
1830                                 damaged_area.y1 = old_plane_state->uapi.dst.y1;
1831                                 damaged_area.y2 = old_plane_state->uapi.dst.y2;
1832                                 clip_area_update(&pipe_clip, &damaged_area,
1833                                                  &crtc_state->pipe_src);
1834                         }
1835
1836                         if (new_plane_state->uapi.visible) {
1837                                 damaged_area.y1 = new_plane_state->uapi.dst.y1;
1838                                 damaged_area.y2 = new_plane_state->uapi.dst.y2;
1839                                 clip_area_update(&pipe_clip, &damaged_area,
1840                                                  &crtc_state->pipe_src);
1841                         }
1842                         continue;
1843                 } else if (new_plane_state->uapi.alpha != old_plane_state->uapi.alpha) {
1844                         /* If alpha changed mark the whole plane area as damaged */
1845                         damaged_area.y1 = new_plane_state->uapi.dst.y1;
1846                         damaged_area.y2 = new_plane_state->uapi.dst.y2;
1847                         clip_area_update(&pipe_clip, &damaged_area,
1848                                          &crtc_state->pipe_src);
1849                         continue;
1850                 }
1851
1852                 src = drm_plane_state_src(&new_plane_state->uapi);
1853                 drm_rect_fp_to_int(&src, &src);
1854
1855                 if (!drm_atomic_helper_damage_merged(&old_plane_state->uapi,
1856                                                      &new_plane_state->uapi, &damaged_area))
1857                         continue;
1858
1859                 damaged_area.y1 += new_plane_state->uapi.dst.y1 - src.y1;
1860                 damaged_area.y2 += new_plane_state->uapi.dst.y1 - src.y1;
1861                 damaged_area.x1 += new_plane_state->uapi.dst.x1 - src.x1;
1862                 damaged_area.x2 += new_plane_state->uapi.dst.x1 - src.x1;
1863
1864                 clip_area_update(&pipe_clip, &damaged_area, &crtc_state->pipe_src);
1865         }
1866
1867         /*
1868          * TODO: For now we are just using a full update in case
1869          * selective fetch area calculation fails. To optimize this we
1870          * should identify cases where this happens and fix the area
1871          * calculation for those.
1872          */
1873         if (pipe_clip.y1 == -1) {
1874                 drm_info_once(&dev_priv->drm,
1875                               "Selective fetch area calculation failed in pipe %c\n",
1876                               pipe_name(crtc->pipe));
1877                 full_update = true;
1878         }
1879
1880         if (full_update)
1881                 goto skip_sel_fetch_set_loop;
1882
1883         /* Wa_14014971492 */
1884         if ((IS_MTL_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0) ||
1885              IS_ALDERLAKE_P(dev_priv) || IS_TIGERLAKE(dev_priv)) &&
1886             crtc_state->splitter.enable)
1887                 pipe_clip.y1 = 0;
1888
1889         ret = drm_atomic_add_affected_planes(&state->base, &crtc->base);
1890         if (ret)
1891                 return ret;
1892
1893         intel_psr2_sel_fetch_pipe_alignment(crtc_state, &pipe_clip);
1894
1895         /*
1896          * Now that we have the pipe damaged area, check if it intersects
1897          * with each plane; if it does, set the plane selective fetch area.
1898          */
1899         for_each_oldnew_intel_plane_in_state(state, plane, old_plane_state,
1900                                              new_plane_state, i) {
1901                 struct drm_rect *sel_fetch_area, inter;
1902                 struct intel_plane *linked = new_plane_state->planar_linked_plane;
1903
1904                 if (new_plane_state->uapi.crtc != crtc_state->uapi.crtc ||
1905                     !new_plane_state->uapi.visible)
1906                         continue;
1907
1908                 inter = pipe_clip;
1909                 if (!drm_rect_intersect(&inter, &new_plane_state->uapi.dst))
1910                         continue;
1911
1912                 if (!psr2_sel_fetch_plane_state_supported(new_plane_state)) {
1913                         full_update = true;
1914                         break;
1915                 }
1916
1917                 sel_fetch_area = &new_plane_state->psr2_sel_fetch_area;
1918                 sel_fetch_area->y1 = inter.y1 - new_plane_state->uapi.dst.y1;
1919                 sel_fetch_area->y2 = inter.y2 - new_plane_state->uapi.dst.y1;
1920                 crtc_state->update_planes |= BIT(plane->id);
1921
1922                 /*
1923                  * Sel_fetch_area is calculated for UV plane. Use
1924                  * same area for Y plane as well.
1925                  */
1926                 if (linked) {
1927                         struct intel_plane_state *linked_new_plane_state;
1928                         struct drm_rect *linked_sel_fetch_area;
1929
1930                         linked_new_plane_state = intel_atomic_get_plane_state(state, linked);
1931                         if (IS_ERR(linked_new_plane_state))
1932                                 return PTR_ERR(linked_new_plane_state);
1933
1934                         linked_sel_fetch_area = &linked_new_plane_state->psr2_sel_fetch_area;
1935                         linked_sel_fetch_area->y1 = sel_fetch_area->y1;
1936                         linked_sel_fetch_area->y2 = sel_fetch_area->y2;
1937                         crtc_state->update_planes |= BIT(linked->id);
1938                 }
1939         }
1940
1941 skip_sel_fetch_set_loop:
1942         psr2_man_trk_ctl_calc(crtc_state, &pipe_clip, full_update);
1943         return 0;
1944 }
1945
1946 void intel_psr_pre_plane_update(struct intel_atomic_state *state,
1947                                 struct intel_crtc *crtc)
1948 {
1949         struct drm_i915_private *i915 = to_i915(state->base.dev);
1950         const struct intel_crtc_state *old_crtc_state =
1951                 intel_atomic_get_old_crtc_state(state, crtc);
1952         const struct intel_crtc_state *new_crtc_state =
1953                 intel_atomic_get_new_crtc_state(state, crtc);
1954         struct intel_encoder *encoder;
1955
1956         if (!HAS_PSR(i915))
1957                 return;
1958
1959         for_each_intel_encoder_mask_with_psr(state->base.dev, encoder,
1960                                              old_crtc_state->uapi.encoder_mask) {
1961                 struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
1962                 struct intel_psr *psr = &intel_dp->psr;
1963                 bool needs_to_disable = false;
1964
1965                 mutex_lock(&psr->lock);
1966
1967                 /*
1968                  * Reasons to disable:
1969                  * - PSR disabled in new state
1970                  * - All planes will go inactive
1971                  * - Changing between PSR versions
1972                  * - Display WA #1136: skl, bxt
1973                  */
1974                 needs_to_disable |= intel_crtc_needs_modeset(new_crtc_state);
1975                 needs_to_disable |= !new_crtc_state->has_psr;
1976                 needs_to_disable |= !new_crtc_state->active_planes;
1977                 needs_to_disable |= new_crtc_state->has_psr2 != psr->psr2_enabled;
1978                 needs_to_disable |= DISPLAY_VER(i915) < 11 &&
1979                         new_crtc_state->wm_level_disabled;
1980
1981                 if (psr->enabled && needs_to_disable)
1982                         intel_psr_disable_locked(intel_dp);
1983                 else if (psr->enabled && new_crtc_state->wm_level_disabled)
1984                         /* Wa_14015648006 */
1985                         wm_optimization_wa(intel_dp, new_crtc_state);
1986
1987                 mutex_unlock(&psr->lock);
1988         }
1989 }
1990
1991 static void _intel_psr_post_plane_update(const struct intel_atomic_state *state,
1992                                          const struct intel_crtc_state *crtc_state)
1993 {
1994         struct drm_i915_private *dev_priv = to_i915(state->base.dev);
1995         struct intel_encoder *encoder;
1996
1997         if (!crtc_state->has_psr)
1998                 return;
1999
2000         for_each_intel_encoder_mask_with_psr(state->base.dev, encoder,
2001                                              crtc_state->uapi.encoder_mask) {
2002                 struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
2003                 struct intel_psr *psr = &intel_dp->psr;
2004                 bool keep_disabled = false;
2005
2006                 mutex_lock(&psr->lock);
2007
2008                 drm_WARN_ON(&dev_priv->drm, psr->enabled && !crtc_state->active_planes);
2009
2010                 keep_disabled |= psr->sink_not_reliable;
2011                 keep_disabled |= !crtc_state->active_planes;
2012
2013                 /* Display WA #1136: skl, bxt */
2014                 keep_disabled |= DISPLAY_VER(dev_priv) < 11 &&
2015                         crtc_state->wm_level_disabled;
2016
2017                 if (!psr->enabled && !keep_disabled)
2018                         intel_psr_enable_locked(intel_dp, crtc_state);
2019                 else if (psr->enabled && !crtc_state->wm_level_disabled)
2020                         /* Wa_14015648006 */
2021                         wm_optimization_wa(intel_dp, crtc_state);
2022
2023                 /* Force a PSR exit when enabling CRC to avoid CRC timeouts */
2024                 if (crtc_state->crc_enabled && psr->enabled)
2025                         psr_force_hw_tracking_exit(intel_dp);
2026
2027                 mutex_unlock(&psr->lock);
2028         }
2029 }
2030
2031 void intel_psr_post_plane_update(const struct intel_atomic_state *state)
2032 {
2033         struct drm_i915_private *dev_priv = to_i915(state->base.dev);
2034         struct intel_crtc_state *crtc_state;
2035         struct intel_crtc *crtc;
2036         int i;
2037
2038         if (!HAS_PSR(dev_priv))
2039                 return;
2040
2041         for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i)
2042                 _intel_psr_post_plane_update(state, crtc_state);
2043 }
2044
2045 static int _psr2_ready_for_pipe_update_locked(struct intel_dp *intel_dp)
2046 {
2047         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
2048
2049         /*
2050          * Any state lower than EDP_PSR2_STATUS_STATE_DEEP_SLEEP is enough.
2051          * As all higher states have bit 4 of the PSR2 state set we can just wait for
2052          * EDP_PSR2_STATUS_STATE_DEEP_SLEEP to be cleared.
2053          */
2054         return intel_de_wait_for_clear(dev_priv,
2055                                        EDP_PSR2_STATUS(intel_dp->psr.transcoder),
2056                                        EDP_PSR2_STATUS_STATE_DEEP_SLEEP, 50);
2057 }
2058
2059 static int _psr1_ready_for_pipe_update_locked(struct intel_dp *intel_dp)
2060 {
2061         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
2062
2063         /*
2064          * From bspec: Panel Self Refresh (BDW+)
2065          * Max. time for PSR to idle = Inverse of the refresh rate + 6 ms of
2066          * exit training time + 1.5 ms of aux channel handshake (~24 ms at
2067          * 60 Hz). 50 ms is defensive enough to cover everything.
2068          */
2069         return intel_de_wait_for_clear(dev_priv,
2070                                        EDP_PSR_STATUS(intel_dp->psr.transcoder),
2071                                        EDP_PSR_STATUS_STATE_MASK, 50);
2072 }
2073
2074 /**
2075  * intel_psr_wait_for_idle_locked - wait for PSR to be ready for a pipe update
2076  * @new_crtc_state: new CRTC state
2077  *
2078  * This function is expected to be called from pipe_update_start() where it is
2079  * not expected to race with PSR enable or disable.
2080  */
2081 void intel_psr_wait_for_idle_locked(const struct intel_crtc_state *new_crtc_state)
2082 {
2083         struct drm_i915_private *dev_priv = to_i915(new_crtc_state->uapi.crtc->dev);
2084         struct intel_encoder *encoder;
2085
2086         if (!new_crtc_state->has_psr)
2087                 return;
2088
2089         for_each_intel_encoder_mask_with_psr(&dev_priv->drm, encoder,
2090                                              new_crtc_state->uapi.encoder_mask) {
2091                 struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
2092                 int ret;
2093
2094                 lockdep_assert_held(&intel_dp->psr.lock);
2095
2096                 if (!intel_dp->psr.enabled)
2097                         continue;
2098
2099                 if (intel_dp->psr.psr2_enabled)
2100                         ret = _psr2_ready_for_pipe_update_locked(intel_dp);
2101                 else
2102                         ret = _psr1_ready_for_pipe_update_locked(intel_dp);
2103
2104                 if (ret)
2105                         drm_err(&dev_priv->drm, "PSR wait timed out, atomic update may fail\n");
2106         }
2107 }
2108
2109 static bool __psr_wait_for_idle_locked(struct intel_dp *intel_dp)
2110 {
2111         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
2112         i915_reg_t reg;
2113         u32 mask;
2114         int err;
2115
2116         if (!intel_dp->psr.enabled)
2117                 return false;
2118
2119         if (intel_dp->psr.psr2_enabled) {
2120                 reg = EDP_PSR2_STATUS(intel_dp->psr.transcoder);
2121                 mask = EDP_PSR2_STATUS_STATE_MASK;
2122         } else {
2123                 reg = EDP_PSR_STATUS(intel_dp->psr.transcoder);
2124                 mask = EDP_PSR_STATUS_STATE_MASK;
2125         }
2126
2127         mutex_unlock(&intel_dp->psr.lock);
2128
2129         err = intel_de_wait_for_clear(dev_priv, reg, mask, 50);
2130         if (err)
2131                 drm_err(&dev_priv->drm,
2132                         "Timed out waiting for PSR Idle for re-enable\n");
2133
2134         /* After the unlocked wait, verify that PSR is still wanted! */
2135         mutex_lock(&intel_dp->psr.lock);
2136         return err == 0 && intel_dp->psr.enabled;
2137 }
2138
2139 static int intel_psr_fastset_force(struct drm_i915_private *dev_priv)
2140 {
2141         struct drm_connector_list_iter conn_iter;
2142         struct drm_modeset_acquire_ctx ctx;
2143         struct drm_atomic_state *state;
2144         struct drm_connector *conn;
2145         int err = 0;
2146
2147         state = drm_atomic_state_alloc(&dev_priv->drm);
2148         if (!state)
2149                 return -ENOMEM;
2150
2151         drm_modeset_acquire_init(&ctx, DRM_MODESET_ACQUIRE_INTERRUPTIBLE);
2152         state->acquire_ctx = &ctx;
2153
2154 retry:
2155
2156         drm_connector_list_iter_begin(&dev_priv->drm, &conn_iter);
2157         drm_for_each_connector_iter(conn, &conn_iter) {
2158                 struct drm_connector_state *conn_state;
2159                 struct drm_crtc_state *crtc_state;
2160
2161                 if (conn->connector_type != DRM_MODE_CONNECTOR_eDP)
2162                         continue;
2163
2164                 conn_state = drm_atomic_get_connector_state(state, conn);
2165                 if (IS_ERR(conn_state)) {
2166                         err = PTR_ERR(conn_state);
2167                         break;
2168                 }
2169
2170                 if (!conn_state->crtc)
2171                         continue;
2172
2173                 crtc_state = drm_atomic_get_crtc_state(state, conn_state->crtc);
2174                 if (IS_ERR(crtc_state)) {
2175                         err = PTR_ERR(crtc_state);
2176                         break;
2177                 }
2178
2179                 /* Mark mode as changed to trigger a pipe->update() */
2180                 crtc_state->mode_changed = true;
2181         }
2182         drm_connector_list_iter_end(&conn_iter);
2183
2184         if (err == 0)
2185                 err = drm_atomic_commit(state);
2186
2187         if (err == -EDEADLK) {
2188                 drm_atomic_state_clear(state);
2189                 err = drm_modeset_backoff(&ctx);
2190                 if (!err)
2191                         goto retry;
2192         }
2193
2194         drm_modeset_drop_locks(&ctx);
2195         drm_modeset_acquire_fini(&ctx);
2196         drm_atomic_state_put(state);
2197
2198         return err;
2199 }
2200
2201 int intel_psr_debug_set(struct intel_dp *intel_dp, u64 val)
2202 {
2203         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
2204         const u32 mode = val & I915_PSR_DEBUG_MODE_MASK;
2205         u32 old_mode;
2206         int ret;
2207
2208         if (val & ~(I915_PSR_DEBUG_IRQ | I915_PSR_DEBUG_MODE_MASK) ||
2209             mode > I915_PSR_DEBUG_ENABLE_SEL_FETCH) {
2210                 drm_dbg_kms(&dev_priv->drm, "Invalid debug mask %llx\n", val);
2211                 return -EINVAL;
2212         }
2213
2214         ret = mutex_lock_interruptible(&intel_dp->psr.lock);
2215         if (ret)
2216                 return ret;
2217
2218         old_mode = intel_dp->psr.debug & I915_PSR_DEBUG_MODE_MASK;
2219         intel_dp->psr.debug = val;
2220
2221         /*
2222          * Do it right away if it's already enabled, otherwise it will be done
2223          * when enabling the source.
2224          */
2225         if (intel_dp->psr.enabled)
2226                 psr_irq_control(intel_dp);
2227
2228         mutex_unlock(&intel_dp->psr.lock);
2229
2230         if (old_mode != mode)
2231                 ret = intel_psr_fastset_force(dev_priv);
2232
2233         return ret;
2234 }
2235
2236 static void intel_psr_handle_irq(struct intel_dp *intel_dp)
2237 {
2238         struct intel_psr *psr = &intel_dp->psr;
2239
2240         intel_psr_disable_locked(intel_dp);
2241         psr->sink_not_reliable = true;
2242         /* let's make sure that the sink is awake */
2243         drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER, DP_SET_POWER_D0);
2244 }
2245
2246 static void intel_psr_work(struct work_struct *work)
2247 {
2248         struct intel_dp *intel_dp =
2249                 container_of(work, typeof(*intel_dp), psr.work);
2250
2251         mutex_lock(&intel_dp->psr.lock);
2252
2253         if (!intel_dp->psr.enabled)
2254                 goto unlock;
2255
2256         if (READ_ONCE(intel_dp->psr.irq_aux_error))
2257                 intel_psr_handle_irq(intel_dp);
2258
2259         /*
2260          * We have to make sure PSR is ready for re-enable,
2261          * otherwise it stays disabled until the next full enable/disable
2262          * cycle. PSR might take some time to get fully disabled
2263          * and be ready for re-enable.
2264          */
2265         if (!__psr_wait_for_idle_locked(intel_dp))
2266                 goto unlock;
2267
2268         /*
2269          * The delayed work can race with an invalidate hence we need to
2270          * recheck. Since psr_flush first clears this and then reschedules we
2271          * won't ever miss a flush when bailing out here.
2272          */
2273         if (intel_dp->psr.busy_frontbuffer_bits || intel_dp->psr.active)
2274                 goto unlock;
2275
2276         intel_psr_activate(intel_dp);
2277 unlock:
2278         mutex_unlock(&intel_dp->psr.lock);
2279 }
2280
2281 static void _psr_invalidate_handle(struct intel_dp *intel_dp)
2282 {
2283         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
2284
2285         if (intel_dp->psr.psr2_sel_fetch_enabled) {
2286                 u32 val;
2287
2288                 if (intel_dp->psr.psr2_sel_fetch_cff_enabled) {
2289                         /* Send one update, otherwise lag is observed on screen */
2290                         intel_de_write(dev_priv, CURSURFLIVE(intel_dp->psr.pipe), 0);
2291                         return;
2292                 }
2293
2294                 val = man_trk_ctl_enable_bit_get(dev_priv) |
2295                       man_trk_ctl_partial_frame_bit_get(dev_priv) |
2296                       man_trk_ctl_continuos_full_frame(dev_priv);
2297                 intel_de_write(dev_priv, PSR2_MAN_TRK_CTL(intel_dp->psr.transcoder), val);
2298                 intel_de_write(dev_priv, CURSURFLIVE(intel_dp->psr.pipe), 0);
2299                 intel_dp->psr.psr2_sel_fetch_cff_enabled = true;
2300         } else {
2301                 intel_psr_exit(intel_dp);
2302         }
2303 }
2304
2305 /**
2306  * intel_psr_invalidate - Invalidate PSR
2307  * @dev_priv: i915 device
2308  * @frontbuffer_bits: frontbuffer plane tracking bits
2309  * @origin: which operation caused the invalidate
2310  *
2311  * Since the hardware frontbuffer tracking has gaps we need to integrate
2312  * with the software frontbuffer tracking. This function gets called every
2313  * time frontbuffer rendering starts and a buffer gets dirtied. PSR must be
2314  * disabled if the frontbuffer mask contains a buffer relevant to PSR.
2315  *
2316  * Dirty frontbuffers relevant to PSR are tracked in busy_frontbuffer_bits.
2317  */
2318 void intel_psr_invalidate(struct drm_i915_private *dev_priv,
2319                           unsigned frontbuffer_bits, enum fb_op_origin origin)
2320 {
2321         struct intel_encoder *encoder;
2322
2323         if (origin == ORIGIN_FLIP)
2324                 return;
2325
2326         for_each_intel_encoder_with_psr(&dev_priv->drm, encoder) {
2327                 unsigned int pipe_frontbuffer_bits = frontbuffer_bits;
2328                 struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
2329
2330                 mutex_lock(&intel_dp->psr.lock);
2331                 if (!intel_dp->psr.enabled) {
2332                         mutex_unlock(&intel_dp->psr.lock);
2333                         continue;
2334                 }
2335
2336                 pipe_frontbuffer_bits &=
2337                         INTEL_FRONTBUFFER_ALL_MASK(intel_dp->psr.pipe);
2338                 intel_dp->psr.busy_frontbuffer_bits |= pipe_frontbuffer_bits;
2339
2340                 if (pipe_frontbuffer_bits)
2341                         _psr_invalidate_handle(intel_dp);
2342
2343                 mutex_unlock(&intel_dp->psr.lock);
2344         }
2345 }
2346 /*
2347  * When we fully rely on PSR2 S/W tracking in the future,
2348  * intel_psr_flush() will also invalidate and flush the PSR for the
2349  * ORIGIN_FLIP event, therefore tgl_dc3co_flush_locked() will need to be
2350  * changed accordingly.
2351  */
2352 static void
2353 tgl_dc3co_flush_locked(struct intel_dp *intel_dp, unsigned int frontbuffer_bits,
2354                        enum fb_op_origin origin)
2355 {
2356         if (!intel_dp->psr.dc3co_exitline || !intel_dp->psr.psr2_enabled ||
2357             !intel_dp->psr.active)
2358                 return;
2359
2360         /*
2361          * Every frontbuffer flush/flip event pushes back the delayed work,
2362          * so when the delayed work finally runs the display has been idle.
2363          */
2364         if (!(frontbuffer_bits &
2365             INTEL_FRONTBUFFER_ALL_MASK(intel_dp->psr.pipe)))
2366                 return;
2367
2368         tgl_psr2_enable_dc3co(intel_dp);
2369         mod_delayed_work(system_wq, &intel_dp->psr.dc3co_work,
2370                          intel_dp->psr.dc3co_exit_delay);
2371 }
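
/*
 * In other words (a summary of the code above): every qualifying flush
 * re-enables DC3CO and pushes the dc3co_work deadline out by
 * dc3co_exit_delay, so the delayed work (tgl_dc3co_disable_work) only fires
 * once the display has stayed idle for that long.
 */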
2372
2373 static void _psr_flush_handle(struct intel_dp *intel_dp)
2374 {
2375         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
2376
2377         if (intel_dp->psr.psr2_sel_fetch_enabled) {
2378                 if (intel_dp->psr.psr2_sel_fetch_cff_enabled) {
2379                         /* can we turn CFF off? */
2380                         if (intel_dp->psr.busy_frontbuffer_bits == 0) {
2381                                 u32 val = man_trk_ctl_enable_bit_get(dev_priv) |
2382                                         man_trk_ctl_partial_frame_bit_get(dev_priv) |
2383                                         man_trk_ctl_single_full_frame_bit_get(dev_priv) |
2384                                         man_trk_ctl_continuos_full_frame(dev_priv);
2385
2386                                 /*
2387                                  * Set psr2_sel_fetch_cff_enabled as false to allow selective
2388                                  * updates. Still keep cff bit enabled as we don't have proper
2389                                  * SU configuration in case an update is sent for any reason after
2390                                  * the SFF bit gets cleared by the HW on the next vblank.
2391                                  */
2392                                 intel_de_write(dev_priv, PSR2_MAN_TRK_CTL(intel_dp->psr.transcoder),
2393                                                val);
2394                                 intel_de_write(dev_priv, CURSURFLIVE(intel_dp->psr.pipe), 0);
2395                                 intel_dp->psr.psr2_sel_fetch_cff_enabled = false;
2396                         }
2397                 } else {
2398                         /*
2399                          * continuous full frame is disabled, only a single full
2400                          * frame is required
2401                          */
2402                         psr_force_hw_tracking_exit(intel_dp);
2403                 }
2404         } else {
2405                 psr_force_hw_tracking_exit(intel_dp);
2406
2407                 if (!intel_dp->psr.active && !intel_dp->psr.busy_frontbuffer_bits)
2408                         schedule_work(&intel_dp->psr.work);
2409         }
2410 }
2411
2412 /**
2413  * intel_psr_flush - Flush PSR
2414  * @dev_priv: i915 device
2415  * @frontbuffer_bits: frontbuffer plane tracking bits
2416  * @origin: which operation caused the flush
2417  *
2418  * Since the hardware frontbuffer tracking has gaps we need to integrate
2419  * with the software frontbuffer tracking. This function gets called every
2420  * time frontbuffer rendering has completed and flushed out to memory. PSR
2421  * can be enabled again if no other frontbuffer relevant to PSR is dirty.
2422  *
2423  * Dirty frontbuffers relevant to PSR are tracked in busy_frontbuffer_bits.
2424  */
2425 void intel_psr_flush(struct drm_i915_private *dev_priv,
2426                      unsigned frontbuffer_bits, enum fb_op_origin origin)
2427 {
2428         struct intel_encoder *encoder;
2429
2430         for_each_intel_encoder_with_psr(&dev_priv->drm, encoder) {
2431                 unsigned int pipe_frontbuffer_bits = frontbuffer_bits;
2432                 struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
2433
2434                 mutex_lock(&intel_dp->psr.lock);
2435                 if (!intel_dp->psr.enabled) {
2436                         mutex_unlock(&intel_dp->psr.lock);
2437                         continue;
2438                 }
2439
2440                 pipe_frontbuffer_bits &=
2441                         INTEL_FRONTBUFFER_ALL_MASK(intel_dp->psr.pipe);
2442                 intel_dp->psr.busy_frontbuffer_bits &= ~pipe_frontbuffer_bits;
2443
2444                 /*
2445                  * If PSR is paused by an explicit intel_psr_pause() call,
2446                  * we have to ensure that the PSR is not activated until
2447                  * intel_psr_resume() is called.
2448                  */
2449                 if (intel_dp->psr.paused)
2450                         goto unlock;
2451
2452                 if (origin == ORIGIN_FLIP ||
2453                     (origin == ORIGIN_CURSOR_UPDATE &&
2454                      !intel_dp->psr.psr2_sel_fetch_enabled)) {
2455                         tgl_dc3co_flush_locked(intel_dp, frontbuffer_bits, origin);
2456                         goto unlock;
2457                 }
2458
2459                 if (pipe_frontbuffer_bits == 0)
2460                         goto unlock;
2461
2462                 /* By definition flush = invalidate + flush */
2463                 _psr_flush_handle(intel_dp);
2464 unlock:
2465                 mutex_unlock(&intel_dp->psr.lock);
2466         }
2467 }
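
/*
 * Rough lifecycle as seen from the frontbuffer tracking code (a sketch, not
 * an exact caller): rendering to a PSR frontbuffer triggers
 * intel_psr_invalidate(), which blocks PSR (or switches selective fetch to
 * continuous full frames), and the matching intel_psr_flush() re-arms PSR
 * once the dirty bits for that pipe have cleared.
 */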
2468
2469 /**
2470  * intel_psr_init - Init basic PSR work and mutex.
2471  * @intel_dp: Intel DP
2472  *
2473  * This function is called after the connector has been initialized
2474  * (connector initialization handles the connector capabilities) and it
2475  * initializes the basic PSR state for each DP encoder.
2476  */
2477 void intel_psr_init(struct intel_dp *intel_dp)
2478 {
2479         struct intel_connector *connector = intel_dp->attached_connector;
2480         struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
2481         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
2482
2483         if (!HAS_PSR(dev_priv))
2484                 return;
2485
2486         /*
2487          * HSW spec explicitly says PSR is tied to port A.
2488          * BDW+ platforms have an instance of PSR registers per transcoder, but
2489          * BDW, GEN9 and GEN11 were not validated by the HW team on transcoders
2490          * other than the eDP one.
2491          * For now only one instance of PSR is supported for BDW, GEN9 and GEN11,
2492          * so let's keep it hardcoded to PORT_A for those platforms.
2493          * But GEN12 supports an instance of PSR registers per transcoder.
2494          */
2495         if (DISPLAY_VER(dev_priv) < 12 && dig_port->base.port != PORT_A) {
2496                 drm_dbg_kms(&dev_priv->drm,
2497                             "PSR condition failed: Port not supported\n");
2498                 return;
2499         }
2500
2501         intel_dp->psr.source_support = true;
2502
2503         /* Set link_standby x link_off defaults */
2504         if (DISPLAY_VER(dev_priv) < 12)
2505                 /* For new platforms up to TGL let's respect VBT back again */
2506                 intel_dp->psr.link_standby = connector->panel.vbt.psr.full_link;
2507
2508         INIT_WORK(&intel_dp->psr.work, intel_psr_work);
2509         INIT_DELAYED_WORK(&intel_dp->psr.dc3co_work, tgl_dc3co_disable_work);
2510         mutex_init(&intel_dp->psr.lock);
2511 }
2512
2513 static int psr_get_status_and_error_status(struct intel_dp *intel_dp,
2514                                            u8 *status, u8 *error_status)
2515 {
2516         struct drm_dp_aux *aux = &intel_dp->aux;
2517         int ret;
2518
2519         ret = drm_dp_dpcd_readb(aux, DP_PSR_STATUS, status);
2520         if (ret != 1)
2521                 return ret;
2522
2523         ret = drm_dp_dpcd_readb(aux, DP_PSR_ERROR_STATUS, error_status);
2524         if (ret != 1)
2525                 return ret;
2526
2527         *status = *status & DP_PSR_SINK_STATE_MASK;
2528
2529         return 0;
2530 }
2531
2532 static void psr_alpm_check(struct intel_dp *intel_dp)
2533 {
2534         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
2535         struct drm_dp_aux *aux = &intel_dp->aux;
2536         struct intel_psr *psr = &intel_dp->psr;
2537         u8 val;
2538         int r;
2539
2540         if (!psr->psr2_enabled)
2541                 return;
2542
2543         r = drm_dp_dpcd_readb(aux, DP_RECEIVER_ALPM_STATUS, &val);
2544         if (r != 1) {
2545                 drm_err(&dev_priv->drm, "Error reading ALPM status\n");
2546                 return;
2547         }
2548
2549         if (val & DP_ALPM_LOCK_TIMEOUT_ERROR) {
2550                 intel_psr_disable_locked(intel_dp);
2551                 psr->sink_not_reliable = true;
2552                 drm_dbg_kms(&dev_priv->drm,
2553                             "ALPM lock timeout error, disabling PSR\n");
2554
2555                 /* Clearing error */
2556                 drm_dp_dpcd_writeb(aux, DP_RECEIVER_ALPM_STATUS, val);
2557         }
2558 }
2559
2560 static void psr_capability_changed_check(struct intel_dp *intel_dp)
2561 {
2562         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
2563         struct intel_psr *psr = &intel_dp->psr;
2564         u8 val;
2565         int r;
2566
2567         r = drm_dp_dpcd_readb(&intel_dp->aux, DP_PSR_ESI, &val);
2568         if (r != 1) {
2569                 drm_err(&dev_priv->drm, "Error reading DP_PSR_ESI\n");
2570                 return;
2571         }
2572
2573         if (val & DP_PSR_CAPS_CHANGE) {
2574                 intel_psr_disable_locked(intel_dp);
2575                 psr->sink_not_reliable = true;
2576                 drm_dbg_kms(&dev_priv->drm,
2577                             "Sink PSR capability changed, disabling PSR\n");
2578
2579                 /* Clearing it */
2580                 drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_ESI, val);
2581         }
2582 }
2583
2584 void intel_psr_short_pulse(struct intel_dp *intel_dp)
2585 {
2586         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
2587         struct intel_psr *psr = &intel_dp->psr;
2588         u8 status, error_status;
2589         const u8 errors = DP_PSR_RFB_STORAGE_ERROR |
2590                           DP_PSR_VSC_SDP_UNCORRECTABLE_ERROR |
2591                           DP_PSR_LINK_CRC_ERROR;
2592
2593         if (!CAN_PSR(intel_dp))
2594                 return;
2595
2596         mutex_lock(&psr->lock);
2597
2598         if (!psr->enabled)
2599                 goto exit;
2600
2601         if (psr_get_status_and_error_status(intel_dp, &status, &error_status)) {
2602                 drm_err(&dev_priv->drm,
2603                         "Error reading PSR status or error status\n");
2604                 goto exit;
2605         }
2606
2607         if (status == DP_PSR_SINK_INTERNAL_ERROR || (error_status & errors)) {
2608                 intel_psr_disable_locked(intel_dp);
2609                 psr->sink_not_reliable = true;
2610         }
2611
2612         if (status == DP_PSR_SINK_INTERNAL_ERROR && !error_status)
2613                 drm_dbg_kms(&dev_priv->drm,
2614                             "PSR sink internal error, disabling PSR\n");
2615         if (error_status & DP_PSR_RFB_STORAGE_ERROR)
2616                 drm_dbg_kms(&dev_priv->drm,
2617                             "PSR RFB storage error, disabling PSR\n");
2618         if (error_status & DP_PSR_VSC_SDP_UNCORRECTABLE_ERROR)
2619                 drm_dbg_kms(&dev_priv->drm,
2620                             "PSR VSC SDP uncorrectable error, disabling PSR\n");
2621         if (error_status & DP_PSR_LINK_CRC_ERROR)
2622                 drm_dbg_kms(&dev_priv->drm,
2623                             "PSR Link CRC error, disabling PSR\n");
2624
2625         if (error_status & ~errors)
2626                 drm_err(&dev_priv->drm,
2627                         "PSR_ERROR_STATUS unhandled errors %x\n",
2628                         error_status & ~errors);
2629         /* clear status register */
2630         drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_ERROR_STATUS, error_status);
2631
2632         psr_alpm_check(intel_dp);
2633         psr_capability_changed_check(intel_dp);
2634
2635 exit:
2636         mutex_unlock(&psr->lock);
2637 }
2638
2639 bool intel_psr_enabled(struct intel_dp *intel_dp)
2640 {
2641         bool ret;
2642
2643         if (!CAN_PSR(intel_dp))
2644                 return false;
2645
2646         mutex_lock(&intel_dp->psr.lock);
2647         ret = intel_dp->psr.enabled;
2648         mutex_unlock(&intel_dp->psr.lock);
2649
2650         return ret;
2651 }
2652
2653 /**
2654  * intel_psr_lock - grab PSR lock
2655  * @crtc_state: the crtc state
2656  *
2657  * This is initially meant to be used around the CRTC update, when
2658  * vblank sensitive registers are updated and we need to grab the lock
2659  * before that to avoid vblank evasion.
2660  */
2661 void intel_psr_lock(const struct intel_crtc_state *crtc_state)
2662 {
2663         struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
2664         struct intel_encoder *encoder;
2665
2666         if (!crtc_state->has_psr)
2667                 return;
2668
2669         for_each_intel_encoder_mask_with_psr(&i915->drm, encoder,
2670                                              crtc_state->uapi.encoder_mask) {
2671                 struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
2672
2673                 mutex_lock(&intel_dp->psr.lock);
2674                 break;
2675         }
2676 }
2677
2678 /**
2679  * intel_psr_unlock - release PSR lock
2680  * @crtc_state: the crtc state
2681  *
2682  * Release the PSR lock that was held during pipe update.
2683  */
2684 void intel_psr_unlock(const struct intel_crtc_state *crtc_state)
2685 {
2686         struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
2687         struct intel_encoder *encoder;
2688
2689         if (!crtc_state->has_psr)
2690                 return;
2691
2692         for_each_intel_encoder_mask_with_psr(&i915->drm, encoder,
2693                                              crtc_state->uapi.encoder_mask) {
2694                 struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
2695
2696                 mutex_unlock(&intel_dp->psr.lock);
2697                 break;
2698         }
2699 }
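
/*
 * A minimal usage sketch (hypothetical caller, not taken from this file):
 * the pipe update path grabs the PSR lock before programming its
 * vblank-sensitive registers and drops it once the update is armed:
 *
 *	intel_psr_lock(new_crtc_state);
 *	... program vblank-sensitive registers ...
 *	intel_psr_unlock(new_crtc_state);
 */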
2700
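/*
 * Decode the live PSR/PSR2 hardware state machine field from
 * EDP_PSR_STATUS or EDP_PSR2_STATUS into a readable name for debugfs.
 */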
2701 static void
2702 psr_source_status(struct intel_dp *intel_dp, struct seq_file *m)
2703 {
2704         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
2705         const char *status = "unknown";
2706         u32 val, status_val;
2707
2708         if (intel_dp->psr.psr2_enabled) {
2709                 static const char * const live_status[] = {
2710                         "IDLE",
2711                         "CAPTURE",
2712                         "CAPTURE_FS",
2713                         "SLEEP",
2714                         "BUFON_FW",
2715                         "ML_UP",
2716                         "SU_STANDBY",
2717                         "FAST_SLEEP",
2718                         "DEEP_SLEEP",
2719                         "BUF_ON",
2720                         "TG_ON"
2721                 };
2722                 val = intel_de_read(dev_priv,
2723                                     EDP_PSR2_STATUS(intel_dp->psr.transcoder));
2724                 status_val = REG_FIELD_GET(EDP_PSR2_STATUS_STATE_MASK, val);
2725                 if (status_val < ARRAY_SIZE(live_status))
2726                         status = live_status[status_val];
2727         } else {
2728                 static const char * const live_status[] = {
2729                         "IDLE",
2730                         "SRDONACK",
2731                         "SRDENT",
2732                         "BUFOFF",
2733                         "BUFON",
2734                         "AUXACK",
2735                         "SRDOFFACK",
2736                         "SRDENT_ON",
2737                 };
2738                 val = intel_de_read(dev_priv,
2739                                     EDP_PSR_STATUS(intel_dp->psr.transcoder));
2740                 status_val = (val & EDP_PSR_STATUS_STATE_MASK) >>
2741                               EDP_PSR_STATUS_STATE_SHIFT;
2742                 if (status_val < ARRAY_SIZE(live_status))
2743                         status = live_status[status_val];
2744         }
2745
2746         seq_printf(m, "Source PSR status: %s [0x%08x]\n", status, val);
2747 }
2748
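/*
 * Dump the full PSR state for debugfs: sink support, the active PSR mode,
 * the source control and status registers, busy frontbuffer bits, the
 * HSW/BDW-only performance counter and the per-frame PSR2 selective
 * update block counts.
 */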
2749 static int intel_psr_status(struct seq_file *m, struct intel_dp *intel_dp)
2750 {
2751         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
2752         struct intel_psr *psr = &intel_dp->psr;
2753         intel_wakeref_t wakeref;
2754         const char *status;
2755         bool enabled;
2756         u32 val;
2757
2758         seq_printf(m, "Sink support: %s", str_yes_no(psr->sink_support));
2759         if (psr->sink_support)
2760                 seq_printf(m, " [0x%02x]", intel_dp->psr_dpcd[0]);
2761         seq_puts(m, "\n");
2762
2763         if (!psr->sink_support)
2764                 return 0;
2765
2766         wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
2767         mutex_lock(&psr->lock);
2768
2769         if (psr->enabled)
2770                 status = psr->psr2_enabled ? "PSR2 enabled" : "PSR1 enabled";
2771         else
2772                 status = "disabled";
2773         seq_printf(m, "PSR mode: %s\n", status);
2774
2775         if (!psr->enabled) {
2776                 seq_printf(m, "PSR sink not reliable: %s\n",
2777                            str_yes_no(psr->sink_not_reliable));
2778
2779                 goto unlock;
2780         }
2781
2782         if (psr->psr2_enabled) {
2783                 val = intel_de_read(dev_priv,
2784                                     EDP_PSR2_CTL(intel_dp->psr.transcoder));
2785                 enabled = val & EDP_PSR2_ENABLE;
2786         } else {
2787                 val = intel_de_read(dev_priv,
2788                                     EDP_PSR_CTL(intel_dp->psr.transcoder));
2789                 enabled = val & EDP_PSR_ENABLE;
2790         }
2791         seq_printf(m, "Source PSR ctl: %s [0x%08x]\n",
2792                    str_enabled_disabled(enabled), val);
2793         psr_source_status(intel_dp, m);
2794         seq_printf(m, "Busy frontbuffer bits: 0x%08x\n",
2795                    psr->busy_frontbuffer_bits);
2796
2797         /*
2798          * SKL+ perf counter is reset to 0 every time a DC state is entered
2799          */
2800         if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
2801                 val = intel_de_read(dev_priv,
2802                                     EDP_PSR_PERF_CNT(intel_dp->psr.transcoder));
2803                 val &= EDP_PSR_PERF_CNT_MASK;
2804                 seq_printf(m, "Performance counter: %u\n", val);
2805         }
2806
2807         if (psr->debug & I915_PSR_DEBUG_IRQ) {
2808                 seq_printf(m, "Last attempted entry at: %lld\n",
2809                            psr->last_entry_attempt);
2810                 seq_printf(m, "Last exit at: %lld\n", psr->last_exit);
2811         }
2812
2813         if (psr->psr2_enabled) {
2814                 u32 su_frames_val[3];
2815                 int frame;
2816
2817                 /*
2818                  * Each register packs three frames; read all 3 registers
2819                  * beforehand to minimize crossing a frame boundary between reads.
2820                  */
2821                 for (frame = 0; frame < PSR2_SU_STATUS_FRAMES; frame += 3) {
2822                         val = intel_de_read(dev_priv,
2823                                             PSR2_SU_STATUS(intel_dp->psr.transcoder, frame));
2824                         su_frames_val[frame / 3] = val;
2825                 }
2826
2827                 seq_puts(m, "Frame:\tPSR2 SU blocks:\n");
2828
2829                 for (frame = 0; frame < PSR2_SU_STATUS_FRAMES; frame++) {
2830                         u32 su_blocks;
2831
2832                         su_blocks = su_frames_val[frame / 3] &
2833                                     PSR2_SU_STATUS_MASK(frame);
2834                         su_blocks = su_blocks >> PSR2_SU_STATUS_SHIFT(frame);
2835                         seq_printf(m, "%d\t%d\n", frame, su_blocks);
2836                 }
2837
2838                 seq_printf(m, "PSR2 selective fetch: %s\n",
2839                            str_enabled_disabled(psr->psr2_sel_fetch_enabled));
2840         }
2841
2842 unlock:
2843         mutex_unlock(&psr->lock);
2844         intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
2845
2846         return 0;
2847 }
2848
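/* debugfs i915_edp_psr_status: report status of the first PSR-capable encoder */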
2849 static int i915_edp_psr_status_show(struct seq_file *m, void *data)
2850 {
2851         struct drm_i915_private *dev_priv = m->private;
2852         struct intel_dp *intel_dp = NULL;
2853         struct intel_encoder *encoder;
2854
2855         if (!HAS_PSR(dev_priv))
2856                 return -ENODEV;
2857
2858         /* Find the first eDP encoder that supports PSR */
2859         for_each_intel_encoder_with_psr(&dev_priv->drm, encoder) {
2860                 intel_dp = enc_to_intel_dp(encoder);
2861                 break;
2862         }
2863
2864         if (!intel_dp)
2865                 return -ENODEV;
2866
2867         return intel_psr_status(m, intel_dp);
2868 }
2869 DEFINE_SHOW_ATTRIBUTE(i915_edp_psr_status);
2870
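/*
 * debugfs i915_edp_psr_debug write handler: apply the requested debug mask
 * to every PSR-capable encoder while holding a runtime PM wakeref.
 */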
2871 static int
2872 i915_edp_psr_debug_set(void *data, u64 val)
2873 {
2874         struct drm_i915_private *dev_priv = data;
2875         struct intel_encoder *encoder;
2876         intel_wakeref_t wakeref;
2877         int ret = -ENODEV;
2878
2879         if (!HAS_PSR(dev_priv))
2880                 return ret;
2881
2882         for_each_intel_encoder_with_psr(&dev_priv->drm, encoder) {
2883                 struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
2884
2885                 drm_dbg_kms(&dev_priv->drm, "Setting PSR debug to %llx\n", val);
2886
2887                 wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
2888
2889                 // TODO: split to each transcoder's PSR debug state
2890                 ret = intel_psr_debug_set(intel_dp, val);
2891
2892                 intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
2893         }
2894
2895         return ret;
2896 }
2897
2898 static int
2899 i915_edp_psr_debug_get(void *data, u64 *val)
2900 {
2901         struct drm_i915_private *dev_priv = data;
2902         struct intel_encoder *encoder;
2903
2904         if (!HAS_PSR(dev_priv))
2905                 return -ENODEV;
2906
2907         for_each_intel_encoder_with_psr(&dev_priv->drm, encoder) {
2908                 struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
2909
2910                 // TODO: split to each transcoder's PSR debug state
2911                 *val = READ_ONCE(intel_dp->psr.debug);
2912                 return 0;
2913         }
2914
2915         return -ENODEV;
2916 }
2917
2918 DEFINE_SIMPLE_ATTRIBUTE(i915_edp_psr_debug_fops,
2919                         i915_edp_psr_debug_get, i915_edp_psr_debug_set,
2920                         "%llu\n");
2921
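/**
 * intel_psr_debugfs_register - create the device-level PSR debugfs files
 * @i915: i915 device instance
 *
 * Registers i915_edp_psr_debug and i915_edp_psr_status under the DRM
 * minor's debugfs root.
 */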
2922 void intel_psr_debugfs_register(struct drm_i915_private *i915)
2923 {
2924         struct drm_minor *minor = i915->drm.primary;
2925
2926         debugfs_create_file("i915_edp_psr_debug", 0644, minor->debugfs_root,
2927                             i915, &i915_edp_psr_debug_fops);
2928
2929         debugfs_create_file("i915_edp_psr_status", 0444, minor->debugfs_root,
2930                             i915, &i915_edp_psr_status_fops);
2931 }
2932
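/*
 * debugfs i915_psr_sink_status: read DP_PSR_STATUS from the sink's DPCD
 * and translate the sink self refresh state into a readable string.
 */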
2933 static int i915_psr_sink_status_show(struct seq_file *m, void *data)
2934 {
2935         struct intel_connector *connector = m->private;
2936         struct intel_dp *intel_dp = intel_attached_dp(connector);
2937         static const char * const sink_status[] = {
2938                 "inactive",
2939                 "transition to active, capture and display",
2940                 "active, display from RFB",
2941                 "active, capture and display on sink device timings",
2942                 "transition to inactive, capture and display, timing re-sync",
2943                 "reserved",
2944                 "reserved",
2945                 "sink internal error",
2946         };
2947         const char *str;
2948         int ret;
2949         u8 val;
2950
2951         if (!CAN_PSR(intel_dp)) {
2952                 seq_puts(m, "PSR Unsupported\n");
2953                 return -ENODEV;
2954         }
2955
2956         if (connector->base.status != connector_status_connected)
2957                 return -ENODEV;
2958
2959         ret = drm_dp_dpcd_readb(&intel_dp->aux, DP_PSR_STATUS, &val);
2960         if (ret != 1)
2961                 return ret < 0 ? ret : -EIO;
2962
2963         val &= DP_PSR_SINK_STATE_MASK;
2964         if (val < ARRAY_SIZE(sink_status))
2965                 str = sink_status[val];
2966         else
2967                 str = "unknown";
2968
2969         seq_printf(m, "Sink PSR status: 0x%x [%s]\n", val, str);
2970
2971         return 0;
2972 }
2973 DEFINE_SHOW_ATTRIBUTE(i915_psr_sink_status);
2974
2975 static int i915_psr_status_show(struct seq_file *m, void *data)
2976 {
2977         struct intel_connector *connector = m->private;
2978         struct intel_dp *intel_dp = intel_attached_dp(connector);
2979
2980         return intel_psr_status(m, intel_dp);
2981 }
2982 DEFINE_SHOW_ATTRIBUTE(i915_psr_status);
2983
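/**
 * intel_psr_connector_debugfs_add - add per-connector PSR debugfs files
 * @connector: connector to add the files for
 *
 * i915_psr_sink_status is created for every eDP connector; i915_psr_status
 * is added only when the source supports PSR.
 */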
2984 void intel_psr_connector_debugfs_add(struct intel_connector *connector)
2985 {
2986         struct drm_i915_private *i915 = to_i915(connector->base.dev);
2987         struct dentry *root = connector->base.debugfs_entry;
2988
2989         if (connector->base.connector_type != DRM_MODE_CONNECTOR_eDP)
2990                 return;
2991
2992         debugfs_create_file("i915_psr_sink_status", 0444, root,
2993                             connector, &i915_psr_sink_status_fops);
2994
2995         if (HAS_PSR(i915))
2996                 debugfs_create_file("i915_psr_status", 0444, root,
2997                                     connector, &i915_psr_status_fops);
2998 }