/*
 * Copyright © 2014 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

/**
 * DOC: Panel Self Refresh (PSR/SRD)
 *
 * Since Haswell, the display controller supports Panel Self-Refresh on
 * display panels which have a remote frame buffer (RFB) implemented
 * according to the PSR specification in eDP 1.3. The PSR feature allows
 * the display to go to lower standby states when the system is idle but
 * the display is on, as it eliminates display refresh requests to DDR
 * memory completely as long as the frame buffer for that display is
 * unchanged.
 *
 * Panel Self Refresh must be supported by both Hardware (source) and
 * Panel (sink).
 *
 * PSR saves power by caching the framebuffer in the panel RFB, which allows us
 * to power down the link and memory controller. For DSI panels the same idea
 * is called "manual mode".
 *
 * The implementation uses the hardware-based PSR support which automatically
 * enters/exits self-refresh mode. The hardware takes care of sending the
 * required DP aux message and could even retrain the link (that part isn't
 * enabled yet though). The hardware also keeps track of any frontbuffer
 * changes to know when to exit self-refresh mode again. Unfortunately that
 * part doesn't work too well, hence why the i915 PSR support uses the
 * software frontbuffer tracking to make sure it doesn't miss a screen
 * update. For this integration intel_psr_invalidate() and intel_psr_flush()
 * get called by the frontbuffer tracking code. Note that because of locking
 * issues the self-refresh re-enable code is done from a work queue, which
 * must be correctly synchronized/cancelled when shutting down the pipe.
 */
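
/*
 * A rough sketch of the frontbuffer-tracking interaction described above
 * (simplified; the actual callers live in the frontbuffer tracking code):
 *
 *   CPU/GPU dirties a frontbuffer
 *     -> intel_psr_invalidate()  records busy bits, forces PSR exit
 *   rendering is flushed out to memory
 *     -> intel_psr_flush()       clears busy bits, schedules psr.work
 *   delayed work runs
 *     -> intel_psr_work()        re-activates PSR once the HW is idle
 */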

#include <drm/drmP.h>

#include "intel_drv.h"
#include "i915_drv.h"

static inline enum intel_display_power_domain
psr_aux_domain(struct intel_dp *intel_dp)
{
        /* CNL HW requires corresponding AUX IOs to be powered up for PSR.
         * However, for non-A AUX ports the corresponding non-EDP transcoders
         * would have already enabled power well 2 and DC_OFF. This means we can
         * acquire a wider POWER_DOMAIN_AUX_{B,C,D,F} reference instead of a
         * specific AUX_IO reference without powering up any extra wells.
         * Note that PSR is enabled only on Port A even though this function
         * returns the correct domain for other ports too.
         */
        return intel_dp->aux_ch == AUX_CH_A ? POWER_DOMAIN_AUX_IO_A :
                                              intel_dp->aux_power_domain;
}

static void psr_aux_io_power_get(struct intel_dp *intel_dp)
{
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
        struct drm_i915_private *dev_priv = to_i915(intel_dig_port->base.base.dev);

        if (INTEL_GEN(dev_priv) < 10)
                return;

        intel_display_power_get(dev_priv, psr_aux_domain(intel_dp));
}

static void psr_aux_io_power_put(struct intel_dp *intel_dp)
{
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
        struct drm_i915_private *dev_priv = to_i915(intel_dig_port->base.base.dev);

        if (INTEL_GEN(dev_priv) < 10)
                return;

        intel_display_power_put(dev_priv, psr_aux_domain(intel_dp));
}

static bool intel_dp_get_y_cord_status(struct intel_dp *intel_dp)
{
        uint8_t psr_caps = 0;

        if (drm_dp_dpcd_readb(&intel_dp->aux, DP_PSR_CAPS, &psr_caps) != 1)
                return false;
        return psr_caps & DP_PSR2_SU_Y_COORDINATE_REQUIRED;
}

static bool intel_dp_get_colorimetry_status(struct intel_dp *intel_dp)
{
        uint8_t dprx = 0;

        if (drm_dp_dpcd_readb(&intel_dp->aux, DP_DPRX_FEATURE_ENUMERATION_LIST,
                              &dprx) != 1)
                return false;
        return dprx & DP_VSC_SDP_EXT_FOR_COLORIMETRY_SUPPORTED;
}

static bool intel_dp_get_alpm_status(struct intel_dp *intel_dp)
{
        uint8_t alpm_caps = 0;

        if (drm_dp_dpcd_readb(&intel_dp->aux, DP_RECEIVER_ALPM_CAP,
                              &alpm_caps) != 1)
                return false;
        return alpm_caps & DP_ALPM_CAP;
}

void intel_psr_init_dpcd(struct intel_dp *intel_dp)
{
        struct drm_i915_private *dev_priv =
                to_i915(dp_to_dig_port(intel_dp)->base.base.dev);

        drm_dp_dpcd_read(&intel_dp->aux, DP_PSR_SUPPORT, intel_dp->psr_dpcd,
                         sizeof(intel_dp->psr_dpcd));

        if (intel_dp->psr_dpcd[0] & DP_PSR_IS_SUPPORTED) {
                dev_priv->psr.sink_support = true;
                DRM_DEBUG_KMS("Detected EDP PSR Panel.\n");
        }

        if (INTEL_GEN(dev_priv) >= 9 &&
            (intel_dp->psr_dpcd[0] & DP_PSR2_IS_SUPPORTED)) {
                uint8_t frame_sync_cap;

                dev_priv->psr.sink_support = true;
                if (drm_dp_dpcd_readb(&intel_dp->aux,
                                      DP_SINK_DEVICE_AUX_FRAME_SYNC_CAP,
                                      &frame_sync_cap) != 1)
                        frame_sync_cap = 0;
                dev_priv->psr.aux_frame_sync = frame_sync_cap & DP_AUX_FRAME_SYNC_CAP;
                /* PSR2 needs frame sync as well */
                dev_priv->psr.psr2_support = dev_priv->psr.aux_frame_sync;
                DRM_DEBUG_KMS("PSR2 %s on sink\n",
                              dev_priv->psr.psr2_support ? "supported" : "not supported");

                if (dev_priv->psr.psr2_support) {
                        dev_priv->psr.y_cord_support =
                                intel_dp_get_y_cord_status(intel_dp);
                        dev_priv->psr.colorimetry_support =
                                intel_dp_get_colorimetry_status(intel_dp);
                        dev_priv->psr.alpm =
                                intel_dp_get_alpm_status(intel_dp);
                }
        }
}

static bool vlv_is_psr_active_on_pipe(struct drm_device *dev, int pipe)
{
        struct drm_i915_private *dev_priv = to_i915(dev);
        uint32_t val;

        val = I915_READ(VLV_PSRSTAT(pipe)) &
              VLV_EDP_PSR_CURR_STATE_MASK;
        return (val == VLV_EDP_PSR_ACTIVE_NORFB_UP) ||
               (val == VLV_EDP_PSR_ACTIVE_SF_UPDATE);
}

static void vlv_psr_setup_vsc(struct intel_dp *intel_dp,
                              const struct intel_crtc_state *crtc_state)
{
        struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
        uint32_t val;

        /* VLV auto-generates the VSC packet as per the eDP 1.3 spec, Table 3.10 */
        val  = I915_READ(VLV_VSCSDP(crtc->pipe));
        val &= ~VLV_EDP_PSR_SDP_FREQ_MASK;
        val |= VLV_EDP_PSR_SDP_FREQ_EVFRAME;
        I915_WRITE(VLV_VSCSDP(crtc->pipe), val);
}

static void hsw_psr_setup_vsc(struct intel_dp *intel_dp,
                              const struct intel_crtc_state *crtc_state)
{
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
        struct drm_i915_private *dev_priv = to_i915(intel_dig_port->base.base.dev);
        struct edp_vsc_psr psr_vsc;

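        /*
         * In the VSC SDP headers below, HB2 carries the SDP revision and
         * HB3 the number of valid data bytes; the value pairs come from
         * the eDP spec tables cited in the comments.
         */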
        if (dev_priv->psr.psr2_support) {
                /* Prepare VSC Header for SU as per EDP 1.4 spec, Table 6.11 */
                memset(&psr_vsc, 0, sizeof(psr_vsc));
                psr_vsc.sdp_header.HB0 = 0;
                psr_vsc.sdp_header.HB1 = 0x7;
                if (dev_priv->psr.colorimetry_support &&
                    dev_priv->psr.y_cord_support) {
                        psr_vsc.sdp_header.HB2 = 0x5;
                        psr_vsc.sdp_header.HB3 = 0x13;
                } else if (dev_priv->psr.y_cord_support) {
                        psr_vsc.sdp_header.HB2 = 0x4;
                        psr_vsc.sdp_header.HB3 = 0xe;
                } else {
                        psr_vsc.sdp_header.HB2 = 0x3;
                        psr_vsc.sdp_header.HB3 = 0xc;
                }
        } else {
                /* Prepare VSC packet as per EDP 1.3 spec, Table 3.10 */
                memset(&psr_vsc, 0, sizeof(psr_vsc));
                psr_vsc.sdp_header.HB0 = 0;
                psr_vsc.sdp_header.HB1 = 0x7;
                psr_vsc.sdp_header.HB2 = 0x2;
                psr_vsc.sdp_header.HB3 = 0x8;
        }

        intel_dig_port->write_infoframe(&intel_dig_port->base.base, crtc_state,
                                        DP_SDP_VSC, &psr_vsc, sizeof(psr_vsc));
}

static void vlv_psr_enable_sink(struct intel_dp *intel_dp)
{
        drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG,
                           DP_PSR_ENABLE | DP_PSR_MAIN_LINK_ACTIVE);
}

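/*
 * Pre-gen9 hardware has dedicated EDP_PSR_AUX_* register mirrors that the
 * PSR unit uses for its wake-up AUX transaction; gen9+ reuses the regular
 * AUX channel registers of the port instead.
 */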
static i915_reg_t psr_aux_ctl_reg(struct drm_i915_private *dev_priv,
                                  enum port port)
{
        if (INTEL_GEN(dev_priv) >= 9)
                return DP_AUX_CH_CTL(port);
        else
                return EDP_PSR_AUX_CTL;
}

static i915_reg_t psr_aux_data_reg(struct drm_i915_private *dev_priv,
                                   enum port port, int index)
{
        if (INTEL_GEN(dev_priv) >= 9)
                return DP_AUX_CH_DATA(port, index);
        else
                return EDP_PSR_AUX_DATA(index);
}

static void hsw_psr_enable_sink(struct intel_dp *intel_dp)
{
        struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
        struct drm_device *dev = dig_port->base.base.dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
        uint32_t aux_clock_divider;
        i915_reg_t aux_ctl_reg;
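        /*
         * Canned AUX message handed to the PSR hardware to wake the sink
         * on PSR exit: a native AUX write of DP_SET_POWER_D0 to DPCD
         * DP_SET_POWER (0x600). The AUX header encodes the payload length
         * minus one in byte 3, hence the "1 - 1" below.
         */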
        static const uint8_t aux_msg[] = {
                [0] = DP_AUX_NATIVE_WRITE << 4,
                [1] = DP_SET_POWER >> 8,
                [2] = DP_SET_POWER & 0xff,
                [3] = 1 - 1,
                [4] = DP_SET_POWER_D0,
        };
        enum port port = dig_port->base.port;
        u32 aux_ctl;
        int i;

        BUILD_BUG_ON(sizeof(aux_msg) > 20);

        aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, 0);

        /* Enable AUX frame sync at sink */
        if (dev_priv->psr.aux_frame_sync)
                drm_dp_dpcd_writeb(&intel_dp->aux,
                                DP_SINK_DEVICE_AUX_FRAME_SYNC_CONF,
                                DP_AUX_FRAME_SYNC_ENABLE);
        /* Enable ALPM at sink for psr2 */
        if (dev_priv->psr.psr2_support && dev_priv->psr.alpm)
                drm_dp_dpcd_writeb(&intel_dp->aux,
                                DP_RECEIVER_ALPM_CONFIG,
                                DP_ALPM_ENABLE);
        if (dev_priv->psr.link_standby)
                drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG,
                                   DP_PSR_ENABLE | DP_PSR_MAIN_LINK_ACTIVE);
        else
                drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG,
                                   DP_PSR_ENABLE);

        aux_ctl_reg = psr_aux_ctl_reg(dev_priv, port);

        /* Setup AUX registers */
        for (i = 0; i < sizeof(aux_msg); i += 4)
                I915_WRITE(psr_aux_data_reg(dev_priv, port, i >> 2),
                           intel_dp_pack_aux(&aux_msg[i], sizeof(aux_msg) - i));

        aux_ctl = intel_dp->get_aux_send_ctl(intel_dp, 0, sizeof(aux_msg),
                                             aux_clock_divider);
        I915_WRITE(aux_ctl_reg, aux_ctl);
}

static void vlv_psr_enable_source(struct intel_dp *intel_dp,
                                  const struct intel_crtc_state *crtc_state)
{
        struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
        struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
        struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);

        /* Transition from PSR_state 0 (disabled) to PSR_state 1 (inactive) */
        I915_WRITE(VLV_PSRCTL(crtc->pipe),
                   VLV_EDP_PSR_MODE_SW_TIMER |
                   VLV_EDP_PSR_SRC_TRANSMITTER_STATE |
                   VLV_EDP_PSR_ENABLE);
}

static void vlv_psr_activate(struct intel_dp *intel_dp)
{
        struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
        struct drm_device *dev = dig_port->base.base.dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
        struct drm_crtc *crtc = dig_port->base.base.crtc;
        enum pipe pipe = to_intel_crtc(crtc)->pipe;

        /*
         * Let's do the transition from PSR_state 1 (inactive) to
         * PSR_state 2 (transition to active - static frame transmission).
         * Then Hardware is responsible for the transition to
         * PSR_state 3 (active - no Remote Frame Buffer (RFB) update).
         */
        I915_WRITE(VLV_PSRCTL(pipe), I915_READ(VLV_PSRCTL(pipe)) |
                   VLV_EDP_PSR_ACTIVE_ENTRY);
}

static void hsw_activate_psr1(struct intel_dp *intel_dp)
{
        struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
        struct drm_device *dev = dig_port->base.base.dev;
        struct drm_i915_private *dev_priv = to_i915(dev);

        uint32_t max_sleep_time = 0x1f;
        /*
         * Let's respect VBT in case VBT asks for a higher idle_frame value.
         * Let's use 6 as the minimum to cover all known cases including the
         * off-by-one issue that HW has in some cases. Also there are cases
         * where the sink should be able to train with 5 or 6 idle patterns.
         */
        uint32_t idle_frames = max(6, dev_priv->vbt.psr.idle_frames);
        uint32_t val = EDP_PSR_ENABLE;

        val |= max_sleep_time << EDP_PSR_MAX_SLEEP_TIME_SHIFT;
        val |= idle_frames << EDP_PSR_IDLE_FRAME_SHIFT;

        if (IS_HASWELL(dev_priv))
                val |= EDP_PSR_MIN_LINK_ENTRY_TIME_8_LINES;

        if (dev_priv->psr.link_standby)
                val |= EDP_PSR_LINK_STANDBY;

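        /*
         * Bucket the VBT wakeup-time values into the discrete TP1/TP2/TP3
         * training-pattern durations the hardware supports.
         */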
        if (dev_priv->vbt.psr.tp1_wakeup_time > 5)
                val |= EDP_PSR_TP1_TIME_2500us;
        else if (dev_priv->vbt.psr.tp1_wakeup_time > 1)
                val |= EDP_PSR_TP1_TIME_500us;
        else if (dev_priv->vbt.psr.tp1_wakeup_time > 0)
                val |= EDP_PSR_TP1_TIME_100us;
        else
                val |= EDP_PSR_TP1_TIME_0us;

        if (dev_priv->vbt.psr.tp2_tp3_wakeup_time > 5)
                val |= EDP_PSR_TP2_TP3_TIME_2500us;
        else if (dev_priv->vbt.psr.tp2_tp3_wakeup_time > 1)
                val |= EDP_PSR_TP2_TP3_TIME_500us;
        else if (dev_priv->vbt.psr.tp2_tp3_wakeup_time > 0)
                val |= EDP_PSR_TP2_TP3_TIME_100us;
        else
                val |= EDP_PSR_TP2_TP3_TIME_0us;

        if (intel_dp_source_supports_hbr2(intel_dp) &&
            drm_dp_tps3_supported(intel_dp->dpcd))
                val |= EDP_PSR_TP1_TP3_SEL;
        else
                val |= EDP_PSR_TP1_TP2_SEL;

        val |= I915_READ(EDP_PSR_CTL) & EDP_PSR_RESTORE_PSR_ACTIVE_CTX_MASK;
        I915_WRITE(EDP_PSR_CTL, val);
}

static void hsw_activate_psr2(struct intel_dp *intel_dp)
{
        struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
        struct drm_device *dev = dig_port->base.base.dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
        /*
         * Let's respect VBT in case VBT asks for a higher idle_frame value.
         * Let's use 6 as the minimum to cover all known cases including the
         * off-by-one issue that HW has in some cases. Also there are cases
         * where the sink should be able to train with 5 or 6 idle patterns.
         */
        uint32_t idle_frames = max(6, dev_priv->vbt.psr.idle_frames);
        uint32_t val;
        uint8_t sink_latency;

        val = idle_frames << EDP_PSR_IDLE_FRAME_SHIFT;

        /*
         * FIXME: selective update is probably totally broken because it
         * doesn't mesh at all with our frontbuffer tracking. And the hw
         * alone isn't good enough.
         */
        val |= EDP_PSR2_ENABLE |
                EDP_SU_TRACK_ENABLE;

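        /*
         * The sink reports how many frames it needs to resynchronize after
         * exiting PSR2; transmit that many full frames, plus one, before
         * starting selective updates.
         */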
        if (drm_dp_dpcd_readb(&intel_dp->aux,
                                DP_SYNCHRONIZATION_LATENCY_IN_SINK,
                                &sink_latency) == 1) {
                sink_latency &= DP_MAX_RESYNC_FRAME_COUNT_MASK;
        } else {
                sink_latency = 0;
        }
        val |= EDP_PSR2_FRAME_BEFORE_SU(sink_latency + 1);

        if (dev_priv->vbt.psr.tp2_tp3_wakeup_time > 5)
                val |= EDP_PSR2_TP2_TIME_2500;
        else if (dev_priv->vbt.psr.tp2_tp3_wakeup_time > 1)
                val |= EDP_PSR2_TP2_TIME_500;
        else if (dev_priv->vbt.psr.tp2_tp3_wakeup_time > 0)
                val |= EDP_PSR2_TP2_TIME_100;
        else
                val |= EDP_PSR2_TP2_TIME_50;

        I915_WRITE(EDP_PSR2_CTL, val);
}

static void hsw_psr_activate(struct intel_dp *intel_dp)
{
        struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
        struct drm_device *dev = dig_port->base.base.dev;
        struct drm_i915_private *dev_priv = to_i915(dev);

        /*
         * On HSW+, once we enable PSR on the source it will activate as
         * soon as the configured idle_frame count is matched. So we
         * actually just enable it here at activation time.
         */

        /* psr1 and psr2 are mutually exclusive. */
        if (dev_priv->psr.psr2_support)
                hsw_activate_psr2(intel_dp);
        else
                hsw_activate_psr1(intel_dp);
}

static bool intel_psr2_config_valid(struct intel_dp *intel_dp,
                                    struct intel_crtc_state *crtc_state)
{
        struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
        struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
        int crtc_hdisplay = crtc_state->base.adjusted_mode.crtc_hdisplay;
        int crtc_vdisplay = crtc_state->base.adjusted_mode.crtc_vdisplay;
        int psr_max_h = 0, psr_max_v = 0;

        /*
         * FIXME psr2_support is messed up. It's both computed
         * dynamically during PSR enable, and extracted from sink
         * caps during eDP detection.
         */
        if (!dev_priv->psr.psr2_support)
                return false;

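        /* Per-platform PSR2 resolution limits imposed by the hardware */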
        if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv)) {
                psr_max_h = 4096;
                psr_max_v = 2304;
        } else if (IS_GEN9(dev_priv)) {
                psr_max_h = 3640;
                psr_max_v = 2304;
        }

        if (crtc_hdisplay > psr_max_h || crtc_vdisplay > psr_max_v) {
                DRM_DEBUG_KMS("PSR2 not enabled, resolution %dx%d > max supported %dx%d\n",
                              crtc_hdisplay, crtc_vdisplay,
                              psr_max_h, psr_max_v);
                return false;
        }

        /*
         * FIXME: enable PSR2 only for y-coordinate PSR2 panels.
         * After the GTC implementation, remove this restriction.
         */
        if (!dev_priv->psr.y_cord_support) {
                DRM_DEBUG_KMS("PSR2 not enabled, panel does not support Y coordinate\n");
                return false;
        }

        return true;
}

void intel_psr_compute_config(struct intel_dp *intel_dp,
                              struct intel_crtc_state *crtc_state)
{
        struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
        struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
        const struct drm_display_mode *adjusted_mode =
                &crtc_state->base.adjusted_mode;
        int psr_setup_time;

        if (!CAN_PSR(dev_priv))
                return;

        if (!i915_modparams.enable_psr) {
                DRM_DEBUG_KMS("PSR disabled by flag\n");
                return;
        }

        /*
         * HSW spec explicitly says PSR is tied to port A.
         * BDW+ platforms with DDI implementation of PSR have different
         * PSR registers per transcoder and we only implement the
         * transcoder EDP ones. Since by display design transcoder EDP
         * is tied to port A, we can safely escape based on port A alone.
         */
        if (HAS_DDI(dev_priv) && dig_port->base.port != PORT_A) {
                DRM_DEBUG_KMS("PSR condition failed: Port not supported\n");
                return;
        }

        if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
            !dev_priv->psr.link_standby) {
                DRM_ERROR("PSR condition failed: Link off requested but not supported on this platform\n");
                return;
        }

        if (IS_HASWELL(dev_priv) &&
            I915_READ(HSW_STEREO_3D_CTL(crtc_state->cpu_transcoder)) &
                      S3D_ENABLE) {
                DRM_DEBUG_KMS("PSR condition failed: Stereo 3D is Enabled\n");
                return;
        }

        if (IS_HASWELL(dev_priv) &&
            adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
                DRM_DEBUG_KMS("PSR condition failed: Interlaced mode is Enabled\n");
                return;
        }

        psr_setup_time = drm_dp_psr_setup_time(intel_dp->psr_dpcd);
        if (psr_setup_time < 0) {
                DRM_DEBUG_KMS("PSR condition failed: Invalid PSR setup time (0x%02x)\n",
                              intel_dp->psr_dpcd[1]);
                return;
        }

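        /*
         * The sink's PSR setup time, converted to scanlines, has to fit
         * within the vblank (vtotal - vdisplay) with a line to spare.
         */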
        if (intel_usecs_to_scanlines(adjusted_mode, psr_setup_time) >
            adjusted_mode->crtc_vtotal - adjusted_mode->crtc_vdisplay - 1) {
                DRM_DEBUG_KMS("PSR condition failed: PSR setup time (%d us) too long\n",
                              psr_setup_time);
                return;
        }

        if (!(intel_dp->edp_dpcd[1] & DP_EDP_SET_POWER_CAP)) {
                DRM_DEBUG_KMS("PSR condition failed: panel lacks power state control\n");
                return;
        }

        crtc_state->has_psr = true;
        crtc_state->has_psr2 = intel_psr2_config_valid(intel_dp, crtc_state);
        DRM_DEBUG_KMS("Enabling PSR%s\n", crtc_state->has_psr2 ? "2" : "");
}

static void intel_psr_activate(struct intel_dp *intel_dp)
{
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
        struct drm_device *dev = intel_dig_port->base.base.dev;
        struct drm_i915_private *dev_priv = to_i915(dev);

        if (dev_priv->psr.psr2_support)
                WARN_ON(I915_READ(EDP_PSR2_CTL) & EDP_PSR2_ENABLE);
        else
                WARN_ON(I915_READ(EDP_PSR_CTL) & EDP_PSR_ENABLE);
        WARN_ON(dev_priv->psr.active);
        lockdep_assert_held(&dev_priv->psr.lock);

        dev_priv->psr.activate(intel_dp);
        dev_priv->psr.active = true;
}

static void hsw_psr_enable_source(struct intel_dp *intel_dp,
                                  const struct intel_crtc_state *crtc_state)
{
        struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
        struct drm_device *dev = dig_port->base.base.dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
        enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
        u32 chicken;

        psr_aux_io_power_get(intel_dp);

        if (dev_priv->psr.psr2_support) {
                chicken = PSR2_VSC_ENABLE_PROG_HEADER;
                if (dev_priv->psr.y_cord_support)
                        chicken |= PSR2_ADD_VERTICAL_LINE_COUNT;
                I915_WRITE(CHICKEN_TRANS(cpu_transcoder), chicken);

                I915_WRITE(EDP_PSR_DEBUG,
                           EDP_PSR_DEBUG_MASK_MEMUP |
                           EDP_PSR_DEBUG_MASK_HPD |
                           EDP_PSR_DEBUG_MASK_LPSP |
                           EDP_PSR_DEBUG_MASK_MAX_SLEEP |
                           EDP_PSR_DEBUG_MASK_DISP_REG_WRITE);
        } else {
                /*
                 * Per spec: avoid continuous PSR exit by masking MEMUP
                 * and HPD. Also mask LPSP to avoid a dependency on other
                 * drivers that might block runtime_pm, besides preventing
                 * other hw tracking issues, now that we can rely on
                 * frontbuffer tracking.
                 */
                I915_WRITE(EDP_PSR_DEBUG,
                           EDP_PSR_DEBUG_MASK_MEMUP |
                           EDP_PSR_DEBUG_MASK_HPD |
                           EDP_PSR_DEBUG_MASK_LPSP);
        }
}

/**
 * intel_psr_enable - Enable PSR
 * @intel_dp: Intel DP
 * @crtc_state: new CRTC state
 *
 * This function can only be called after the pipe is fully trained and enabled.
 */
void intel_psr_enable(struct intel_dp *intel_dp,
                      const struct intel_crtc_state *crtc_state)
{
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
        struct drm_device *dev = intel_dig_port->base.base.dev;
        struct drm_i915_private *dev_priv = to_i915(dev);

        if (!crtc_state->has_psr)
                return;

        if (WARN_ON(!CAN_PSR(dev_priv)))
                return;

        WARN_ON(dev_priv->drrs.dp);
        mutex_lock(&dev_priv->psr.lock);
        if (dev_priv->psr.enabled) {
                DRM_DEBUG_KMS("PSR already in use\n");
                goto unlock;
        }

        dev_priv->psr.psr2_support = crtc_state->has_psr2;
        dev_priv->psr.busy_frontbuffer_bits = 0;

        dev_priv->psr.setup_vsc(intel_dp, crtc_state);
        dev_priv->psr.enable_sink(intel_dp);
        dev_priv->psr.enable_source(intel_dp, crtc_state);
        dev_priv->psr.enabled = intel_dp;

        if (INTEL_GEN(dev_priv) >= 9) {
                intel_psr_activate(intel_dp);
        } else {
                /*
                 * FIXME: Activation should happen immediately since this
                 * function is just called after the pipe is fully trained
                 * and enabled.
                 * However on some platforms we face issues when first
                 * activation follows a modeset so quickly.
                 *     - On VLV/CHV we get a blank screen on first activation
                 *     - On HSW/BDW we get a recoverable frozen screen until
                 *       the next exit-activate sequence.
                 */
                schedule_delayed_work(&dev_priv->psr.work,
                                      msecs_to_jiffies(intel_dp->panel_power_cycle_delay * 5));
        }

unlock:
        mutex_unlock(&dev_priv->psr.lock);
}

static void vlv_psr_disable(struct intel_dp *intel_dp,
                            const struct intel_crtc_state *old_crtc_state)
{
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
        struct drm_device *dev = intel_dig_port->base.base.dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
        uint32_t val;

        if (dev_priv->psr.active) {
                /* Put VLV PSR back to PSR_state 0 (disabled). */
                if (intel_wait_for_register(dev_priv,
                                            VLV_PSRSTAT(crtc->pipe),
                                            VLV_EDP_PSR_IN_TRANS,
                                            0,
                                            1))
                        WARN(1, "PSR transition took longer than expected\n");

                val = I915_READ(VLV_PSRCTL(crtc->pipe));
                val &= ~VLV_EDP_PSR_ACTIVE_ENTRY;
                val &= ~VLV_EDP_PSR_ENABLE;
                val &= ~VLV_EDP_PSR_MODE_MASK;
                I915_WRITE(VLV_PSRCTL(crtc->pipe), val);

                dev_priv->psr.active = false;
        } else {
                WARN_ON(vlv_is_psr_active_on_pipe(dev, crtc->pipe));
        }
}

static void hsw_psr_disable(struct intel_dp *intel_dp,
                            const struct intel_crtc_state *old_crtc_state)
{
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
        struct drm_device *dev = intel_dig_port->base.base.dev;
        struct drm_i915_private *dev_priv = to_i915(dev);

        if (dev_priv->psr.active) {
                i915_reg_t psr_status;
                u32 psr_status_mask;

                if (dev_priv->psr.aux_frame_sync)
                        drm_dp_dpcd_writeb(&intel_dp->aux,
                                        DP_SINK_DEVICE_AUX_FRAME_SYNC_CONF,
                                        0);

                if (dev_priv->psr.psr2_support) {
                        psr_status = EDP_PSR2_STATUS;
                        psr_status_mask = EDP_PSR2_STATUS_STATE_MASK;

                        I915_WRITE(EDP_PSR2_CTL,
                                   I915_READ(EDP_PSR2_CTL) &
                                   ~(EDP_PSR2_ENABLE | EDP_SU_TRACK_ENABLE));

                } else {
                        psr_status = EDP_PSR_STATUS;
                        psr_status_mask = EDP_PSR_STATUS_STATE_MASK;

                        I915_WRITE(EDP_PSR_CTL,
                                   I915_READ(EDP_PSR_CTL) & ~EDP_PSR_ENABLE);
                }

                /* Wait till PSR is idle */
                if (intel_wait_for_register(dev_priv,
                                            psr_status, psr_status_mask, 0,
                                            2000))
                        DRM_ERROR("Timed out waiting for PSR Idle State\n");

                dev_priv->psr.active = false;
        } else {
                if (dev_priv->psr.psr2_support)
                        WARN_ON(I915_READ(EDP_PSR2_CTL) & EDP_PSR2_ENABLE);
                else
                        WARN_ON(I915_READ(EDP_PSR_CTL) & EDP_PSR_ENABLE);
        }

        psr_aux_io_power_put(intel_dp);
}

/**
 * intel_psr_disable - Disable PSR
 * @intel_dp: Intel DP
 * @old_crtc_state: old CRTC state
 *
 * This function needs to be called before disabling the pipe.
 */
void intel_psr_disable(struct intel_dp *intel_dp,
                       const struct intel_crtc_state *old_crtc_state)
{
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
        struct drm_device *dev = intel_dig_port->base.base.dev;
        struct drm_i915_private *dev_priv = to_i915(dev);

        if (!old_crtc_state->has_psr)
                return;

        if (WARN_ON(!CAN_PSR(dev_priv)))
                return;

        mutex_lock(&dev_priv->psr.lock);
        if (!dev_priv->psr.enabled) {
                mutex_unlock(&dev_priv->psr.lock);
                return;
        }

        dev_priv->psr.disable_source(intel_dp, old_crtc_state);

        /* Disable PSR on Sink */
        drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG, 0);

        dev_priv->psr.enabled = NULL;
        mutex_unlock(&dev_priv->psr.lock);

        cancel_delayed_work_sync(&dev_priv->psr.work);
}

static void intel_psr_work(struct work_struct *work)
{
        struct drm_i915_private *dev_priv =
                container_of(work, typeof(*dev_priv), psr.work.work);
        struct intel_dp *intel_dp = dev_priv->psr.enabled;
        struct drm_crtc *crtc = dp_to_dig_port(intel_dp)->base.base.crtc;
        enum pipe pipe = to_intel_crtc(crtc)->pipe;

        /*
         * We have to make sure PSR is ready for re-enable, otherwise it
         * stays disabled until the next full enable/disable cycle.
         * PSR might take some time to get fully disabled and be ready
         * for re-enable.
         */
        if (HAS_DDI(dev_priv)) {
                if (dev_priv->psr.psr2_support) {
                        if (intel_wait_for_register(dev_priv,
                                                    EDP_PSR2_STATUS,
                                                    EDP_PSR2_STATUS_STATE_MASK,
                                                    0,
                                                    50)) {
                                DRM_ERROR("Timed out waiting for PSR2 Idle for re-enable\n");
                                return;
                        }
                } else {
                        if (intel_wait_for_register(dev_priv,
                                                    EDP_PSR_STATUS,
                                                    EDP_PSR_STATUS_STATE_MASK,
                                                    0,
                                                    50)) {
                                DRM_ERROR("Timed out waiting for PSR Idle for re-enable\n");
                                return;
                        }
                }
        } else {
                if (intel_wait_for_register(dev_priv,
                                            VLV_PSRSTAT(pipe),
                                            VLV_EDP_PSR_IN_TRANS,
                                            0,
                                            1)) {
                        DRM_ERROR("Timed out waiting for PSR Idle for re-enable\n");
                        return;
                }
        }
        mutex_lock(&dev_priv->psr.lock);
        intel_dp = dev_priv->psr.enabled;

        if (!intel_dp)
                goto unlock;

        /*
         * The delayed work can race with an invalidate hence we need to
         * recheck. Since psr_flush first clears this and then reschedules we
         * won't ever miss a flush when bailing out here.
         */
        if (dev_priv->psr.busy_frontbuffer_bits)
                goto unlock;

        intel_psr_activate(intel_dp);
unlock:
        mutex_unlock(&dev_priv->psr.lock);
}

static void intel_psr_exit(struct drm_i915_private *dev_priv)
{
        struct intel_dp *intel_dp = dev_priv->psr.enabled;
        struct drm_crtc *crtc = dp_to_dig_port(intel_dp)->base.base.crtc;
        enum pipe pipe = to_intel_crtc(crtc)->pipe;
        u32 val;

        if (!dev_priv->psr.active)
                return;

        if (HAS_DDI(dev_priv)) {
                if (dev_priv->psr.aux_frame_sync)
                        drm_dp_dpcd_writeb(&intel_dp->aux,
                                        DP_SINK_DEVICE_AUX_FRAME_SYNC_CONF,
                                        0);
                if (dev_priv->psr.psr2_support) {
                        val = I915_READ(EDP_PSR2_CTL);
                        WARN_ON(!(val & EDP_PSR2_ENABLE));
                        I915_WRITE(EDP_PSR2_CTL, val & ~EDP_PSR2_ENABLE);
                } else {
                        val = I915_READ(EDP_PSR_CTL);
                        WARN_ON(!(val & EDP_PSR_ENABLE));
                        I915_WRITE(EDP_PSR_CTL, val & ~EDP_PSR_ENABLE);
                }
        } else {
                val = I915_READ(VLV_PSRCTL(pipe));

                /*
                 * Here we do the transition directly from
                 * PSR_state 3 (active - no Remote Frame Buffer (RFB) update) to
                 * PSR_state 5 (exit).
                 * PSR_state 4 (active with single frame update) can be skipped.
                 * In PSR_state 5 (exit) Hardware is responsible for transitioning
                 * back to PSR_state 1 (inactive).
                 * We then end up in the same state as after vlv_psr_enable_source.
                 */
                val &= ~VLV_EDP_PSR_ACTIVE_ENTRY;
                I915_WRITE(VLV_PSRCTL(pipe), val);

                /*
                 * Send AUX wake up - the spec says that after transitioning
                 * to PSR active we have to send an AUX wake up by writing
                 * 01h to DPCD 600h of the sink device.
                 * XXX: This might slow down the transition, but without this
                 * HW doesn't complete the transition to PSR_state 1 and we
                 * never get the screen updated.
                 */
                drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER,
                                   DP_SET_POWER_D0);
        }

        dev_priv->psr.active = false;
}

/**
 * intel_psr_single_frame_update - Single Frame Update
 * @dev_priv: i915 device
 * @frontbuffer_bits: frontbuffer plane tracking bits
 *
 * Some platforms support a single frame update feature that is used to
 * send and update only one frame on the Remote Frame Buffer.
 * So far it is only implemented for Valleyview and Cherryview because
 * hardware requires this to be done before a page flip.
 */
void intel_psr_single_frame_update(struct drm_i915_private *dev_priv,
                                   unsigned frontbuffer_bits)
{
        struct drm_crtc *crtc;
        enum pipe pipe;
        u32 val;

        if (!CAN_PSR(dev_priv))
                return;

        /*
         * Single frame update is already supported on BDW+ but it requires
         * many W/A and it isn't really needed.
         */
        if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv))
                return;

        mutex_lock(&dev_priv->psr.lock);
        if (!dev_priv->psr.enabled) {
                mutex_unlock(&dev_priv->psr.lock);
                return;
        }

        crtc = dp_to_dig_port(dev_priv->psr.enabled)->base.base.crtc;
        pipe = to_intel_crtc(crtc)->pipe;

        if (frontbuffer_bits & INTEL_FRONTBUFFER_ALL_MASK(pipe)) {
                val = I915_READ(VLV_PSRCTL(pipe));

                /*
                 * We need to set this bit before writing registers for a
                 * flip. The bit is self-clearing once PSR reaches the
                 * active state again.
                 */
                I915_WRITE(VLV_PSRCTL(pipe), val | VLV_EDP_PSR_SINGLE_FRAME_UPDATE);
        }
        mutex_unlock(&dev_priv->psr.lock);
}

/**
 * intel_psr_invalidate - Invalidate PSR
 * @dev_priv: i915 device
 * @frontbuffer_bits: frontbuffer plane tracking bits
 *
 * Since the hardware frontbuffer tracking has gaps we need to integrate
 * with the software frontbuffer tracking. This function gets called every
 * time frontbuffer rendering starts and a buffer gets dirtied. PSR must be
 * disabled if the frontbuffer mask contains a buffer relevant to PSR.
 *
 * Dirty frontbuffers relevant to PSR are tracked in busy_frontbuffer_bits.
 */
void intel_psr_invalidate(struct drm_i915_private *dev_priv,
                          unsigned frontbuffer_bits)
{
        struct drm_crtc *crtc;
        enum pipe pipe;

        if (!CAN_PSR(dev_priv))
                return;

        mutex_lock(&dev_priv->psr.lock);
        if (!dev_priv->psr.enabled) {
                mutex_unlock(&dev_priv->psr.lock);
                return;
        }

        crtc = dp_to_dig_port(dev_priv->psr.enabled)->base.base.crtc;
        pipe = to_intel_crtc(crtc)->pipe;

        frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);
        dev_priv->psr.busy_frontbuffer_bits |= frontbuffer_bits;

        if (frontbuffer_bits)
                intel_psr_exit(dev_priv);

        mutex_unlock(&dev_priv->psr.lock);
}

/**
 * intel_psr_flush - Flush PSR
 * @dev_priv: i915 device
 * @frontbuffer_bits: frontbuffer plane tracking bits
 * @origin: which operation caused the flush
 *
 * Since the hardware frontbuffer tracking has gaps we need to integrate
 * with the software frontbuffer tracking. This function gets called every
 * time frontbuffer rendering has completed and flushed out to memory. PSR
 * can be enabled again if no other frontbuffer relevant to PSR is dirty.
 *
 * Dirty frontbuffers relevant to PSR are tracked in busy_frontbuffer_bits.
 */
void intel_psr_flush(struct drm_i915_private *dev_priv,
                     unsigned frontbuffer_bits, enum fb_op_origin origin)
{
        struct drm_crtc *crtc;
        enum pipe pipe;

        if (!CAN_PSR(dev_priv))
                return;

        mutex_lock(&dev_priv->psr.lock);
        if (!dev_priv->psr.enabled) {
                mutex_unlock(&dev_priv->psr.lock);
                return;
        }

        crtc = dp_to_dig_port(dev_priv->psr.enabled)->base.base.crtc;
        pipe = to_intel_crtc(crtc)->pipe;

        frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);
        dev_priv->psr.busy_frontbuffer_bits &= ~frontbuffer_bits;

        /* By definition flush = invalidate + flush */
        if (frontbuffer_bits)
                intel_psr_exit(dev_priv);

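        /*
         * Re-activation is deferred to the delayed work item, and only
         * scheduled once no busy frontbuffer bits remain.
         */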
        if (!dev_priv->psr.active && !dev_priv->psr.busy_frontbuffer_bits)
                if (!work_busy(&dev_priv->psr.work.work))
                        schedule_delayed_work(&dev_priv->psr.work,
                                              msecs_to_jiffies(100));
        mutex_unlock(&dev_priv->psr.lock);
}

/**
 * intel_psr_init - Init basic PSR work and mutex.
 * @dev_priv: i915 device private
 *
 * This function is called only once at driver load to initialize basic
 * PSR stuff.
 */
void intel_psr_init(struct drm_i915_private *dev_priv)
{
        if (!HAS_PSR(dev_priv))
                return;

        dev_priv->psr_mmio_base = IS_HASWELL(dev_priv) ?
                HSW_EDP_PSR_BASE : BDW_EDP_PSR_BASE;

        if (!dev_priv->psr.sink_support)
                return;

        /* Per platform default: all disabled. */
        if (i915_modparams.enable_psr == -1)
                i915_modparams.enable_psr = 0;

        /* Set link_standby vs. link_off defaults */
        if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
                /* HSW and BDW require workarounds that we don't implement. */
                dev_priv->psr.link_standby = false;
        else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
                /* On VLV and CHV only standby mode is supported. */
                dev_priv->psr.link_standby = true;
        else
                /* For new platforms let's respect the VBT again. */
                dev_priv->psr.link_standby = dev_priv->vbt.psr.full_link;

        /* Override link_standby vs. link_off defaults */
        if (i915_modparams.enable_psr == 2 && !dev_priv->psr.link_standby) {
                DRM_DEBUG_KMS("PSR: Forcing link standby\n");
                dev_priv->psr.link_standby = true;
        }
        if (i915_modparams.enable_psr == 3 && dev_priv->psr.link_standby) {
                DRM_DEBUG_KMS("PSR: Forcing main link off\n");
                dev_priv->psr.link_standby = false;
        }

        INIT_DELAYED_WORK(&dev_priv->psr.work, intel_psr_work);
        mutex_init(&dev_priv->psr.lock);

        if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
                dev_priv->psr.enable_source = vlv_psr_enable_source;
                dev_priv->psr.disable_source = vlv_psr_disable;
                dev_priv->psr.enable_sink = vlv_psr_enable_sink;
                dev_priv->psr.activate = vlv_psr_activate;
                dev_priv->psr.setup_vsc = vlv_psr_setup_vsc;
        } else {
                dev_priv->psr.enable_source = hsw_psr_enable_source;
                dev_priv->psr.disable_source = hsw_psr_disable;
                dev_priv->psr.enable_sink = hsw_psr_enable_sink;
                dev_priv->psr.activate = hsw_psr_activate;
                dev_priv->psr.setup_vsc = hsw_psr_setup_vsc;
        }
}