]> Git Repo - linux.git/blob - drivers/gpu/drm/i915/intel_display.c
drm/i915: VCS is not the last ring
[linux.git] / drivers / gpu / drm / i915 / intel_display.c
1 /*
2  * Copyright © 2006-2007 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21  * DEALINGS IN THE SOFTWARE.
22  *
23  * Authors:
24  *      Eric Anholt <[email protected]>
25  */
26
27 #include <linux/dmi.h>
28 #include <linux/cpufreq.h>
29 #include <linux/module.h>
30 #include <linux/input.h>
31 #include <linux/i2c.h>
32 #include <linux/kernel.h>
33 #include <linux/slab.h>
34 #include <linux/vgaarb.h>
35 #include <drm/drm_edid.h>
36 #include "drmP.h"
37 #include "intel_drv.h"
38 #include "i915_drm.h"
39 #include "i915_drv.h"
40 #include "i915_trace.h"
41 #include "drm_dp_helper.h"
42 #include "drm_crtc_helper.h"
43 #include <linux/dma_remapping.h>
44
/* Convenience check: does the pipe drive an eDP output?
 * NOTE(review): expands intel_pipe_has_type(crtc, ...) and therefore
 * requires a variable named 'crtc' in scope at every expansion site. */
#define HAS_eDP (intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP))

/* Forward declarations for helpers defined later in this file. */
bool intel_pipe_has_type(struct drm_crtc *crtc, int type);
static void intel_update_watermarks(struct drm_device *dev);
static void intel_increase_pllclock(struct drm_crtc *crtc);
static void intel_crtc_update_cursor(struct drm_crtc *crtc, bool on);
51
/* One candidate set of DPLL divisors plus the values derived from them
 * by intel_clock()/pineview_clock(). */
typedef struct {
	/* given values */
	int n;		/* reference divider */
	int m1, m2;	/* feedback dividers */
	int p1, p2;	/* post dividers */
	/* derived values */
	int	dot;	/* resulting dot clock (kHz) */
	int	vco;	/* resulting VCO frequency (kHz) */
	int	m;	/* combined feedback divider */
	int	p;	/* combined post divider (p1 * p2) */
} intel_clock_t;

/* Inclusive [min, max] range for a single divisor. */
typedef struct {
	int	min, max;
} intel_range_t;

/* p2 selection: p2_slow below dot_limit, p2_fast at or above it (LVDS
 * instead selects on single vs. dual channel — see the find_pll users). */
typedef struct {
	int	dot_limit;
	int	p2_slow, p2_fast;
} intel_p2_t;

#define INTEL_P2_NUM		      2
typedef struct intel_limit intel_limit_t;
/* Per-platform/per-output divisor limits plus the search strategy used
 * to pick divisors within them. */
struct intel_limit {
	intel_range_t   dot, vco, n, m, m1, m2, p, p1;
	intel_p2_t	    p2;
	bool (* find_pll)(const intel_limit_t *, struct drm_crtc *,
			int, int, intel_clock_t *, intel_clock_t *);
};

/* FDI */
#define IRONLAKE_FDI_FREQ		2700000 /* in kHz for mode->clock */
84
/* The four find_pll strategies referenced by the limit tables below:
 * brute-force search (pre-G4X), error-bounded search (G4X/Ironlake),
 * and the two fixed-divisor DisplayPort variants. */
static bool
intel_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
		    int target, int refclk, intel_clock_t *match_clock,
		    intel_clock_t *best_clock);
static bool
intel_g4x_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
			int target, int refclk, intel_clock_t *match_clock,
			intel_clock_t *best_clock);

static bool
intel_find_pll_g4x_dp(const intel_limit_t *, struct drm_crtc *crtc,
		      int target, int refclk, intel_clock_t *match_clock,
		      intel_clock_t *best_clock);
static bool
intel_find_pll_ironlake_dp(const intel_limit_t *, struct drm_crtc *crtc,
			   int target, int refclk, intel_clock_t *match_clock,
			   intel_clock_t *best_clock);
102
103 static inline u32 /* units of 100MHz */
104 intel_fdi_link_freq(struct drm_device *dev)
105 {
106         if (IS_GEN5(dev)) {
107                 struct drm_i915_private *dev_priv = dev->dev_private;
108                 return (I915_READ(FDI_PLL_BIOS_0) & FDI_PLL_FB_CLOCK_MASK) + 2;
109         } else
110                 return 27;
111 }
112
/* 8xx-class (gen2) divisor limits, DVO/DAC outputs. */
static const intel_limit_t intel_limits_i8xx_dvo = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 930000, .max = 1400000 },
	.n = { .min = 3, .max = 16 },
	.m = { .min = 96, .max = 140 },
	.m1 = { .min = 18, .max = 26 },
	.m2 = { .min = 6, .max = 16 },
	.p = { .min = 4, .max = 128 },
	.p1 = { .min = 2, .max = 33 },
	.p2 = { .dot_limit = 165000,
		.p2_slow = 4, .p2_fast = 2 },
	.find_pll = intel_find_best_PLL,
};

/* 8xx-class (gen2) divisor limits, LVDS panels. */
static const intel_limit_t intel_limits_i8xx_lvds = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 930000, .max = 1400000 },
	.n = { .min = 3, .max = 16 },
	.m = { .min = 96, .max = 140 },
	.m1 = { .min = 18, .max = 26 },
	.m2 = { .min = 6, .max = 16 },
	.p = { .min = 4, .max = 128 },
	.p1 = { .min = 1, .max = 6 },
	.p2 = { .dot_limit = 165000,
		.p2_slow = 14, .p2_fast = 7 },
	.find_pll = intel_find_best_PLL,
};

/* 9xx-class divisor limits, SDVO and other non-LVDS outputs
 * (also the fallback table for unmatched output types). */
static const intel_limit_t intel_limits_i9xx_sdvo = {
	.dot = { .min = 20000, .max = 400000 },
	.vco = { .min = 1400000, .max = 2800000 },
	.n = { .min = 1, .max = 6 },
	.m = { .min = 70, .max = 120 },
	.m1 = { .min = 10, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 5, .max = 80 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 200000,
		.p2_slow = 10, .p2_fast = 5 },
	.find_pll = intel_find_best_PLL,
};

/* 9xx-class divisor limits, LVDS panels. */
static const intel_limit_t intel_limits_i9xx_lvds = {
	.dot = { .min = 20000, .max = 400000 },
	.vco = { .min = 1400000, .max = 2800000 },
	.n = { .min = 1, .max = 6 },
	.m = { .min = 70, .max = 120 },
	.m1 = { .min = 10, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 7, .max = 98 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 112000,
		.p2_slow = 14, .p2_fast = 7 },
	.find_pll = intel_find_best_PLL,
};
168
169
/* G4X divisor limits, SDVO outputs. */
static const intel_limit_t intel_limits_g4x_sdvo = {
	.dot = { .min = 25000, .max = 270000 },
	.vco = { .min = 1750000, .max = 3500000},
	.n = { .min = 1, .max = 4 },
	.m = { .min = 104, .max = 138 },
	.m1 = { .min = 17, .max = 23 },
	.m2 = { .min = 5, .max = 11 },
	.p = { .min = 10, .max = 30 },
	.p1 = { .min = 1, .max = 3},
	.p2 = { .dot_limit = 270000,
		.p2_slow = 10,
		.p2_fast = 10
	},
	.find_pll = intel_g4x_find_best_PLL,
};

/* G4X divisor limits, HDMI and analog outputs. */
static const intel_limit_t intel_limits_g4x_hdmi = {
	.dot = { .min = 22000, .max = 400000 },
	.vco = { .min = 1750000, .max = 3500000},
	.n = { .min = 1, .max = 4 },
	.m = { .min = 104, .max = 138 },
	.m1 = { .min = 16, .max = 23 },
	.m2 = { .min = 5, .max = 11 },
	.p = { .min = 5, .max = 80 },
	.p1 = { .min = 1, .max = 8},
	.p2 = { .dot_limit = 165000,
		.p2_slow = 10, .p2_fast = 5 },
	.find_pll = intel_g4x_find_best_PLL,
};

/* G4X divisor limits, single-channel LVDS.  p2_slow == p2_fast, so the
 * dot_limit never matters here. */
static const intel_limit_t intel_limits_g4x_single_channel_lvds = {
	.dot = { .min = 20000, .max = 115000 },
	.vco = { .min = 1750000, .max = 3500000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 104, .max = 138 },
	.m1 = { .min = 17, .max = 23 },
	.m2 = { .min = 5, .max = 11 },
	.p = { .min = 28, .max = 112 },
	.p1 = { .min = 2, .max = 8 },
	.p2 = { .dot_limit = 0,
		.p2_slow = 14, .p2_fast = 14
	},
	.find_pll = intel_g4x_find_best_PLL,
};

/* G4X divisor limits, dual-channel LVDS. */
static const intel_limit_t intel_limits_g4x_dual_channel_lvds = {
	.dot = { .min = 80000, .max = 224000 },
	.vco = { .min = 1750000, .max = 3500000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 104, .max = 138 },
	.m1 = { .min = 17, .max = 23 },
	.m2 = { .min = 5, .max = 11 },
	.p = { .min = 14, .max = 42 },
	.p1 = { .min = 2, .max = 6 },
	.p2 = { .dot_limit = 0,
		.p2_slow = 7, .p2_fast = 7
	},
	.find_pll = intel_g4x_find_best_PLL,
};

/* G4X divisor limits, DisplayPort (fixed divisors — see
 * intel_find_pll_g4x_dp). */
static const intel_limit_t intel_limits_g4x_display_port = {
	.dot = { .min = 161670, .max = 227000 },
	.vco = { .min = 1750000, .max = 3500000},
	.n = { .min = 1, .max = 2 },
	.m = { .min = 97, .max = 108 },
	.m1 = { .min = 0x10, .max = 0x12 },
	.m2 = { .min = 0x05, .max = 0x06 },
	.p = { .min = 10, .max = 20 },
	.p1 = { .min = 1, .max = 2},
	.p2 = { .dot_limit = 0,
		.p2_slow = 10, .p2_fast = 10 },
	.find_pll = intel_find_pll_g4x_dp,
};
243
/* Pineview divisor limits, SDVO.  Note m1 is unused (forced to 0) and the
 * single combined m divider is carried in m2 — see pineview_clock(). */
static const intel_limit_t intel_limits_pineview_sdvo = {
	.dot = { .min = 20000, .max = 400000},
	.vco = { .min = 1700000, .max = 3500000 },
	/* Pineview's Ncounter is a ring counter */
	.n = { .min = 3, .max = 6 },
	.m = { .min = 2, .max = 256 },
	/* Pineview only has one combined m divider, which we treat as m2. */
	.m1 = { .min = 0, .max = 0 },
	.m2 = { .min = 0, .max = 254 },
	.p = { .min = 5, .max = 80 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 200000,
		.p2_slow = 10, .p2_fast = 5 },
	.find_pll = intel_find_best_PLL,
};

/* Pineview divisor limits, LVDS. */
static const intel_limit_t intel_limits_pineview_lvds = {
	.dot = { .min = 20000, .max = 400000 },
	.vco = { .min = 1700000, .max = 3500000 },
	.n = { .min = 3, .max = 6 },
	.m = { .min = 2, .max = 256 },
	.m1 = { .min = 0, .max = 0 },
	.m2 = { .min = 0, .max = 254 },
	.p = { .min = 7, .max = 112 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 112000,
		.p2_slow = 14, .p2_fast = 14 },
	.find_pll = intel_find_best_PLL,
};
273
/* Ironlake / Sandybridge
 *
 * We calculate clock using (register_value + 2) for N/M1/M2, so here
 * the range value for them is (actual_value - 2).
 */
/* Ironlake divisor limits, DAC and other non-LVDS/non-DP outputs. */
static const intel_limit_t intel_limits_ironlake_dac = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 5 },
	.m = { .min = 79, .max = 127 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 5, .max = 80 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 10, .p2_fast = 5 },
	.find_pll = intel_g4x_find_best_PLL,
};

/* Ironlake divisor limits, single-channel LVDS, 120MHz refclk. */
static const intel_limit_t intel_limits_ironlake_single_lvds = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 79, .max = 118 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 28, .max = 112 },
	.p1 = { .min = 2, .max = 8 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 14, .p2_fast = 14 },
	.find_pll = intel_g4x_find_best_PLL,
};

/* Ironlake divisor limits, dual-channel LVDS, 120MHz refclk. */
static const intel_limit_t intel_limits_ironlake_dual_lvds = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 79, .max = 127 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 14, .max = 56 },
	.p1 = { .min = 2, .max = 8 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 7, .p2_fast = 7 },
	.find_pll = intel_g4x_find_best_PLL,
};

/* LVDS 100mhz refclk limits. */
static const intel_limit_t intel_limits_ironlake_single_lvds_100m = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 2 },
	.m = { .min = 79, .max = 126 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 28, .max = 112 },
	.p1 = { .min = 2, .max = 8 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 14, .p2_fast = 14 },
	.find_pll = intel_g4x_find_best_PLL,
};

/* Dual-channel LVDS with 100MHz refclk. */
static const intel_limit_t intel_limits_ironlake_dual_lvds_100m = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 79, .max = 126 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 14, .max = 42 },
	.p1 = { .min = 2, .max = 6 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 7, .p2_fast = 7 },
	.find_pll = intel_g4x_find_best_PLL,
};

/* Ironlake DisplayPort / eDP (fixed divisors — see
 * intel_find_pll_ironlake_dp). */
static const intel_limit_t intel_limits_ironlake_display_port = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000},
	.n = { .min = 1, .max = 2 },
	.m = { .min = 81, .max = 90 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 10, .max = 20 },
	.p1 = { .min = 1, .max = 2},
	.p2 = { .dot_limit = 0,
		.p2_slow = 10, .p2_fast = 10 },
	.find_pll = intel_find_pll_ironlake_dp,
};
363
/*
 * Read a DPIO (display PHY sideband) register.
 *
 * Serializes against other DPIO accesses via dpio_lock, waits for the
 * packet interface to go idle, issues the read, then waits for completion
 * before latching the data.  On timeout the error is logged and 0 is
 * returned — callers cannot distinguish a timeout from a real 0 value.
 */
u32 intel_dpio_read(struct drm_i915_private *dev_priv, int reg)
{
	unsigned long flags;
	u32 val = 0;

	spin_lock_irqsave(&dev_priv->dpio_lock, flags);
	/* Any previous transaction must have drained before we start. */
	if (wait_for_atomic_us((I915_READ(DPIO_PKT) & DPIO_BUSY) == 0, 100)) {
		DRM_ERROR("DPIO idle wait timed out\n");
		goto out_unlock;
	}

	/* Target register first, then kick off the read packet. */
	I915_WRITE(DPIO_REG, reg);
	I915_WRITE(DPIO_PKT, DPIO_RID | DPIO_OP_READ | DPIO_PORTID |
		   DPIO_BYTE);
	if (wait_for_atomic_us((I915_READ(DPIO_PKT) & DPIO_BUSY) == 0, 100)) {
		DRM_ERROR("DPIO read wait timed out\n");
		goto out_unlock;
	}
	val = I915_READ(DPIO_DATA);

out_unlock:
	spin_unlock_irqrestore(&dev_priv->dpio_lock, flags);
	return val;
}
388
/*
 * Write @val to a DPIO (display PHY sideband) register.
 *
 * Mirrors intel_dpio_read(): takes dpio_lock, waits for the packet
 * interface to go idle, stages the data and register, then issues the
 * write packet.  Timeouts are logged but not reported to the caller.
 */
static void intel_dpio_write(struct drm_i915_private *dev_priv, int reg,
			     u32 val)
{
	unsigned long flags;

	spin_lock_irqsave(&dev_priv->dpio_lock, flags);
	if (wait_for_atomic_us((I915_READ(DPIO_PKT) & DPIO_BUSY) == 0, 100)) {
		DRM_ERROR("DPIO idle wait timed out\n");
		goto out_unlock;
	}

	/* Data and register must be staged before the packet is sent. */
	I915_WRITE(DPIO_DATA, val);
	I915_WRITE(DPIO_REG, reg);
	I915_WRITE(DPIO_PKT, DPIO_RID | DPIO_OP_WRITE | DPIO_PORTID |
		   DPIO_BYTE);
	if (wait_for_atomic_us((I915_READ(DPIO_PKT) & DPIO_BUSY) == 0, 100))
		DRM_ERROR("DPIO write wait timed out\n");

out_unlock:
	spin_unlock_irqrestore(&dev_priv->dpio_lock, flags);
}
410
/* Reset the DPIO sideband interface by pulsing DPIO_CTL low then high;
 * the posting reads flush each write before the next is issued. */
static void vlv_init_dpio(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	/* Reset the DPIO config */
	I915_WRITE(DPIO_CTL, 0);
	POSTING_READ(DPIO_CTL);
	I915_WRITE(DPIO_CTL, 1);
	POSTING_READ(DPIO_CTL);
}
421
/* DMI match callback: log the quirk and return non-zero so
 * dmi_check_system() reports a match (see is_dual_link_lvds). */
static int intel_dual_link_lvds_callback(const struct dmi_system_id *id)
{
	DRM_INFO("Forcing lvds to dual link mode on %s\n", id->ident);
	return 1;
}
427
/* Machines whose LVDS panel must be forced to dual-link mode. */
static const struct dmi_system_id intel_dual_link_lvds[] = {
	{
		.callback = intel_dual_link_lvds_callback,
		.ident = "Apple MacBook Pro (Core i5/i7 Series)",
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
			DMI_MATCH(DMI_PRODUCT_NAME, "MacBookPro8,2"),
		},
	},
	{ }	/* terminating entry */
};
439
/*
 * Determine whether the LVDS panel at @reg is wired for dual-channel
 * operation.
 *
 * Decision order: the i915_lvds_channel_mode module option wins, then the
 * DMI quirk table, then the (cached) hardware LVDS register — falling back
 * to the VBT-provided value when the register is still uninitialized.
 */
static bool is_dual_link_lvds(struct drm_i915_private *dev_priv,
			      unsigned int reg)
{
	unsigned int val;

	/* use the module option value if specified */
	if (i915_lvds_channel_mode > 0)
		return i915_lvds_channel_mode == 2;

	if (dmi_check_system(intel_dual_link_lvds))
		return true;

	if (dev_priv->lvds_val)
		val = dev_priv->lvds_val;	/* use the cached readout */
	else {
		/* BIOS should set the proper LVDS register value at boot, but
		 * in reality, it doesn't set the value when the lid is closed;
		 * we need to check "the value to be set" in VBT when LVDS
		 * register is uninitialized.
		 */
		val = I915_READ(reg);
		if (!(val & ~LVDS_DETECTED))
			val = dev_priv->bios_lvds_val;
		dev_priv->lvds_val = val;
	}
	return (val & LVDS_CLKB_POWER_MASK) == LVDS_CLKB_POWER_UP;
}
467
468 static const intel_limit_t *intel_ironlake_limit(struct drm_crtc *crtc,
469                                                 int refclk)
470 {
471         struct drm_device *dev = crtc->dev;
472         struct drm_i915_private *dev_priv = dev->dev_private;
473         const intel_limit_t *limit;
474
475         if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
476                 if (is_dual_link_lvds(dev_priv, PCH_LVDS)) {
477                         /* LVDS dual channel */
478                         if (refclk == 100000)
479                                 limit = &intel_limits_ironlake_dual_lvds_100m;
480                         else
481                                 limit = &intel_limits_ironlake_dual_lvds;
482                 } else {
483                         if (refclk == 100000)
484                                 limit = &intel_limits_ironlake_single_lvds_100m;
485                         else
486                                 limit = &intel_limits_ironlake_single_lvds;
487                 }
488         } else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT) ||
489                         HAS_eDP)
490                 limit = &intel_limits_ironlake_display_port;
491         else
492                 limit = &intel_limits_ironlake_dac;
493
494         return limit;
495 }
496
/*
 * Pick the G4X PLL limit table for @crtc based on output type, falling
 * back to the i9xx SDVO table for output types without a dedicated one.
 */
static const intel_limit_t *intel_g4x_limit(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	const intel_limit_t *limit;

	if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
		if (is_dual_link_lvds(dev_priv, LVDS))
			/* LVDS with dual channel */
			limit = &intel_limits_g4x_dual_channel_lvds;
		else
			/* LVDS with single channel */
			limit = &intel_limits_g4x_single_channel_lvds;
	} else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_HDMI) ||
		   intel_pipe_has_type(crtc, INTEL_OUTPUT_ANALOG)) {
		limit = &intel_limits_g4x_hdmi;
	} else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_SDVO)) {
		limit = &intel_limits_g4x_sdvo;
	} else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT)) {
		limit = &intel_limits_g4x_display_port;
	} else /* The option is for other outputs */
		limit = &intel_limits_i9xx_sdvo;

	return limit;
}
522
523 static const intel_limit_t *intel_limit(struct drm_crtc *crtc, int refclk)
524 {
525         struct drm_device *dev = crtc->dev;
526         const intel_limit_t *limit;
527
528         if (HAS_PCH_SPLIT(dev))
529                 limit = intel_ironlake_limit(crtc, refclk);
530         else if (IS_G4X(dev)) {
531                 limit = intel_g4x_limit(crtc);
532         } else if (IS_PINEVIEW(dev)) {
533                 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
534                         limit = &intel_limits_pineview_lvds;
535                 else
536                         limit = &intel_limits_pineview_sdvo;
537         } else if (!IS_GEN2(dev)) {
538                 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
539                         limit = &intel_limits_i9xx_lvds;
540                 else
541                         limit = &intel_limits_i9xx_sdvo;
542         } else {
543                 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
544                         limit = &intel_limits_i8xx_lvds;
545                 else
546                         limit = &intel_limits_i8xx_dvo;
547         }
548         return limit;
549 }
550
551 /* m1 is reserved as 0 in Pineview, n is a ring counter */
552 static void pineview_clock(int refclk, intel_clock_t *clock)
553 {
554         clock->m = clock->m2 + 2;
555         clock->p = clock->p1 * clock->p2;
556         clock->vco = refclk * clock->m / clock->n;
557         clock->dot = clock->vco / clock->p;
558 }
559
560 static void intel_clock(struct drm_device *dev, int refclk, intel_clock_t *clock)
561 {
562         if (IS_PINEVIEW(dev)) {
563                 pineview_clock(refclk, clock);
564                 return;
565         }
566         clock->m = 5 * (clock->m1 + 2) + (clock->m2 + 2);
567         clock->p = clock->p1 * clock->p2;
568         clock->vco = refclk * clock->m / (clock->n + 2);
569         clock->dot = clock->vco / clock->p;
570 }
571
/**
 * Returns whether any output on the specified pipe is of the specified type
 */
bool intel_pipe_has_type(struct drm_crtc *crtc, int type)
{
	struct drm_device *dev = crtc->dev;
	struct drm_mode_config *mode_config = &dev->mode_config;
	struct intel_encoder *encoder;

	/* Walk every encoder on the device, checking those bound to @crtc. */
	list_for_each_entry(encoder, &mode_config->encoder_list, base.head)
		if (encoder->base.crtc == crtc && encoder->type == type)
			return true;

	return false;
}
587
/* NOTE: this macro RETURNS from the enclosing function with "false";
 * the debug message is compiled out by default. */
#define INTELPllInvalid(s)   do { /* DRM_DEBUG(s); */ return false; } while (0)
/**
 * Returns whether the given set of divisors are valid for a given refclk with
 * the given connectors.
 */

static bool intel_PLL_is_valid(struct drm_device *dev,
			       const intel_limit_t *limit,
			       const intel_clock_t *clock)
{
	if (clock->p1  < limit->p1.min  || limit->p1.max  < clock->p1)
		INTELPllInvalid("p1 out of range\n");
	if (clock->p   < limit->p.min   || limit->p.max   < clock->p)
		INTELPllInvalid("p out of range\n");
	if (clock->m2  < limit->m2.min  || limit->m2.max  < clock->m2)
		INTELPllInvalid("m2 out of range\n");
	if (clock->m1  < limit->m1.min  || limit->m1.max  < clock->m1)
		INTELPllInvalid("m1 out of range\n");
	/* m1 > m2 is a hardware requirement everywhere but Pineview,
	 * where m1 is always 0. */
	if (clock->m1 <= clock->m2 && !IS_PINEVIEW(dev))
		INTELPllInvalid("m1 <= m2\n");
	if (clock->m   < limit->m.min   || limit->m.max   < clock->m)
		INTELPllInvalid("m out of range\n");
	if (clock->n   < limit->n.min   || limit->n.max   < clock->n)
		INTELPllInvalid("n out of range\n");
	if (clock->vco < limit->vco.min || limit->vco.max < clock->vco)
		INTELPllInvalid("vco out of range\n");
	/* XXX: We may need to be checking "Dot clock" depending on the multiplier,
	 * connector, etc., rather than just a single range.
	 */
	if (clock->dot < limit->dot.min || limit->dot.max < clock->dot)
		INTELPllInvalid("dot out of range\n");

	return true;
}
622
/*
 * Exhaustively search the divisor ranges in @limit for the combination
 * whose dot clock is closest to @target (kHz).  Used on pre-G4X parts.
 *
 * The best candidate is stored in @best_clock; when @match_clock is
 * non-NULL only candidates with the same combined post divider p are
 * considered.  Returns true if any candidate improved on the initial
 * error bound of @target itself.
 */
static bool
intel_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
		    int target, int refclk, intel_clock_t *match_clock,
		    intel_clock_t *best_clock)

{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	intel_clock_t clock;
	int err = target;

	if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) &&
	    (I915_READ(LVDS)) != 0) {
		/*
		 * For LVDS, if the panel is on, just rely on its current
		 * settings for dual-channel.  We haven't figured out how to
		 * reliably set up different single/dual channel state, if we
		 * even can.
		 */
		if (is_dual_link_lvds(dev_priv, LVDS))
			clock.p2 = limit->p2.p2_fast;
		else
			clock.p2 = limit->p2.p2_slow;
	} else {
		if (target < limit->p2.dot_limit)
			clock.p2 = limit->p2.p2_slow;
		else
			clock.p2 = limit->p2.p2_fast;
	}

	memset(best_clock, 0, sizeof(*best_clock));

	for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max;
	     clock.m1++) {
		for (clock.m2 = limit->m2.min;
		     clock.m2 <= limit->m2.max; clock.m2++) {
			/* m1 is always 0 in Pineview */
			/* elsewhere m1 must exceed m2, so stop this row early */
			if (clock.m2 >= clock.m1 && !IS_PINEVIEW(dev))
				break;
			for (clock.n = limit->n.min;
			     clock.n <= limit->n.max; clock.n++) {
				for (clock.p1 = limit->p1.min;
					clock.p1 <= limit->p1.max; clock.p1++) {
					int this_err;

					intel_clock(dev, refclk, &clock);
					if (!intel_PLL_is_valid(dev, limit,
								&clock))
						continue;
					if (match_clock &&
					    clock.p != match_clock->p)
						continue;

					this_err = abs(clock.dot - target);
					if (this_err < err) {
						*best_clock = clock;
						err = this_err;
					}
				}
			}
		}
	}

	return (err != target);
}
688
/*
 * G4X/Ironlake divisor search.  Unlike intel_find_best_PLL() it accepts
 * the first result within ~0.585% of @target, iterates from large m1/m2
 * and small n first (hardware preference), and tightens max_n as better
 * candidates are found.  Returns true iff a candidate within the error
 * bound was stored in @best_clock.
 */
static bool
intel_g4x_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
			int target, int refclk, intel_clock_t *match_clock,
			intel_clock_t *best_clock)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	intel_clock_t clock;
	int max_n;
	bool found;
	/* approximately equals target * 0.00585 */
	int err_most = (target >> 8) + (target >> 9);
	found = false;

	if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
		int lvds_reg;

		if (HAS_PCH_SPLIT(dev))
			lvds_reg = PCH_LVDS;
		else
			lvds_reg = LVDS;
		/* p2 follows the panel's current single/dual channel state. */
		if ((I915_READ(lvds_reg) & LVDS_CLKB_POWER_MASK) ==
		    LVDS_CLKB_POWER_UP)
			clock.p2 = limit->p2.p2_fast;
		else
			clock.p2 = limit->p2.p2_slow;
	} else {
		if (target < limit->p2.dot_limit)
			clock.p2 = limit->p2.p2_slow;
		else
			clock.p2 = limit->p2.p2_fast;
	}

	memset(best_clock, 0, sizeof(*best_clock));
	max_n = limit->n.max;
	/* based on hardware requirement, prefer smaller n to precision */
	for (clock.n = limit->n.min; clock.n <= max_n; clock.n++) {
		/* based on hardware requirement, prefer larger m1,m2 */
		for (clock.m1 = limit->m1.max;
		     clock.m1 >= limit->m1.min; clock.m1--) {
			for (clock.m2 = limit->m2.max;
			     clock.m2 >= limit->m2.min; clock.m2--) {
				for (clock.p1 = limit->p1.max;
				     clock.p1 >= limit->p1.min; clock.p1--) {
					int this_err;

					intel_clock(dev, refclk, &clock);
					if (!intel_PLL_is_valid(dev, limit,
								&clock))
						continue;
					if (match_clock &&
					    clock.p != match_clock->p)
						continue;

					this_err = abs(clock.dot - target);
					if (this_err < err_most) {
						*best_clock = clock;
						err_most = this_err;
						/* don't consider larger n than
						 * the best hit so far */
						max_n = clock.n;
						found = true;
					}
				}
			}
		}
	}
	return found;
}
756
757 static bool
758 intel_find_pll_ironlake_dp(const intel_limit_t *limit, struct drm_crtc *crtc,
759                            int target, int refclk, intel_clock_t *match_clock,
760                            intel_clock_t *best_clock)
761 {
762         struct drm_device *dev = crtc->dev;
763         intel_clock_t clock;
764
765         if (target < 200000) {
766                 clock.n = 1;
767                 clock.p1 = 2;
768                 clock.p2 = 10;
769                 clock.m1 = 12;
770                 clock.m2 = 9;
771         } else {
772                 clock.n = 2;
773                 clock.p1 = 1;
774                 clock.p2 = 10;
775                 clock.m1 = 14;
776                 clock.m2 = 8;
777         }
778         intel_clock(dev, refclk, &clock);
779         memcpy(best_clock, &clock, sizeof(intel_clock_t));
780         return true;
781 }
782
783 /* DisplayPort has only two frequencies, 162MHz and 270MHz */
784 static bool
785 intel_find_pll_g4x_dp(const intel_limit_t *limit, struct drm_crtc *crtc,
786                       int target, int refclk, intel_clock_t *match_clock,
787                       intel_clock_t *best_clock)
788 {
789         intel_clock_t clock;
790         if (target < 200000) {
791                 clock.p1 = 2;
792                 clock.p2 = 10;
793                 clock.n = 2;
794                 clock.m1 = 23;
795                 clock.m2 = 8;
796         } else {
797                 clock.p1 = 1;
798                 clock.p2 = 10;
799                 clock.n = 1;
800                 clock.m1 = 14;
801                 clock.m2 = 2;
802         }
803         clock.m = 5 * (clock.m1 + 2) + (clock.m2 + 2);
804         clock.p = (clock.p1 * clock.p2);
805         clock.dot = 96000 * clock.m / (clock.n + 2) / clock.p;
806         clock.vco = 0;
807         memcpy(best_clock, &clock, sizeof(intel_clock_t));
808         return true;
809 }
810
/**
 * intel_wait_for_vblank - wait for vblank on a given pipe
 * @dev: drm device
 * @pipe: pipe to wait for
 *
 * Wait for vblank to occur on a given pipe.  Needed for various bits of
 * mode setting code.
 */
void intel_wait_for_vblank(struct drm_device *dev, int pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipestat_reg = PIPESTAT(pipe);

	/* Clear existing vblank status. Note this will clear any other
	 * sticky status fields as well.
	 *
	 * This races with i915_driver_irq_handler() with the result
	 * that either function could miss a vblank event.  Here it is not
	 * fatal, as we will either wait upon the next vblank interrupt or
	 * timeout.  Generally speaking intel_wait_for_vblank() is only
	 * called during modeset at which time the GPU should be idle and
	 * should *not* be performing page flips and thus not waiting on
	 * vblanks...
	 * Currently, the result of us stealing a vblank from the irq
	 * handler is that a single frame will be skipped during swapbuffers.
	 */
	I915_WRITE(pipestat_reg,
		   I915_READ(pipestat_reg) | PIPE_VBLANK_INTERRUPT_STATUS);

	/* Wait for vblank interrupt bit to set */
	/* 50ms covers more than one frame at any sane refresh rate; the
	 * timeout path only logs, it does not fail the modeset. */
	if (wait_for(I915_READ(pipestat_reg) &
		     PIPE_VBLANK_INTERRUPT_STATUS,
		     50))
		DRM_DEBUG_KMS("vblank wait timed out\n");
}
846
/*
 * intel_wait_for_pipe_off - wait for pipe to turn off
 * @dev: drm device
 * @pipe: pipe to wait for
 *
 * After disabling a pipe, we can't wait for vblank in the usual way,
 * spinning on the vblank interrupt status bit, since we won't actually
 * see an interrupt when the pipe is disabled.
 *
 * On Gen4 and above:
 *   wait for the pipe register state bit to turn off
 *
 * Otherwise:
 *   wait for the display line value to settle (it usually
 *   ends up stopping at the start of the next frame).
 *
 */
void intel_wait_for_pipe_off(struct drm_device *dev, int pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (INTEL_INFO(dev)->gen >= 4) {
		int reg = PIPECONF(pipe);

		/* Wait for the Pipe State to go off */
		if (wait_for((I915_READ(reg) & I965_PIPECONF_ACTIVE) == 0,
			     100))
			DRM_DEBUG_KMS("pipe_off wait timed out\n");
	} else {
		u32 last_line;
		int reg = PIPEDSL(pipe);
		unsigned long timeout = jiffies + msecs_to_jiffies(100);

		/* Wait for the display line to settle: sample the current
		 * scanline, sleep, and stop once two consecutive reads
		 * match (scanout has halted) or the 100ms budget expires. */
		do {
			last_line = I915_READ(reg) & DSL_LINEMASK;
			mdelay(5);
		} while (((I915_READ(reg) & DSL_LINEMASK) != last_line) &&
			 time_after(timeout, jiffies));
		if (time_after(jiffies, timeout))
			DRM_DEBUG_KMS("pipe_off wait timed out\n");
	}
}
890
/* Human-readable form of an on/off state for the assertion messages. */
static const char *state_string(bool enabled)
{
	if (enabled)
		return "on";
	return "off";
}
895
896 /* Only for pre-ILK configs */
897 static void assert_pll(struct drm_i915_private *dev_priv,
898                        enum pipe pipe, bool state)
899 {
900         int reg;
901         u32 val;
902         bool cur_state;
903
904         reg = DPLL(pipe);
905         val = I915_READ(reg);
906         cur_state = !!(val & DPLL_VCO_ENABLE);
907         WARN(cur_state != state,
908              "PLL state assertion failure (expected %s, current %s)\n",
909              state_string(state), state_string(cur_state));
910 }
911 #define assert_pll_enabled(d, p) assert_pll(d, p, true)
912 #define assert_pll_disabled(d, p) assert_pll(d, p, false)
913
/* For ILK+ */
/* Check the PCH DPLL driving @pipe's transcoder against @state.  On CPT
 * the transcoder-to-PLL routing is indirected through PCH_DPLL_SEL, so the
 * transcoder number is first translated to the actual PLL number. */
static void assert_pch_pll(struct drm_i915_private *dev_priv,
			   enum pipe pipe, bool state)
{
	int reg;
	u32 val;
	bool cur_state;

	if (HAS_PCH_CPT(dev_priv->dev)) {
		u32 pch_dpll;

		pch_dpll = I915_READ(PCH_DPLL_SEL);

		/* Make sure the selected PLL is enabled to the transcoder */
		/* Each transcoder has a 4-bit field; bit 3 = PLL enable. */
		WARN(!((pch_dpll >> (4 * pipe)) & 8),
		     "transcoder %d PLL not enabled\n", pipe);

		/* Convert the transcoder pipe number to a pll pipe number */
		/* Bit 0 of the field selects DPLL A (0) or DPLL B (1). */
		pipe = (pch_dpll >> (4 * pipe)) & 1;
	}

	reg = PCH_DPLL(pipe);
	val = I915_READ(reg);
	cur_state = !!(val & DPLL_VCO_ENABLE);
	WARN(cur_state != state,
	     "PCH PLL state assertion failure (expected %s, current %s)\n",
	     state_string(state), state_string(cur_state));
}
#define assert_pch_pll_enabled(d, p) assert_pch_pll(d, p, true)
#define assert_pch_pll_disabled(d, p) assert_pch_pll(d, p, false)
944
945 static void assert_fdi_tx(struct drm_i915_private *dev_priv,
946                           enum pipe pipe, bool state)
947 {
948         int reg;
949         u32 val;
950         bool cur_state;
951
952         reg = FDI_TX_CTL(pipe);
953         val = I915_READ(reg);
954         cur_state = !!(val & FDI_TX_ENABLE);
955         WARN(cur_state != state,
956              "FDI TX state assertion failure (expected %s, current %s)\n",
957              state_string(state), state_string(cur_state));
958 }
959 #define assert_fdi_tx_enabled(d, p) assert_fdi_tx(d, p, true)
960 #define assert_fdi_tx_disabled(d, p) assert_fdi_tx(d, p, false)
961
962 static void assert_fdi_rx(struct drm_i915_private *dev_priv,
963                           enum pipe pipe, bool state)
964 {
965         int reg;
966         u32 val;
967         bool cur_state;
968
969         reg = FDI_RX_CTL(pipe);
970         val = I915_READ(reg);
971         cur_state = !!(val & FDI_RX_ENABLE);
972         WARN(cur_state != state,
973              "FDI RX state assertion failure (expected %s, current %s)\n",
974              state_string(state), state_string(cur_state));
975 }
976 #define assert_fdi_rx_enabled(d, p) assert_fdi_rx(d, p, true)
977 #define assert_fdi_rx_disabled(d, p) assert_fdi_rx(d, p, false)
978
979 static void assert_fdi_tx_pll_enabled(struct drm_i915_private *dev_priv,
980                                       enum pipe pipe)
981 {
982         int reg;
983         u32 val;
984
985         /* ILK FDI PLL is always enabled */
986         if (dev_priv->info->gen == 5)
987                 return;
988
989         reg = FDI_TX_CTL(pipe);
990         val = I915_READ(reg);
991         WARN(!(val & FDI_TX_PLL_ENABLE), "FDI TX PLL assertion failure, should be active but is disabled\n");
992 }
993
994 static void assert_fdi_rx_pll_enabled(struct drm_i915_private *dev_priv,
995                                       enum pipe pipe)
996 {
997         int reg;
998         u32 val;
999
1000         reg = FDI_RX_CTL(pipe);
1001         val = I915_READ(reg);
1002         WARN(!(val & FDI_RX_PLL_ENABLE), "FDI RX PLL assertion failure, should be active but is disabled\n");
1003 }
1004
/* Warn if the panel power sequencer registers for @pipe are write-locked.
 * The DPLL/LVDS registers are panel-protected, so callers must ensure the
 * panel is either powered off or its registers are unlocked before writing. */
static void assert_panel_unlocked(struct drm_i915_private *dev_priv,
				  enum pipe pipe)
{
	int pp_reg, lvds_reg;
	u32 val;
	enum pipe panel_pipe = PIPE_A;
	bool locked = true;

	/* Register offsets differ between PCH-split and earlier platforms. */
	if (HAS_PCH_SPLIT(dev_priv->dev)) {
		pp_reg = PCH_PP_CONTROL;
		lvds_reg = PCH_LVDS;
	} else {
		pp_reg = PP_CONTROL;
		lvds_reg = LVDS;
	}

	/* Registers count as unlocked when the panel is off or the unlock
	 * key is present in the power control register. */
	val = I915_READ(pp_reg);
	if (!(val & PANEL_POWER_ON) ||
	    ((val & PANEL_UNLOCK_REGS) == PANEL_UNLOCK_REGS))
		locked = false;

	/* Figure out which pipe actually drives the panel (LVDS). */
	if (I915_READ(lvds_reg) & LVDS_PIPEB_SELECT)
		panel_pipe = PIPE_B;

	WARN(panel_pipe == pipe && locked,
	     "panel assertion failure, pipe %c regs locked\n",
	     pipe_name(pipe));
}
1033
1034 void assert_pipe(struct drm_i915_private *dev_priv,
1035                  enum pipe pipe, bool state)
1036 {
1037         int reg;
1038         u32 val;
1039         bool cur_state;
1040
1041         /* if we need the pipe A quirk it must be always on */
1042         if (pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE)
1043                 state = true;
1044
1045         reg = PIPECONF(pipe);
1046         val = I915_READ(reg);
1047         cur_state = !!(val & PIPECONF_ENABLE);
1048         WARN(cur_state != state,
1049              "pipe %c assertion failure (expected %s, current %s)\n",
1050              pipe_name(pipe), state_string(state), state_string(cur_state));
1051 }
1052
1053 static void assert_plane(struct drm_i915_private *dev_priv,
1054                          enum plane plane, bool state)
1055 {
1056         int reg;
1057         u32 val;
1058         bool cur_state;
1059
1060         reg = DSPCNTR(plane);
1061         val = I915_READ(reg);
1062         cur_state = !!(val & DISPLAY_PLANE_ENABLE);
1063         WARN(cur_state != state,
1064              "plane %c assertion failure (expected %s, current %s)\n",
1065              plane_name(plane), state_string(state), state_string(cur_state));
1066 }
1067
1068 #define assert_plane_enabled(d, p) assert_plane(d, p, true)
1069 #define assert_plane_disabled(d, p) assert_plane(d, p, false)
1070
/* Warn if any display plane is still feeding @pipe; a pipe must have no
 * active planes before it can be safely disabled. */
static void assert_planes_disabled(struct drm_i915_private *dev_priv,
				   enum pipe pipe)
{
	int reg, i;
	u32 val;
	int cur_pipe;

	/* Planes are fixed to pipes on ILK+ */
	if (HAS_PCH_SPLIT(dev_priv->dev)) {
		reg = DSPCNTR(pipe);
		val = I915_READ(reg);
		WARN((val & DISPLAY_PLANE_ENABLE),
		     "plane %c assertion failure, should be disabled but not\n",
		     plane_name(pipe));
		return;
	}

	/* Need to check both planes against the pipe */
	/* Pre-ILK, either plane can be routed to either pipe, so read each
	 * plane's pipe-select field and compare. */
	for (i = 0; i < 2; i++) {
		reg = DSPCNTR(i);
		val = I915_READ(reg);
		cur_pipe = (val & DISPPLANE_SEL_PIPE_MASK) >>
			DISPPLANE_SEL_PIPE_SHIFT;
		WARN((val & DISPLAY_PLANE_ENABLE) && pipe == cur_pipe,
		     "plane %c assertion failure, should be off on pipe %c but is still active\n",
		     plane_name(i), pipe_name(pipe));
	}
}
1099
1100 static void assert_pch_refclk_enabled(struct drm_i915_private *dev_priv)
1101 {
1102         u32 val;
1103         bool enabled;
1104
1105         val = I915_READ(PCH_DREF_CONTROL);
1106         enabled = !!(val & (DREF_SSC_SOURCE_MASK | DREF_NONSPREAD_SOURCE_MASK |
1107                             DREF_SUPERSPREAD_SOURCE_MASK));
1108         WARN(!enabled, "PCH refclk assertion failure, should be active but is disabled\n");
1109 }
1110
1111 static void assert_transcoder_disabled(struct drm_i915_private *dev_priv,
1112                                        enum pipe pipe)
1113 {
1114         int reg;
1115         u32 val;
1116         bool enabled;
1117
1118         reg = TRANSCONF(pipe);
1119         val = I915_READ(reg);
1120         enabled = !!(val & TRANS_ENABLE);
1121         WARN(enabled,
1122              "transcoder assertion failed, should be off on pipe %c but is still active\n",
1123              pipe_name(pipe));
1124 }
1125
/* Return true if the DP port described by @val is enabled and routed to
 * transcoder/pipe @pipe.  On CPT the routing lives in TRANS_DP_CTL rather
 * than in the port register itself. */
static bool dp_pipe_enabled(struct drm_i915_private *dev_priv,
			    enum pipe pipe, u32 port_sel, u32 val)
{
	if ((val & DP_PORT_EN) == 0)
		return false;

	if (HAS_PCH_CPT(dev_priv->dev)) {
		u32	trans_dp_ctl_reg = TRANS_DP_CTL(pipe);
		u32	trans_dp_ctl = I915_READ(trans_dp_ctl_reg);
		if ((trans_dp_ctl & TRANS_DP_PORT_SEL_MASK) != port_sel)
			return false;
	} else {
		/* Pre-CPT: the pipe select field sits at bit 30 of the
		 * port register. */
		if ((val & DP_PIPE_MASK) != (pipe << 30))
			return false;
	}
	return true;
}
1143
/* Return true if the HDMI port described by @val is enabled and routed to
 * transcoder/pipe @pipe.  Note the argument order: (dev_priv, pipe, val). */
static bool hdmi_pipe_enabled(struct drm_i915_private *dev_priv,
			      enum pipe pipe, u32 val)
{
	if ((val & PORT_ENABLE) == 0)
		return false;

	if (HAS_PCH_CPT(dev_priv->dev)) {
		if ((val & PORT_TRANS_SEL_MASK) != PORT_TRANS_SEL_CPT(pipe))
			return false;
	} else {
		if ((val & TRANSCODER_MASK) != TRANSCODER(pipe))
			return false;
	}
	return true;
}
1159
/* Return true if the LVDS port described by @val is enabled and routed to
 * transcoder/pipe @pipe.  Note the argument order: (dev_priv, pipe, val). */
static bool lvds_pipe_enabled(struct drm_i915_private *dev_priv,
			      enum pipe pipe, u32 val)
{
	if ((val & LVDS_PORT_EN) == 0)
		return false;

	if (HAS_PCH_CPT(dev_priv->dev)) {
		if ((val & PORT_TRANS_SEL_MASK) != PORT_TRANS_SEL_CPT(pipe))
			return false;
	} else {
		if ((val & LVDS_PIPE_MASK) != LVDS_PIPE(pipe))
			return false;
	}
	return true;
}
1175
/* Return true if the VGA/ADPA DAC described by @val is enabled and routed
 * to transcoder/pipe @pipe.  Note the argument order: (dev_priv, pipe, val). */
static bool adpa_pipe_enabled(struct drm_i915_private *dev_priv,
			      enum pipe pipe, u32 val)
{
	if ((val & ADPA_DAC_ENABLE) == 0)
		return false;
	if (HAS_PCH_CPT(dev_priv->dev)) {
		if ((val & PORT_TRANS_SEL_MASK) != PORT_TRANS_SEL_CPT(pipe))
			return false;
	} else {
		if ((val & ADPA_PIPE_SELECT_MASK) != ADPA_PIPE_SELECT(pipe))
			return false;
	}
	return true;
}
1190
/* Warn if the PCH DP port at @reg is still enabled on transcoder @pipe. */
static void assert_pch_dp_disabled(struct drm_i915_private *dev_priv,
				   enum pipe pipe, int reg, u32 port_sel)
{
	u32 val = I915_READ(reg);
	WARN(dp_pipe_enabled(dev_priv, pipe, port_sel, val),
	     "PCH DP (0x%08x) enabled on transcoder %c, should be disabled\n",
	     reg, pipe_name(pipe));
}
1199
1200 static void assert_pch_hdmi_disabled(struct drm_i915_private *dev_priv,
1201                                      enum pipe pipe, int reg)
1202 {
1203         u32 val = I915_READ(reg);
1204         WARN(hdmi_pipe_enabled(dev_priv, val, pipe),
1205              "PCH HDMI (0x%08x) enabled on transcoder %c, should be disabled\n",
1206              reg, pipe_name(pipe));
1207 }
1208
1209 static void assert_pch_ports_disabled(struct drm_i915_private *dev_priv,
1210                                       enum pipe pipe)
1211 {
1212         int reg;
1213         u32 val;
1214
1215         assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_B, TRANS_DP_PORT_SEL_B);
1216         assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_C, TRANS_DP_PORT_SEL_C);
1217         assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_D, TRANS_DP_PORT_SEL_D);
1218
1219         reg = PCH_ADPA;
1220         val = I915_READ(reg);
1221         WARN(adpa_pipe_enabled(dev_priv, val, pipe),
1222              "PCH VGA enabled on transcoder %c, should be disabled\n",
1223              pipe_name(pipe));
1224
1225         reg = PCH_LVDS;
1226         val = I915_READ(reg);
1227         WARN(lvds_pipe_enabled(dev_priv, val, pipe),
1228              "PCH LVDS enabled on transcoder %c, should be disabled\n",
1229              pipe_name(pipe));
1230
1231         assert_pch_hdmi_disabled(dev_priv, pipe, HDMIB);
1232         assert_pch_hdmi_disabled(dev_priv, pipe, HDMIC);
1233         assert_pch_hdmi_disabled(dev_priv, pipe, HDMID);
1234 }
1235
/**
 * intel_enable_pll - enable a PLL
 * @dev_priv: i915 private structure
 * @pipe: pipe PLL to enable
 *
 * Enable @pipe's PLL so we can start pumping pixels from a plane.  Check to
 * make sure the PLL reg is writable first though, since the panel write
 * protect mechanism may be enabled.
 *
 * Note!  This is for pre-ILK only.
 */
static void intel_enable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
{
	int reg;
	u32 val;

	/* No really, not for ILK+ */
	BUG_ON(dev_priv->info->gen >= 5);

	/* PLL is protected by panel, make sure we can write it */
	if (IS_MOBILE(dev_priv->dev) && !IS_I830(dev_priv->dev))
		assert_panel_unlocked(dev_priv, pipe);

	reg = DPLL(pipe);
	val = I915_READ(reg);
	val |= DPLL_VCO_ENABLE;

	/* We do this three times for luck */
	/* The triple write + 150us warmup delay is a long-standing enable
	 * sequence for these DPLLs; do not collapse it into one write. */
	I915_WRITE(reg, val);
	POSTING_READ(reg);
	udelay(150); /* wait for warmup */
	I915_WRITE(reg, val);
	POSTING_READ(reg);
	udelay(150); /* wait for warmup */
	I915_WRITE(reg, val);
	POSTING_READ(reg);
	udelay(150); /* wait for warmup */
}
1274
/**
 * intel_disable_pll - disable a PLL
 * @dev_priv: i915 private structure
 * @pipe: pipe PLL to disable
 *
 * Disable the PLL for @pipe, making sure the pipe is off first.
 *
 * Note!  This is for pre-ILK only.
 */
static void intel_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
{
	int reg;
	u32 val;

	/* Don't disable pipe A or pipe A PLLs if needed */
	if (pipe == PIPE_A && (dev_priv->quirks & QUIRK_PIPEA_FORCE))
		return;

	/* Make sure the pipe isn't still relying on us */
	assert_pipe_disabled(dev_priv, pipe);

	/* Clear only the VCO enable bit, preserving the rest of the DPLL
	 * configuration for a later re-enable. */
	reg = DPLL(pipe);
	val = I915_READ(reg);
	val &= ~DPLL_VCO_ENABLE;
	I915_WRITE(reg, val);
	POSTING_READ(reg);
}
1302
/**
 * intel_enable_pch_pll - enable PCH PLL
 * @dev_priv: i915 private structure
 * @pipe: pipe PLL to enable
 *
 * The PCH PLL needs to be enabled before the PCH transcoder, since it
 * drives the transcoder clock.
 */
static void intel_enable_pch_pll(struct drm_i915_private *dev_priv,
				 enum pipe pipe)
{
	int reg;
	u32 val;

	/* Only two PCH DPLLs exist (A and B); nothing to do for pipe C. */
	if (pipe > 1)
		return;

	/* PCH only available on ILK+ */
	BUG_ON(dev_priv->info->gen < 5);

	/* PCH refclock must be enabled first */
	assert_pch_refclk_enabled(dev_priv);

	reg = PCH_DPLL(pipe);
	val = I915_READ(reg);
	val |= DPLL_VCO_ENABLE;
	I915_WRITE(reg, val);
	POSTING_READ(reg);
	udelay(200); /* allow the PLL to spin up before it is used */
}
1333
/* Disable the PCH DPLL for @pipe, unless transcoder C is still using it
 * (as selected via PCH_DPLL_SEL), in which case leave it running. */
static void intel_disable_pch_pll(struct drm_i915_private *dev_priv,
				  enum pipe pipe)
{
	int reg;
	u32 val, pll_mask = TRANSC_DPLL_ENABLE | TRANSC_DPLLB_SEL,
		pll_sel = TRANSC_DPLL_ENABLE;

	/* Only two PCH DPLLs exist (A and B); nothing to do for pipe C. */
	if (pipe > 1)
		return;

	/* PCH only available on ILK+ */
	BUG_ON(dev_priv->info->gen < 5);

	/* Make sure transcoder isn't still depending on us */
	assert_transcoder_disabled(dev_priv, pipe);

	/* Build the PCH_DPLL_SEL value that would mean "transcoder C is
	 * clocked by this PLL". */
	if (pipe == 0)
		pll_sel |= TRANSC_DPLLA_SEL;
	else if (pipe == 1)
		pll_sel |= TRANSC_DPLLB_SEL;


	/* Transcoder C still uses this PLL, so keep it enabled. */
	if ((I915_READ(PCH_DPLL_SEL) & pll_mask) == pll_sel)
		return;

	reg = PCH_DPLL(pipe);
	val = I915_READ(reg);
	val &= ~DPLL_VCO_ENABLE;
	I915_WRITE(reg, val);
	POSTING_READ(reg);
	udelay(200);
}
1366
1367 static void intel_enable_transcoder(struct drm_i915_private *dev_priv,
1368                                     enum pipe pipe)
1369 {
1370         int reg;
1371         u32 val, pipeconf_val;
1372         struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
1373
1374         /* PCH only available on ILK+ */
1375         BUG_ON(dev_priv->info->gen < 5);
1376
1377         /* Make sure PCH DPLL is enabled */
1378         assert_pch_pll_enabled(dev_priv, pipe);
1379
1380         /* FDI must be feeding us bits for PCH ports */
1381         assert_fdi_tx_enabled(dev_priv, pipe);
1382         assert_fdi_rx_enabled(dev_priv, pipe);
1383
1384         reg = TRANSCONF(pipe);
1385         val = I915_READ(reg);
1386         pipeconf_val = I915_READ(PIPECONF(pipe));
1387
1388         if (HAS_PCH_IBX(dev_priv->dev)) {
1389                 /*
1390                  * make the BPC in transcoder be consistent with
1391                  * that in pipeconf reg.
1392                  */
1393                 val &= ~PIPE_BPC_MASK;
1394                 val |= pipeconf_val & PIPE_BPC_MASK;
1395         }
1396
1397         val &= ~TRANS_INTERLACE_MASK;
1398         if ((pipeconf_val & PIPECONF_INTERLACE_MASK) == PIPECONF_INTERLACED_ILK)
1399                 if (HAS_PCH_IBX(dev_priv->dev) &&
1400                     intel_pipe_has_type(crtc, INTEL_OUTPUT_SDVO))
1401                         val |= TRANS_LEGACY_INTERLACED_ILK;
1402                 else
1403                         val |= TRANS_INTERLACED;
1404         else
1405                 val |= TRANS_PROGRESSIVE;
1406
1407         I915_WRITE(reg, val | TRANS_ENABLE);
1408         if (wait_for(I915_READ(reg) & TRANS_STATE_ENABLE, 100))
1409                 DRM_ERROR("failed to enable transcoder %d\n", pipe);
1410 }
1411
/* Disable the PCH transcoder for @pipe, after checking that FDI and all
 * PCH ports that depend on it are already off. */
static void intel_disable_transcoder(struct drm_i915_private *dev_priv,
				     enum pipe pipe)
{
	int reg;
	u32 val;

	/* FDI relies on the transcoder */
	assert_fdi_tx_disabled(dev_priv, pipe);
	assert_fdi_rx_disabled(dev_priv, pipe);

	/* Ports must be off as well */
	assert_pch_ports_disabled(dev_priv, pipe);

	reg = TRANSCONF(pipe);
	val = I915_READ(reg);
	val &= ~TRANS_ENABLE;
	I915_WRITE(reg, val);
	/* wait for PCH transcoder off, transcoder state */
	if (wait_for((I915_READ(reg) & TRANS_STATE_ENABLE) == 0, 50))
		DRM_ERROR("failed to disable transcoder %d\n", pipe);
}
1433
/**
 * intel_enable_pipe - enable a pipe, asserting requirements
 * @dev_priv: i915 private structure
 * @pipe: pipe to enable
 * @pch_port: on ILK+, is this pipe driving a PCH port or not
 *
 * Enable @pipe, making sure that various hardware specific requirements
 * are met, if applicable, e.g. PLL enabled, LVDS pairs enabled, etc.
 *
 * @pipe should be %PIPE_A or %PIPE_B.
 *
 * Will wait until the pipe is actually running (i.e. first vblank) before
 * returning.
 */
static void intel_enable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe,
			      bool pch_port)
{
	int reg;
	u32 val;

	/*
	 * A pipe without a PLL won't actually be able to drive bits from
	 * a plane.  On ILK+ the pipe PLLs are integrated, so we don't
	 * need the check.
	 */
	if (!HAS_PCH_SPLIT(dev_priv->dev))
		assert_pll_enabled(dev_priv, pipe);
	else {
		if (pch_port) {
			/* if driving the PCH, we need FDI enabled */
			assert_fdi_rx_pll_enabled(dev_priv, pipe);
			assert_fdi_tx_pll_enabled(dev_priv, pipe);
		}
		/* FIXME: assert CPU port conditions for SNB+ */
	}

	/* Already enabled: nothing to do. */
	reg = PIPECONF(pipe);
	val = I915_READ(reg);
	if (val & PIPECONF_ENABLE)
		return;

	I915_WRITE(reg, val | PIPECONF_ENABLE);
	intel_wait_for_vblank(dev_priv->dev, pipe);
}
1478
/**
 * intel_disable_pipe - disable a pipe, asserting requirements
 * @dev_priv: i915 private structure
 * @pipe: pipe to disable
 *
 * Disable @pipe, making sure that various hardware specific requirements
 * are met, if applicable, e.g. plane disabled, panel fitter off, etc.
 *
 * @pipe should be %PIPE_A or %PIPE_B.
 *
 * Will wait until the pipe has shut down before returning.
 */
static void intel_disable_pipe(struct drm_i915_private *dev_priv,
			       enum pipe pipe)
{
	int reg;
	u32 val;

	/*
	 * Make sure planes won't keep trying to pump pixels to us,
	 * or we might hang the display.
	 */
	assert_planes_disabled(dev_priv, pipe);

	/* Don't disable pipe A or pipe A PLLs if needed */
	if (pipe == PIPE_A && (dev_priv->quirks & QUIRK_PIPEA_FORCE))
		return;

	/* Already disabled: nothing to do. */
	reg = PIPECONF(pipe);
	val = I915_READ(reg);
	if ((val & PIPECONF_ENABLE) == 0)
		return;

	I915_WRITE(reg, val & ~PIPECONF_ENABLE);
	intel_wait_for_pipe_off(dev_priv->dev, pipe);
}
1515
/*
 * Plane regs are double buffered, going from enabled->disabled needs a
 * trigger in order to latch.  The display address reg provides this.
 */
static void intel_flush_display_plane(struct drm_i915_private *dev_priv,
				      enum plane plane)
{
	/* Rewriting the address registers with their current values is
	 * enough to trigger the double-buffer latch. */
	I915_WRITE(DSPADDR(plane), I915_READ(DSPADDR(plane)));
	I915_WRITE(DSPSURF(plane), I915_READ(DSPSURF(plane)));
}
1526
/**
 * intel_enable_plane - enable a display plane on a given pipe
 * @dev_priv: i915 private structure
 * @plane: plane to enable
 * @pipe: pipe being fed
 *
 * Enable @plane on @pipe, making sure that @pipe is running first.
 */
static void intel_enable_plane(struct drm_i915_private *dev_priv,
			       enum plane plane, enum pipe pipe)
{
	int reg;
	u32 val;

	/* If the pipe isn't enabled, we can't pump pixels and may hang */
	assert_pipe_enabled(dev_priv, pipe);

	/* Already enabled: nothing to do. */
	reg = DSPCNTR(plane);
	val = I915_READ(reg);
	if (val & DISPLAY_PLANE_ENABLE)
		return;

	I915_WRITE(reg, val | DISPLAY_PLANE_ENABLE);
	/* Flush to latch the double-buffered plane regs, then wait a
	 * vblank so the update has actually taken effect. */
	intel_flush_display_plane(dev_priv, plane);
	intel_wait_for_vblank(dev_priv->dev, pipe);
}
1553
/**
 * intel_disable_plane - disable a display plane
 * @dev_priv: i915 private structure
 * @plane: plane to disable
 * @pipe: pipe consuming the data
 *
 * Disable @plane; should be an independent operation.
 */
static void intel_disable_plane(struct drm_i915_private *dev_priv,
				enum plane plane, enum pipe pipe)
{
	int reg;
	u32 val;

	/* Already disabled: nothing to do. */
	reg = DSPCNTR(plane);
	val = I915_READ(reg);
	if ((val & DISPLAY_PLANE_ENABLE) == 0)
		return;

	I915_WRITE(reg, val & ~DISPLAY_PLANE_ENABLE);
	/* Flush to latch the double-buffered plane regs, then wait a
	 * vblank so the disable has actually taken effect. */
	intel_flush_display_plane(dev_priv, plane);
	intel_wait_for_vblank(dev_priv->dev, pipe);
}
1577
/* Turn off the PCH DP port at @reg if it is currently driving @pipe. */
static void disable_pch_dp(struct drm_i915_private *dev_priv,
			   enum pipe pipe, int reg, u32 port_sel)
{
	u32 val = I915_READ(reg);
	if (dp_pipe_enabled(dev_priv, pipe, port_sel, val)) {
		DRM_DEBUG_KMS("Disabling pch dp %x on pipe %d\n", reg, pipe);
		I915_WRITE(reg, val & ~DP_PORT_EN);
	}
}
1587
1588 static void disable_pch_hdmi(struct drm_i915_private *dev_priv,
1589                              enum pipe pipe, int reg)
1590 {
1591         u32 val = I915_READ(reg);
1592         if (hdmi_pipe_enabled(dev_priv, val, pipe)) {
1593                 DRM_DEBUG_KMS("Disabling pch HDMI %x on pipe %d\n",
1594                               reg, pipe);
1595                 I915_WRITE(reg, val & ~PORT_ENABLE);
1596         }
1597 }
1598
/* Disable any ports connected to this transcoder */
static void intel_disable_pch_ports(struct drm_i915_private *dev_priv,
                                    enum pipe pipe)
{
        u32 reg, val;

        /* Unlock the panel power-sequencer protected registers first,
         * otherwise the port-enable writes below may be ignored. */
        val = I915_READ(PCH_PP_CONTROL);
        I915_WRITE(PCH_PP_CONTROL, val | PANEL_UNLOCK_REGS);

        /* DisplayPort B/C/D, each only if routed to this pipe. */
        disable_pch_dp(dev_priv, pipe, PCH_DP_B, TRANS_DP_PORT_SEL_B);
        disable_pch_dp(dev_priv, pipe, PCH_DP_C, TRANS_DP_PORT_SEL_C);
        disable_pch_dp(dev_priv, pipe, PCH_DP_D, TRANS_DP_PORT_SEL_D);

        /* Analog CRT DAC. */
        reg = PCH_ADPA;
        val = I915_READ(reg);
        if (adpa_pipe_enabled(dev_priv, val, pipe))
                I915_WRITE(reg, val & ~ADPA_DAC_ENABLE);

        /* LVDS: after disabling, post the write and give the panel a
         * short settle delay before touching the HDMI ports. */
        reg = PCH_LVDS;
        val = I915_READ(reg);
        if (lvds_pipe_enabled(dev_priv, val, pipe)) {
                DRM_DEBUG_KMS("disable lvds on pipe %d val 0x%08x\n", pipe, val);
                I915_WRITE(reg, val & ~LVDS_PORT_EN);
                POSTING_READ(reg);
                udelay(100);
        }

        /* HDMI B/C/D, each only if routed to this pipe. */
        disable_pch_hdmi(dev_priv, pipe, HDMIB);
        disable_pch_hdmi(dev_priv, pipe, HDMIC);
        disable_pch_hdmi(dev_priv, pipe, HDMID);
}
1630
1631 static void i8xx_disable_fbc(struct drm_device *dev)
1632 {
1633         struct drm_i915_private *dev_priv = dev->dev_private;
1634         u32 fbc_ctl;
1635
1636         /* Disable compression */
1637         fbc_ctl = I915_READ(FBC_CONTROL);
1638         if ((fbc_ctl & FBC_CTL_EN) == 0)
1639                 return;
1640
1641         fbc_ctl &= ~FBC_CTL_EN;
1642         I915_WRITE(FBC_CONTROL, fbc_ctl);
1643
1644         /* Wait for compressing bit to clear */
1645         if (wait_for((I915_READ(FBC_STATUS) & FBC_STAT_COMPRESSING) == 0, 10)) {
1646                 DRM_DEBUG_KMS("FBC idle timed out\n");
1647                 return;
1648         }
1649
1650         DRM_DEBUG_KMS("disabled FBC\n");
1651 }
1652
/* Program and enable framebuffer compression using the original
 * 8xx/9xx FBC_CONTROL register interface.
 *
 * @crtc: scanout pipe whose framebuffer should be compressed
 * @interval: periodic recompression interval (FBC_CTL_INTERVAL field)
 */
static void i8xx_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
{
        struct drm_device *dev = crtc->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_framebuffer *fb = crtc->fb;
        struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
        struct drm_i915_gem_object *obj = intel_fb->obj;
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        int cfb_pitch;
        int plane, i;
        u32 fbc_ctl, fbc_ctl2;

        /* Clamp the compressed-buffer pitch to the smaller of what the
         * stolen-memory buffer supports and the actual fb pitch. */
        cfb_pitch = dev_priv->cfb_size / FBC_LL_SIZE;
        if (fb->pitches[0] < cfb_pitch)
                cfb_pitch = fb->pitches[0];

        /* FBC_CTL wants 64B units */
        cfb_pitch = (cfb_pitch / 64) - 1;
        plane = intel_crtc->plane == 0 ? FBC_CTL_PLANEA : FBC_CTL_PLANEB;

        /* Clear old tags */
        for (i = 0; i < (FBC_LL_SIZE / 32) + 1; i++)
                I915_WRITE(FBC_TAG + (i * 4), 0);

        /* Set it up... */
        fbc_ctl2 = FBC_CTL_FENCE_DBL | FBC_CTL_IDLE_IMM | FBC_CTL_CPU_FENCE;
        fbc_ctl2 |= plane;
        I915_WRITE(FBC_CONTROL2, fbc_ctl2);
        I915_WRITE(FBC_FENCE_OFF, crtc->y);

        /* enable it... */
        fbc_ctl = FBC_CTL_EN | FBC_CTL_PERIODIC;
        if (IS_I945GM(dev))
                fbc_ctl |= FBC_CTL_C3_IDLE; /* 945 needs special SR handling */
        fbc_ctl |= (cfb_pitch & 0xff) << FBC_CTL_STRIDE_SHIFT;
        /* NOTE(review): 0x2fff looks like an unusual mask for the
         * interval field — matches the historical register layout,
         * confirm against the PRM before changing. */
        fbc_ctl |= (interval & 0x2fff) << FBC_CTL_INTERVAL_SHIFT;
        fbc_ctl |= obj->fence_reg;
        I915_WRITE(FBC_CONTROL, fbc_ctl);

        DRM_DEBUG_KMS("enabled FBC, pitch %d, yoff %d, plane %d, ",
                      cfb_pitch, crtc->y, intel_crtc->plane);
}
1695
1696 static bool i8xx_fbc_enabled(struct drm_device *dev)
1697 {
1698         struct drm_i915_private *dev_priv = dev->dev_private;
1699
1700         return I915_READ(FBC_CONTROL) & FBC_CTL_EN;
1701 }
1702
/* Program and enable framebuffer compression on G4x-class hardware
 * via the DPFC register interface.
 *
 * @crtc: scanout pipe whose framebuffer should be compressed
 * @interval: recompression timer count for DPFC_RECOMP_CTL
 */
static void g4x_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
{
        struct drm_device *dev = crtc->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_framebuffer *fb = crtc->fb;
        struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
        struct drm_i915_gem_object *obj = intel_fb->obj;
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        int plane = intel_crtc->plane == 0 ? DPFC_CTL_PLANEA : DPFC_CTL_PLANEB;
        unsigned long stall_watermark = 200;
        u32 dpfc_ctl;

        /* Select the plane, self-refresh behaviour, compression limit
         * and the CPU fence used to track front-buffer writes. */
        dpfc_ctl = plane | DPFC_SR_EN | DPFC_CTL_LIMIT_1X;
        dpfc_ctl |= DPFC_CTL_FENCE_EN | obj->fence_reg;
        I915_WRITE(DPFC_CHICKEN, DPFC_HT_MODIFY);

        /* Stall watermark and periodic recompression timer. */
        I915_WRITE(DPFC_RECOMP_CTL, DPFC_RECOMP_STALL_EN |
                   (stall_watermark << DPFC_RECOMP_STALL_WM_SHIFT) |
                   (interval << DPFC_RECOMP_TIMER_COUNT_SHIFT));
        I915_WRITE(DPFC_FENCE_YOFF, crtc->y);

        /* enable it... */
        I915_WRITE(DPFC_CONTROL, I915_READ(DPFC_CONTROL) | DPFC_CTL_EN);

        DRM_DEBUG_KMS("enabled fbc on plane %d\n", intel_crtc->plane);
}
1729
1730 static void g4x_disable_fbc(struct drm_device *dev)
1731 {
1732         struct drm_i915_private *dev_priv = dev->dev_private;
1733         u32 dpfc_ctl;
1734
1735         /* Disable compression */
1736         dpfc_ctl = I915_READ(DPFC_CONTROL);
1737         if (dpfc_ctl & DPFC_CTL_EN) {
1738                 dpfc_ctl &= ~DPFC_CTL_EN;
1739                 I915_WRITE(DPFC_CONTROL, dpfc_ctl);
1740
1741                 DRM_DEBUG_KMS("disabled FBC\n");
1742         }
1743 }
1744
1745 static bool g4x_fbc_enabled(struct drm_device *dev)
1746 {
1747         struct drm_i915_private *dev_priv = dev->dev_private;
1748
1749         return I915_READ(DPFC_CONTROL) & DPFC_CTL_EN;
1750 }
1751
/* Poke the blitter so it notifies the FBC unit of writes; without
 * this the compressed buffer can go stale after blits on SNB.
 *
 * ECOSKPD uses a masked-write protocol: the bit shifted up by
 * GEN6_BLITTER_LOCK_SHIFT acts as a write-enable for the matching
 * low bit, hence the three-step set-mask / set-bit / clear-mask
 * sequence below. Statement order is hardware-mandated. */
static void sandybridge_blit_fbc_update(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        u32 blt_ecoskpd;

        /* Make sure blitter notifies FBC of writes */
        /* Forcewake must be held while touching GT registers. */
        gen6_gt_force_wake_get(dev_priv);
        blt_ecoskpd = I915_READ(GEN6_BLITTER_ECOSKPD);
        /* Step 1: arm the write-mask for the FBC_NOTIFY bit. */
        blt_ecoskpd |= GEN6_BLITTER_FBC_NOTIFY <<
                GEN6_BLITTER_LOCK_SHIFT;
        I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd);
        /* Step 2: set the bit itself while the mask is armed. */
        blt_ecoskpd |= GEN6_BLITTER_FBC_NOTIFY;
        I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd);
        /* Step 3: drop the write-mask again. */
        blt_ecoskpd &= ~(GEN6_BLITTER_FBC_NOTIFY <<
                         GEN6_BLITTER_LOCK_SHIFT);
        I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd);
        POSTING_READ(GEN6_BLITTER_ECOSKPD);
        gen6_gt_force_wake_put(dev_priv);
}
1771
/* Program and enable framebuffer compression on Ironlake+ hardware
 * (ILK_DPFC register interface, with extra SNB fence setup).
 *
 * @crtc: scanout pipe whose framebuffer should be compressed
 * @interval: recompression timer count for ILK_DPFC_RECOMP_CTL
 */
static void ironlake_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
{
        struct drm_device *dev = crtc->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_framebuffer *fb = crtc->fb;
        struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
        struct drm_i915_gem_object *obj = intel_fb->obj;
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        int plane = intel_crtc->plane == 0 ? DPFC_CTL_PLANEA : DPFC_CTL_PLANEB;
        unsigned long stall_watermark = 200;
        u32 dpfc_ctl;

        /* Preserve only the reserved bits, then rebuild the control
         * word: plane select, 1x compression limit, fence tracking. */
        dpfc_ctl = I915_READ(ILK_DPFC_CONTROL);
        dpfc_ctl &= DPFC_RESERVED;
        dpfc_ctl |= (plane | DPFC_CTL_LIMIT_1X);
        /* Set persistent mode for front-buffer rendering, ala X. */
        dpfc_ctl |= DPFC_CTL_PERSISTENT_MODE;
        dpfc_ctl |= (DPFC_CTL_FENCE_EN | obj->fence_reg);
        I915_WRITE(ILK_DPFC_CHICKEN, DPFC_HT_MODIFY);

        I915_WRITE(ILK_DPFC_RECOMP_CTL, DPFC_RECOMP_STALL_EN |
                   (stall_watermark << DPFC_RECOMP_STALL_WM_SHIFT) |
                   (interval << DPFC_RECOMP_TIMER_COUNT_SHIFT));
        I915_WRITE(ILK_DPFC_FENCE_YOFF, crtc->y);
        /* Point the FBC unit at the render target in the GTT. */
        I915_WRITE(ILK_FBC_RT_BASE, obj->gtt_offset | ILK_FBC_RT_VALID);
        /* enable it... */
        I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);

        if (IS_GEN6(dev)) {
                /* SNB additionally needs the CPU fence programmed and
                 * the blitter told to notify FBC of writes. */
                I915_WRITE(SNB_DPFC_CTL_SA,
                           SNB_CPU_FENCE_ENABLE | obj->fence_reg);
                I915_WRITE(DPFC_CPU_FENCE_OFFSET, crtc->y);
                sandybridge_blit_fbc_update(dev);
        }

        DRM_DEBUG_KMS("enabled fbc on plane %d\n", intel_crtc->plane);
}
1809
1810 static void ironlake_disable_fbc(struct drm_device *dev)
1811 {
1812         struct drm_i915_private *dev_priv = dev->dev_private;
1813         u32 dpfc_ctl;
1814
1815         /* Disable compression */
1816         dpfc_ctl = I915_READ(ILK_DPFC_CONTROL);
1817         if (dpfc_ctl & DPFC_CTL_EN) {
1818                 dpfc_ctl &= ~DPFC_CTL_EN;
1819                 I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl);
1820
1821                 DRM_DEBUG_KMS("disabled FBC\n");
1822         }
1823 }
1824
1825 static bool ironlake_fbc_enabled(struct drm_device *dev)
1826 {
1827         struct drm_i915_private *dev_priv = dev->dev_private;
1828
1829         return I915_READ(ILK_DPFC_CONTROL) & DPFC_CTL_EN;
1830 }
1831
1832 bool intel_fbc_enabled(struct drm_device *dev)
1833 {
1834         struct drm_i915_private *dev_priv = dev->dev_private;
1835
1836         if (!dev_priv->display.fbc_enabled)
1837                 return false;
1838
1839         return dev_priv->display.fbc_enabled(dev);
1840 }
1841
/* Deferred-work body for the delayed FBC enable scheduled by
 * intel_enable_fbc(). Runs under struct_mutex and only acts if this
 * work item is still the one the device considers pending — a newer
 * enable/cancel may have superseded it in the meantime. */
static void intel_fbc_work_fn(struct work_struct *__work)
{
        struct intel_fbc_work *work =
                container_of(to_delayed_work(__work),
                             struct intel_fbc_work, work);
        struct drm_device *dev = work->crtc->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;

        mutex_lock(&dev->struct_mutex);
        if (work == dev_priv->fbc_work) {
                /* Double check that we haven't switched fb without cancelling
                 * the prior work.
                 */
                if (work->crtc->fb == work->fb) {
                        dev_priv->display.enable_fbc(work->crtc,
                                                     work->interval);

                        /* Record what we enabled so intel_update_fbc()
                         * can detect an unchanged configuration. */
                        dev_priv->cfb_plane = to_intel_crtc(work->crtc)->plane;
                        dev_priv->cfb_fb = work->crtc->fb->base.id;
                        dev_priv->cfb_y = work->crtc->y;
                }

                dev_priv->fbc_work = NULL;
        }
        mutex_unlock(&dev->struct_mutex);

        /* The work item owns its allocation; free it on completion. */
        kfree(work);
}
1870
/* Cancel any pending delayed FBC enable. Caller holds struct_mutex
 * (the same lock intel_fbc_work_fn() takes), which is what makes the
 * asynchronous cancellation below safe. */
static void intel_cancel_fbc_work(struct drm_i915_private *dev_priv)
{
        if (dev_priv->fbc_work == NULL)
                return;

        DRM_DEBUG_KMS("cancelling pending FBC enable\n");

        /* Synchronisation is provided by struct_mutex and checking of
         * dev_priv->fbc_work, so we can perform the cancellation
         * entirely asynchronously.
         */
        if (cancel_delayed_work(&dev_priv->fbc_work->work))
                /* tasklet was killed before being run, clean up */
                kfree(dev_priv->fbc_work);

        /* Mark the work as no longer wanted so that if it does
         * wake-up (because the work was already running and waiting
         * for our mutex), it will discover that is no longer
         * necessary to run.
         */
        dev_priv->fbc_work = NULL;
}
1893
/* Schedule FBC to be enabled on @crtc after a short delay.
 *
 * @crtc: scanout pipe to compress
 * @interval: recompression interval forwarded to the platform hook
 *
 * If the work allocation fails we fall back to enabling FBC
 * synchronously, losing only the settle delay. */
static void intel_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
{
        struct intel_fbc_work *work;
        struct drm_device *dev = crtc->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;

        /* No platform hook -> FBC unsupported, nothing to schedule. */
        if (!dev_priv->display.enable_fbc)
                return;

        /* Only one pending enable may exist at a time. */
        intel_cancel_fbc_work(dev_priv);

        work = kzalloc(sizeof *work, GFP_KERNEL);
        if (work == NULL) {
                dev_priv->display.enable_fbc(crtc, interval);
                return;
        }

        work->crtc = crtc;
        work->fb = crtc->fb;
        work->interval = interval;
        INIT_DELAYED_WORK(&work->work, intel_fbc_work_fn);

        dev_priv->fbc_work = work;

        DRM_DEBUG_KMS("scheduling delayed FBC enable\n");

        /* Delay the actual enabling to let pageflipping cease and the
         * display to settle before starting the compression. Note that
         * this delay also serves a second purpose: it allows for a
         * vblank to pass after disabling the FBC before we attempt
         * to modify the control registers.
         *
         * A more complicated solution would involve tracking vblanks
         * following the termination of the page-flipping sequence
         * and indeed performing the enable as a co-routine and not
         * waiting synchronously upon the vblank.
         */
        schedule_delayed_work(&work->work, msecs_to_jiffies(50));
}
1933
1934 void intel_disable_fbc(struct drm_device *dev)
1935 {
1936         struct drm_i915_private *dev_priv = dev->dev_private;
1937
1938         intel_cancel_fbc_work(dev_priv);
1939
1940         if (!dev_priv->display.disable_fbc)
1941                 return;
1942
1943         dev_priv->display.disable_fbc(dev);
1944         dev_priv->cfb_plane = -1;
1945 }
1946
1947 /**
1948  * intel_update_fbc - enable/disable FBC as needed
1949  * @dev: the drm_device
1950  *
1951  * Set up the framebuffer compression hardware at mode set time.  We
1952  * enable it if possible:
1953  *   - plane A only (on pre-965)
 *   - no pixel multiply/line duplication
1955  *   - no alpha buffer discard
1956  *   - no dual wide
1957  *   - framebuffer <= 2048 in width, 1536 in height
1958  *
1959  * We can't assume that any compression will take place (worst case),
1960  * so the compressed buffer has to be the same size as the uncompressed
1961  * one.  It also must reside (along with the line length buffer) in
1962  * stolen memory.
1963  *
1964  * We need to enable/disable FBC on a global basis.
1965  */
static void intel_update_fbc(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_crtc *crtc = NULL, *tmp_crtc;
        struct intel_crtc *intel_crtc;
        struct drm_framebuffer *fb;
        struct intel_framebuffer *intel_fb;
        struct drm_i915_gem_object *obj;
        int enable_fbc;

        DRM_DEBUG_KMS("\n");

        /* Power-saving features (including FBC) globally disabled. */
        if (!i915_powersave)
                return;

        if (!I915_HAS_FBC(dev))
                return;

        /*
         * If FBC is already on, we just have to verify that we can
         * keep it that way...
         * Need to disable if:
         *   - more than one pipe is active
         *   - changing FBC params (stride, fence, mode)
         *   - new fb is too large to fit in compressed buffer
         *   - going to an unsupported config (interlace, pixel multiply, etc.)
         */
        list_for_each_entry(tmp_crtc, &dev->mode_config.crtc_list, head) {
                if (tmp_crtc->enabled && tmp_crtc->fb) {
                        if (crtc) {
                                DRM_DEBUG_KMS("more than one pipe active, disabling compression\n");
                                dev_priv->no_fbc_reason = FBC_MULTIPLE_PIPES;
                                goto out_disable;
                        }
                        crtc = tmp_crtc;
                }
        }

        if (!crtc || crtc->fb == NULL) {
                DRM_DEBUG_KMS("no output, disabling\n");
                dev_priv->no_fbc_reason = FBC_NO_OUTPUT;
                goto out_disable;
        }

        intel_crtc = to_intel_crtc(crtc);
        fb = crtc->fb;
        intel_fb = to_intel_framebuffer(fb);
        obj = intel_fb->obj;

        /* Module parameter: <0 means "use per-chip default", which is
         * off for gen6 and earlier. */
        enable_fbc = i915_enable_fbc;
        if (enable_fbc < 0) {
                DRM_DEBUG_KMS("fbc set to per-chip default\n");
                enable_fbc = 1;
                if (INTEL_INFO(dev)->gen <= 6)
                        enable_fbc = 0;
        }
        if (!enable_fbc) {
                DRM_DEBUG_KMS("fbc disabled per module param\n");
                dev_priv->no_fbc_reason = FBC_MODULE_PARAM;
                goto out_disable;
        }
        /* Worst case is zero compression, so the compressed buffer in
         * stolen memory must hold the whole uncompressed fb. */
        if (intel_fb->obj->base.size > dev_priv->cfb_size) {
                DRM_DEBUG_KMS("framebuffer too large, disabling "
                              "compression\n");
                dev_priv->no_fbc_reason = FBC_STOLEN_TOO_SMALL;
                goto out_disable;
        }
        if ((crtc->mode.flags & DRM_MODE_FLAG_INTERLACE) ||
            (crtc->mode.flags & DRM_MODE_FLAG_DBLSCAN)) {
                DRM_DEBUG_KMS("mode incompatible with compression, "
                              "disabling\n");
                dev_priv->no_fbc_reason = FBC_UNSUPPORTED_MODE;
                goto out_disable;
        }
        if ((crtc->mode.hdisplay > 2048) ||
            (crtc->mode.vdisplay > 1536)) {
                DRM_DEBUG_KMS("mode too large for compression, disabling\n");
                dev_priv->no_fbc_reason = FBC_MODE_TOO_LARGE;
                goto out_disable;
        }
        /* 915GM/945GM can only compress plane A. */
        if ((IS_I915GM(dev) || IS_I945GM(dev)) && intel_crtc->plane != 0) {
                DRM_DEBUG_KMS("plane not 0, disabling compression\n");
                dev_priv->no_fbc_reason = FBC_BAD_PLANE;
                goto out_disable;
        }

        /* The use of a CPU fence is mandatory in order to detect writes
         * by the CPU to the scanout and trigger updates to the FBC.
         */
        if (obj->tiling_mode != I915_TILING_X ||
            obj->fence_reg == I915_FENCE_REG_NONE) {
                DRM_DEBUG_KMS("framebuffer not tiled or fenced, disabling compression\n");
                dev_priv->no_fbc_reason = FBC_NOT_TILED;
                goto out_disable;
        }

        /* If the kernel debugger is active, always disable compression */
        if (in_dbg_master())
                goto out_disable;

        /* If the scanout has not changed, don't modify the FBC settings.
         * Note that we make the fundamental assumption that the fb->obj
         * cannot be unpinned (and have its GTT offset and fence revoked)
         * without first being decoupled from the scanout and FBC disabled.
         */
        if (dev_priv->cfb_plane == intel_crtc->plane &&
            dev_priv->cfb_fb == fb->base.id &&
            dev_priv->cfb_y == crtc->y)
                return;

        if (intel_fbc_enabled(dev)) {
                /* We update FBC along two paths, after changing fb/crtc
                 * configuration (modeswitching) and after page-flipping
                 * finishes. For the latter, we know that not only did
                 * we disable the FBC at the start of the page-flip
                 * sequence, but also more than one vblank has passed.
                 *
                 * For the former case of modeswitching, it is possible
                 * to switch between two FBC valid configurations
                 * instantaneously so we do need to disable the FBC
                 * before we can modify its control registers. We also
                 * have to wait for the next vblank for that to take
                 * effect. However, since we delay enabling FBC we can
                 * assume that a vblank has passed since disabling and
                 * that we can safely alter the registers in the deferred
                 * callback.
                 *
                 * In the scenario that we go from a valid to invalid
                 * and then back to valid FBC configuration we have
                 * no strict enforcement that a vblank occurred since
                 * disabling the FBC. However, along all current pipe
                 * disabling paths we do need to wait for a vblank at
                 * some point. And we wait before enabling FBC anyway.
                 */
                DRM_DEBUG_KMS("disabling active FBC for update\n");
                intel_disable_fbc(dev);
        }

        intel_enable_fbc(crtc, 500);
        return;

out_disable:
        /* Multiple disables should be harmless */
        if (intel_fbc_enabled(dev)) {
                DRM_DEBUG_KMS("unsupported config, disabling FBC\n");
                intel_disable_fbc(dev);
        }
}
2114
/* Pin @obj into the GTT for scanout with the tiling-appropriate
 * alignment and, for tiled buffers, install and pin a fence.
 *
 * @pipelined: ring (if any) the pin may be pipelined against.
 *
 * Returns 0 on success or a negative error code; on failure the
 * object is left unpinned. mm.interruptible is forced off for the
 * duration so the operation cannot be aborted by a signal. */
int
intel_pin_and_fence_fb_obj(struct drm_device *dev,
                           struct drm_i915_gem_object *obj,
                           struct intel_ring_buffer *pipelined)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        u32 alignment;
        int ret;

        /* Scanout alignment requirements differ per generation and
         * tiling mode. */
        switch (obj->tiling_mode) {
        case I915_TILING_NONE:
                if (IS_BROADWATER(dev) || IS_CRESTLINE(dev))
                        alignment = 128 * 1024;
                else if (INTEL_INFO(dev)->gen >= 4)
                        alignment = 4 * 1024;
                else
                        alignment = 64 * 1024;
                break;
        case I915_TILING_X:
                /* pin() will align the object as required by fence */
                alignment = 0;
                break;
        case I915_TILING_Y:
                /* FIXME: Is this true? */
                DRM_ERROR("Y tiled not allowed for scan out buffers\n");
                return -EINVAL;
        default:
                BUG();
        }

        dev_priv->mm.interruptible = false;
        ret = i915_gem_object_pin_to_display_plane(obj, alignment, pipelined);
        if (ret)
                goto err_interruptible;

        /* Install a fence for tiled scan-out. Pre-i965 always needs a
         * fence, whereas 965+ only requires a fence if using
         * framebuffer compression.  For simplicity, we always install
         * a fence as the cost is not that onerous.
         */
        if (obj->tiling_mode != I915_TILING_NONE) {
                ret = i915_gem_object_get_fence(obj, pipelined);
                if (ret)
                        goto err_unpin;

                i915_gem_object_pin_fence(obj);
        }

        dev_priv->mm.interruptible = true;
        return 0;

err_unpin:
        i915_gem_object_unpin(obj);
err_interruptible:
        dev_priv->mm.interruptible = true;
        return ret;
}
2172
/* Undo intel_pin_and_fence_fb_obj(): release the fence pin first,
 * then unpin the object from the GTT. */
void intel_unpin_fb_obj(struct drm_i915_gem_object *obj)
{
        i915_gem_object_unpin_fence(obj);
        i915_gem_object_unpin(obj);
}
2178
/* Point display plane @crtc->plane at framebuffer @fb with panning
 * offset (@x, @y) on pre-Ironlake hardware.
 *
 * Returns 0 on success, -EINVAL for an unknown plane or pixel format. */
static int i9xx_update_plane(struct drm_crtc *crtc, struct drm_framebuffer *fb,
                             int x, int y)
{
        struct drm_device *dev = crtc->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        struct intel_framebuffer *intel_fb;
        struct drm_i915_gem_object *obj;
        int plane = intel_crtc->plane;
        unsigned long Start, Offset;
        u32 dspcntr;
        u32 reg;

        /* Only planes A and B exist on these generations. */
        switch (plane) {
        case 0:
        case 1:
                break;
        default:
                DRM_ERROR("Can't update plane %d in SAREA\n", plane);
                return -EINVAL;
        }

        intel_fb = to_intel_framebuffer(fb);
        obj = intel_fb->obj;

        reg = DSPCNTR(plane);
        dspcntr = I915_READ(reg);
        /* Mask out pixel format bits in case we change it */
        dspcntr &= ~DISPPLANE_PIXFORMAT_MASK;
        switch (fb->bits_per_pixel) {
        case 8:
                dspcntr |= DISPPLANE_8BPP;
                break;
        case 16:
                if (fb->depth == 15)
                        dspcntr |= DISPPLANE_15_16BPP;
                else
                        dspcntr |= DISPPLANE_16BPP;
                break;
        case 24:
        case 32:
                dspcntr |= DISPPLANE_32BPP_NO_ALPHA;
                break;
        default:
                DRM_ERROR("Unknown color depth %d\n", fb->bits_per_pixel);
                return -EINVAL;
        }
        /* Tiling is programmed in DSPCNTR only on gen4+. */
        if (INTEL_INFO(dev)->gen >= 4) {
                if (obj->tiling_mode != I915_TILING_NONE)
                        dspcntr |= DISPPLANE_TILED;
                else
                        dspcntr &= ~DISPPLANE_TILED;
        }

        I915_WRITE(reg, dspcntr);

        /* Base of the fb in the GTT, plus the byte offset of (x, y). */
        Start = obj->gtt_offset;
        Offset = y * fb->pitches[0] + x * (fb->bits_per_pixel / 8);

        DRM_DEBUG_KMS("Writing base %08lX %08lX %d %d %d\n",
                      Start, Offset, x, y, fb->pitches[0]);
        I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]);
        /* gen4+ takes a surface base plus tile/linear offsets; older
         * hardware wants a single combined linear address. */
        if (INTEL_INFO(dev)->gen >= 4) {
                I915_WRITE(DSPSURF(plane), Start);
                I915_WRITE(DSPTILEOFF(plane), (y << 16) | x);
                I915_WRITE(DSPADDR(plane), Offset);
        } else
                I915_WRITE(DSPADDR(plane), Start + Offset);
        POSTING_READ(reg);

        return 0;
}
2251
/* Point display plane @crtc->plane at framebuffer @fb with panning
 * offset (@x, @y) on Ironlake and later hardware.
 *
 * Returns 0 on success, -EINVAL for an unknown plane or an
 * unsupported depth/bpp combination. */
static int ironlake_update_plane(struct drm_crtc *crtc,
                                 struct drm_framebuffer *fb, int x, int y)
{
        struct drm_device *dev = crtc->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        struct intel_framebuffer *intel_fb;
        struct drm_i915_gem_object *obj;
        int plane = intel_crtc->plane;
        unsigned long Start, Offset;
        u32 dspcntr;
        u32 reg;

        /* Ironlake+ may expose a third plane (C). */
        switch (plane) {
        case 0:
        case 1:
        case 2:
                break;
        default:
                DRM_ERROR("Can't update plane %d in SAREA\n", plane);
                return -EINVAL;
        }

        intel_fb = to_intel_framebuffer(fb);
        obj = intel_fb->obj;

        reg = DSPCNTR(plane);
        dspcntr = I915_READ(reg);
        /* Mask out pixel format bits in case we change it */
        dspcntr &= ~DISPPLANE_PIXFORMAT_MASK;
        switch (fb->bits_per_pixel) {
        case 8:
                dspcntr |= DISPPLANE_8BPP;
                break;
        case 16:
                /* Only 565 is supported at 16bpp here (no 1555). */
                if (fb->depth != 16)
                        return -EINVAL;

                dspcntr |= DISPPLANE_16BPP;
                break;
        case 24:
        case 32:
                if (fb->depth == 24)
                        dspcntr |= DISPPLANE_32BPP_NO_ALPHA;
                else if (fb->depth == 30)
                        dspcntr |= DISPPLANE_32BPP_30BIT_NO_ALPHA;
                else
                        return -EINVAL;
                break;
        default:
                DRM_ERROR("Unknown color depth %d\n", fb->bits_per_pixel);
                return -EINVAL;
        }

        if (obj->tiling_mode != I915_TILING_NONE)
                dspcntr |= DISPPLANE_TILED;
        else
                dspcntr &= ~DISPPLANE_TILED;

        /* must disable */
        dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE;

        I915_WRITE(reg, dspcntr);

        /* Base of the fb in the GTT, plus the byte offset of (x, y). */
        Start = obj->gtt_offset;
        Offset = y * fb->pitches[0] + x * (fb->bits_per_pixel / 8);

        DRM_DEBUG_KMS("Writing base %08lX %08lX %d %d %d\n",
                      Start, Offset, x, y, fb->pitches[0]);
        I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]);
        I915_WRITE(DSPSURF(plane), Start);
        I915_WRITE(DSPTILEOFF(plane), (y << 16) | x);
        I915_WRITE(DSPADDR(plane), Offset);
        POSTING_READ(reg);

        return 0;
}
2329
2330 /* Assume fb object is pinned & idle & fenced and just update base pointers */
2331 static int
2332 intel_pipe_set_base_atomic(struct drm_crtc *crtc, struct drm_framebuffer *fb,
2333                            int x, int y, enum mode_set_atomic state)
2334 {
2335         struct drm_device *dev = crtc->dev;
2336         struct drm_i915_private *dev_priv = dev->dev_private;
2337         int ret;
2338
2339         ret = dev_priv->display.update_plane(crtc, fb, x, y);
2340         if (ret)
2341                 return ret;
2342
2343         intel_update_fbc(dev);
2344         intel_increase_pllclock(crtc);
2345
2346         return 0;
2347 }
2348
/* CRTC helper: switch scanout of @crtc to crtc->fb at offset (@x, @y),
 * pinning the new fb, flushing GPU access to the old one, and
 * unpinning the old fb after a vblank. Also mirrors the pan offsets
 * into the legacy SAREA for DRI1 clients.
 *
 * Returns 0 on success or a negative error code. */
static int
intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
                    struct drm_framebuffer *old_fb)
{
        struct drm_device *dev = crtc->dev;
        struct drm_i915_master_private *master_priv;
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        int ret;

        /* no fb bound */
        if (!crtc->fb) {
                DRM_ERROR("No FB bound\n");
                return 0;
        }

        /* Plane C only exists on Ivybridge. */
        switch (intel_crtc->plane) {
        case 0:
        case 1:
                break;
        case 2:
                if (IS_IVYBRIDGE(dev))
                        break;
                /* fall through otherwise */
        default:
                DRM_ERROR("no plane for crtc\n");
                return -EINVAL;
        }

        mutex_lock(&dev->struct_mutex);
        ret = intel_pin_and_fence_fb_obj(dev,
                                         to_intel_framebuffer(crtc->fb)->obj,
                                         NULL);
        if (ret != 0) {
                mutex_unlock(&dev->struct_mutex);
                DRM_ERROR("pin & fence failed\n");
                return ret;
        }

        if (old_fb) {
                struct drm_i915_private *dev_priv = dev->dev_private;
                struct drm_i915_gem_object *obj = to_intel_framebuffer(old_fb)->obj;

                /* Wait for outstanding page flips on the old fb to
                 * complete (or the GPU to be declared wedged). */
                wait_event(dev_priv->pending_flip_queue,
                           atomic_read(&dev_priv->mm.wedged) ||
                           atomic_read(&obj->pending_flip) == 0);

                /* Big Hammer, we also need to ensure that any pending
                 * MI_WAIT_FOR_EVENT inside a user batch buffer on the
                 * current scanout is retired before unpinning the old
                 * framebuffer.
                 *
                 * This should only fail upon a hung GPU, in which case we
                 * can safely continue.
                 */
                ret = i915_gem_object_finish_gpu(obj);
                (void) ret;
        }

        ret = intel_pipe_set_base_atomic(crtc, crtc->fb, x, y,
                                         LEAVE_ATOMIC_MODE_SET);
        if (ret) {
                intel_unpin_fb_obj(to_intel_framebuffer(crtc->fb)->obj);
                mutex_unlock(&dev->struct_mutex);
                DRM_ERROR("failed to update base address\n");
                return ret;
        }

        if (old_fb) {
                /* Let the new scanout address latch at vblank before
                 * releasing the old buffer. */
                intel_wait_for_vblank(dev, intel_crtc->pipe);
                intel_unpin_fb_obj(to_intel_framebuffer(old_fb)->obj);
        }

        mutex_unlock(&dev->struct_mutex);

        /* Update the legacy SAREA pan offsets, if a master exists. */
        if (!dev->primary->master)
                return 0;

        master_priv = dev->primary->master->driver_priv;
        if (!master_priv->sarea_priv)
                return 0;

        if (intel_crtc->pipe) {
                master_priv->sarea_priv->pipeB_x = x;
                master_priv->sarea_priv->pipeB_y = y;
        } else {
                master_priv->sarea_priv->pipeA_x = x;
                master_priv->sarea_priv->pipeA_y = y;
        }

        return 0;
}
2440
/*
 * ironlake_set_pll_edp - program the eDP PLL frequency select in DP_A.
 * @crtc: crtc driving the CPU eDP port
 * @clock: link clock in kHz; below 200000 selects the 160MHz PLL,
 *         otherwise the 270MHz PLL
 *
 * The 160MHz setting additionally requires the documented register
 * workaround sequence below before DP_A itself is rewritten.
 */
static void ironlake_set_pll_edp(struct drm_crtc *crtc, int clock)
{
        struct drm_device *dev = crtc->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        u32 dpa_ctl;

        DRM_DEBUG_KMS("eDP PLL enable for clock %d\n", clock);
        dpa_ctl = I915_READ(DP_A);
        dpa_ctl &= ~DP_PLL_FREQ_MASK;

        if (clock < 200000) {
                u32 temp;
                dpa_ctl |= DP_PLL_FREQ_160MHZ;
                /* workaround for 160Mhz:
                   1) program 0x4600c bits 15:0 = 0x8124
                   2) program 0x46010 bit 0 = 1
                   3) program 0x46034 bit 24 = 1
                   4) program 0x64000 bit 14 = 1
                   */
                temp = I915_READ(0x4600c);
                temp &= 0xffff0000;
                I915_WRITE(0x4600c, temp | 0x8124);

                temp = I915_READ(0x46010);
                I915_WRITE(0x46010, temp | 1);

                temp = I915_READ(0x46034);
                I915_WRITE(0x46034, temp | (1 << 24));
        } else {
                dpa_ctl |= DP_PLL_FREQ_270MHZ;
        }
        I915_WRITE(DP_A, dpa_ctl);

        /* flush the write, then give the PLL time to settle */
        POSTING_READ(DP_A);
        udelay(500);
}
2477
/*
 * intel_fdi_normal_train - switch the FDI link from a training pattern
 * to normal pixel traffic on both the CPU TX and PCH RX sides.
 *
 * Must be called after link training has completed.  IVB and CPT use
 * different train-select bitfields than ILK, hence the per-platform
 * branches.
 */
static void intel_fdi_normal_train(struct drm_crtc *crtc)
{
        struct drm_device *dev = crtc->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        int pipe = intel_crtc->pipe;
        u32 reg, temp;

        /* enable normal train */
        reg = FDI_TX_CTL(pipe);
        temp = I915_READ(reg);
        if (IS_IVYBRIDGE(dev)) {
                /* clear the IVB train-select field, then set "none"
                 * (= normal operation) plus enhanced framing */
                temp &= ~FDI_LINK_TRAIN_NONE_IVB;
                temp |= FDI_LINK_TRAIN_NONE_IVB | FDI_TX_ENHANCE_FRAME_ENABLE;
        } else {
                temp &= ~FDI_LINK_TRAIN_NONE;
                temp |= FDI_LINK_TRAIN_NONE | FDI_TX_ENHANCE_FRAME_ENABLE;
        }
        I915_WRITE(reg, temp);

        reg = FDI_RX_CTL(pipe);
        temp = I915_READ(reg);
        if (HAS_PCH_CPT(dev)) {
                temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
                temp |= FDI_LINK_TRAIN_NORMAL_CPT;
        } else {
                temp &= ~FDI_LINK_TRAIN_NONE;
                temp |= FDI_LINK_TRAIN_NONE;
        }
        I915_WRITE(reg, temp | FDI_RX_ENHANCE_FRAME_ENABLE);

        /* wait one idle pattern time */
        POSTING_READ(reg);
        udelay(1000);

        /* IVB wants error correction enabled */
        if (IS_IVYBRIDGE(dev))
                I915_WRITE(reg, I915_READ(reg) | FDI_FS_ERRC_ENABLE |
                           FDI_FE_ERRC_ENABLE);
}
2518
/*
 * cpt_phase_pointer_enable - enable the CPT FDI phase sync pointer for
 * @pipe via SOUTH_CHICKEN1.
 *
 * The override bit must be written first on its own to unlock the
 * enable bit, which is why two writes are issued.
 */
static void cpt_phase_pointer_enable(struct drm_device *dev, int pipe)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        u32 flags = I915_READ(SOUTH_CHICKEN1);

        flags |= FDI_PHASE_SYNC_OVR(pipe);
        I915_WRITE(SOUTH_CHICKEN1, flags); /* once to unlock... */
        flags |= FDI_PHASE_SYNC_EN(pipe);
        I915_WRITE(SOUTH_CHICKEN1, flags); /* then again to enable */
        POSTING_READ(SOUTH_CHICKEN1);
}
2530
/* The FDI link training functions for ILK/Ibexpeak. */
/*
 * Two-phase training: put TX and RX into pattern 1, poll FDI_RX_IIR for
 * bit lock, then switch to pattern 2 and poll for symbol lock.  Each
 * phase is retried up to 5 times; failure is only logged, as the modeset
 * cannot be aborted at this point.
 */
static void ironlake_fdi_link_train(struct drm_crtc *crtc)
{
        struct drm_device *dev = crtc->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        int pipe = intel_crtc->pipe;
        int plane = intel_crtc->plane;
        u32 reg, temp, tries;

        /* FDI needs bits from pipe & plane first */
        assert_pipe_enabled(dev_priv, pipe);
        assert_plane_enabled(dev_priv, plane);

        /* Train 1: umask FDI RX Interrupt symbol_lock and bit_lock bit
           for train result */
        reg = FDI_RX_IMR(pipe);
        temp = I915_READ(reg);
        temp &= ~FDI_RX_SYMBOL_LOCK;
        temp &= ~FDI_RX_BIT_LOCK;
        I915_WRITE(reg, temp);
        I915_READ(reg);
        udelay(150);

        /* enable CPU FDI TX and PCH FDI RX */
        reg = FDI_TX_CTL(pipe);
        temp = I915_READ(reg);
        /* bits 21:19 hold the lane count, encoded as lanes - 1 */
        temp &= ~(7 << 19);
        temp |= (intel_crtc->fdi_lanes - 1) << 19;
        temp &= ~FDI_LINK_TRAIN_NONE;
        temp |= FDI_LINK_TRAIN_PATTERN_1;
        I915_WRITE(reg, temp | FDI_TX_ENABLE);

        reg = FDI_RX_CTL(pipe);
        temp = I915_READ(reg);
        temp &= ~FDI_LINK_TRAIN_NONE;
        temp |= FDI_LINK_TRAIN_PATTERN_1;
        I915_WRITE(reg, temp | FDI_RX_ENABLE);

        POSTING_READ(reg);
        udelay(150);

        /* Ironlake workaround, enable clock pointer after FDI enable*/
        if (HAS_PCH_IBX(dev)) {
                I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR);
                I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR |
                           FDI_RX_PHASE_SYNC_POINTER_EN);
        }

        /* poll for bit lock; write the status bit back to clear it */
        reg = FDI_RX_IIR(pipe);
        for (tries = 0; tries < 5; tries++) {
                temp = I915_READ(reg);
                DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);

                if ((temp & FDI_RX_BIT_LOCK)) {
                        DRM_DEBUG_KMS("FDI train 1 done.\n");
                        I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
                        break;
                }
        }
        if (tries == 5)
                DRM_ERROR("FDI train 1 fail!\n");

        /* Train 2 */
        reg = FDI_TX_CTL(pipe);
        temp = I915_READ(reg);
        temp &= ~FDI_LINK_TRAIN_NONE;
        temp |= FDI_LINK_TRAIN_PATTERN_2;
        I915_WRITE(reg, temp);

        reg = FDI_RX_CTL(pipe);
        temp = I915_READ(reg);
        temp &= ~FDI_LINK_TRAIN_NONE;
        temp |= FDI_LINK_TRAIN_PATTERN_2;
        I915_WRITE(reg, temp);

        POSTING_READ(reg);
        udelay(150);

        /* poll for symbol lock; write the status bit back to clear it */
        reg = FDI_RX_IIR(pipe);
        for (tries = 0; tries < 5; tries++) {
                temp = I915_READ(reg);
                DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);

                if (temp & FDI_RX_SYMBOL_LOCK) {
                        I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
                        DRM_DEBUG_KMS("FDI train 2 done.\n");
                        break;
                }
        }
        if (tries == 5)
                DRM_ERROR("FDI train 2 fail!\n");

        DRM_DEBUG_KMS("FDI train done\n");

}
2627
/* Voltage-swing / pre-emphasis combinations tried in order during
 * SNB-B (and IVB) FDI link training until the link locks. */
static const int snb_b_fdi_train_param[] = {
        FDI_LINK_TRAIN_400MV_0DB_SNB_B,
        FDI_LINK_TRAIN_400MV_6DB_SNB_B,
        FDI_LINK_TRAIN_600MV_3_5DB_SNB_B,
        FDI_LINK_TRAIN_800MV_0DB_SNB_B,
};
2634
/* The FDI link training functions for SNB/Cougarpoint. */
/*
 * Like the ILK variant, but each training phase additionally steps
 * through the snb_b_fdi_train_param[] voltage/pre-emphasis table
 * (outer loop, 4 entries) with 5 lock polls per entry (inner loop).
 */
static void gen6_fdi_link_train(struct drm_crtc *crtc)
{
        struct drm_device *dev = crtc->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        int pipe = intel_crtc->pipe;
        u32 reg, temp, i, retry;

        /* Train 1: umask FDI RX Interrupt symbol_lock and bit_lock bit
           for train result */
        reg = FDI_RX_IMR(pipe);
        temp = I915_READ(reg);
        temp &= ~FDI_RX_SYMBOL_LOCK;
        temp &= ~FDI_RX_BIT_LOCK;
        I915_WRITE(reg, temp);

        POSTING_READ(reg);
        udelay(150);

        /* enable CPU FDI TX and PCH FDI RX */
        reg = FDI_TX_CTL(pipe);
        temp = I915_READ(reg);
        /* bits 21:19 hold the lane count, encoded as lanes - 1 */
        temp &= ~(7 << 19);
        temp |= (intel_crtc->fdi_lanes - 1) << 19;
        temp &= ~FDI_LINK_TRAIN_NONE;
        temp |= FDI_LINK_TRAIN_PATTERN_1;
        temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
        /* SNB-B */
        temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
        I915_WRITE(reg, temp | FDI_TX_ENABLE);

        reg = FDI_RX_CTL(pipe);
        temp = I915_READ(reg);
        if (HAS_PCH_CPT(dev)) {
                temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
                temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
        } else {
                temp &= ~FDI_LINK_TRAIN_NONE;
                temp |= FDI_LINK_TRAIN_PATTERN_1;
        }
        I915_WRITE(reg, temp | FDI_RX_ENABLE);

        POSTING_READ(reg);
        udelay(150);

        if (HAS_PCH_CPT(dev))
                cpt_phase_pointer_enable(dev, pipe);

        /* step through voltage/pre-emphasis levels until bit lock */
        for (i = 0; i < 4; i++) {
                reg = FDI_TX_CTL(pipe);
                temp = I915_READ(reg);
                temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
                temp |= snb_b_fdi_train_param[i];
                I915_WRITE(reg, temp);

                POSTING_READ(reg);
                udelay(500);

                for (retry = 0; retry < 5; retry++) {
                        reg = FDI_RX_IIR(pipe);
                        temp = I915_READ(reg);
                        DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
                        if (temp & FDI_RX_BIT_LOCK) {
                                /* write status bit back to clear it */
                                I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
                                DRM_DEBUG_KMS("FDI train 1 done.\n");
                                break;
                        }
                        udelay(50);
                }
                if (retry < 5)
                        break;
        }
        if (i == 4)
                DRM_ERROR("FDI train 1 fail!\n");

        /* Train 2 */
        reg = FDI_TX_CTL(pipe);
        temp = I915_READ(reg);
        temp &= ~FDI_LINK_TRAIN_NONE;
        temp |= FDI_LINK_TRAIN_PATTERN_2;
        if (IS_GEN6(dev)) {
                temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
                /* SNB-B */
                temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
        }
        I915_WRITE(reg, temp);

        reg = FDI_RX_CTL(pipe);
        temp = I915_READ(reg);
        if (HAS_PCH_CPT(dev)) {
                temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
                temp |= FDI_LINK_TRAIN_PATTERN_2_CPT;
        } else {
                temp &= ~FDI_LINK_TRAIN_NONE;
                temp |= FDI_LINK_TRAIN_PATTERN_2;
        }
        I915_WRITE(reg, temp);

        POSTING_READ(reg);
        udelay(150);

        /* step through voltage/pre-emphasis levels until symbol lock */
        for (i = 0; i < 4; i++) {
                reg = FDI_TX_CTL(pipe);
                temp = I915_READ(reg);
                temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
                temp |= snb_b_fdi_train_param[i];
                I915_WRITE(reg, temp);

                POSTING_READ(reg);
                udelay(500);

                for (retry = 0; retry < 5; retry++) {
                        reg = FDI_RX_IIR(pipe);
                        temp = I915_READ(reg);
                        DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
                        if (temp & FDI_RX_SYMBOL_LOCK) {
                                /* write status bit back to clear it */
                                I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
                                DRM_DEBUG_KMS("FDI train 2 done.\n");
                                break;
                        }
                        udelay(50);
                }
                if (retry < 5)
                        break;
        }
        if (i == 4)
                DRM_ERROR("FDI train 2 fail!\n");

        DRM_DEBUG_KMS("FDI train done.\n");
}
2766
/* Manual link training for Ivy Bridge A0 parts */
/*
 * IVB A0 cannot use hardware auto-training, so training is driven
 * manually: pattern 1 with the snb_b_fdi_train_param[] voltage table
 * until bit lock, then pattern 2 until symbol lock.  Composite sync is
 * required on the IVB FDI link.
 */
static void ivb_manual_fdi_link_train(struct drm_crtc *crtc)
{
        struct drm_device *dev = crtc->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        int pipe = intel_crtc->pipe;
        u32 reg, temp, i;

        /* Train 1: umask FDI RX Interrupt symbol_lock and bit_lock bit
           for train result */
        reg = FDI_RX_IMR(pipe);
        temp = I915_READ(reg);
        temp &= ~FDI_RX_SYMBOL_LOCK;
        temp &= ~FDI_RX_BIT_LOCK;
        I915_WRITE(reg, temp);

        POSTING_READ(reg);
        udelay(150);

        /* enable CPU FDI TX and PCH FDI RX */
        reg = FDI_TX_CTL(pipe);
        temp = I915_READ(reg);
        /* bits 21:19 hold the lane count, encoded as lanes - 1 */
        temp &= ~(7 << 19);
        temp |= (intel_crtc->fdi_lanes - 1) << 19;
        /* disable auto-train, select manual pattern 1 */
        temp &= ~(FDI_LINK_TRAIN_AUTO | FDI_LINK_TRAIN_NONE_IVB);
        temp |= FDI_LINK_TRAIN_PATTERN_1_IVB;
        temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
        temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
        temp |= FDI_COMPOSITE_SYNC;
        I915_WRITE(reg, temp | FDI_TX_ENABLE);

        reg = FDI_RX_CTL(pipe);
        temp = I915_READ(reg);
        temp &= ~FDI_LINK_TRAIN_AUTO;
        temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
        temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
        temp |= FDI_COMPOSITE_SYNC;
        I915_WRITE(reg, temp | FDI_RX_ENABLE);

        POSTING_READ(reg);
        udelay(150);

        if (HAS_PCH_CPT(dev))
                cpt_phase_pointer_enable(dev, pipe);

        /* step through voltage/pre-emphasis levels until bit lock */
        for (i = 0; i < 4; i++) {
                reg = FDI_TX_CTL(pipe);
                temp = I915_READ(reg);
                temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
                temp |= snb_b_fdi_train_param[i];
                I915_WRITE(reg, temp);

                POSTING_READ(reg);
                udelay(500);

                reg = FDI_RX_IIR(pipe);
                temp = I915_READ(reg);
                DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);

                /* check twice; lock may be set on the second read */
                if (temp & FDI_RX_BIT_LOCK ||
                    (I915_READ(reg) & FDI_RX_BIT_LOCK)) {
                        I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
                        DRM_DEBUG_KMS("FDI train 1 done.\n");
                        break;
                }
        }
        if (i == 4)
                DRM_ERROR("FDI train 1 fail!\n");

        /* Train 2 */
        reg = FDI_TX_CTL(pipe);
        temp = I915_READ(reg);
        temp &= ~FDI_LINK_TRAIN_NONE_IVB;
        temp |= FDI_LINK_TRAIN_PATTERN_2_IVB;
        temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
        temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
        I915_WRITE(reg, temp);

        reg = FDI_RX_CTL(pipe);
        temp = I915_READ(reg);
        temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
        temp |= FDI_LINK_TRAIN_PATTERN_2_CPT;
        I915_WRITE(reg, temp);

        POSTING_READ(reg);
        udelay(150);

        /* step through voltage/pre-emphasis levels until symbol lock */
        for (i = 0; i < 4; i++) {
                reg = FDI_TX_CTL(pipe);
                temp = I915_READ(reg);
                temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
                temp |= snb_b_fdi_train_param[i];
                I915_WRITE(reg, temp);

                POSTING_READ(reg);
                udelay(500);

                reg = FDI_RX_IIR(pipe);
                temp = I915_READ(reg);
                DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);

                if (temp & FDI_RX_SYMBOL_LOCK) {
                        I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
                        DRM_DEBUG_KMS("FDI train 2 done.\n");
                        break;
                }
        }
        if (i == 4)
                DRM_ERROR("FDI train 2 fail!\n");

        DRM_DEBUG_KMS("FDI train done.\n");
}
2880
/*
 * ironlake_fdi_pll_enable - power up the FDI PLLs for @crtc's pipe.
 *
 * Programs the RX TU size for error detection, enables the PCH FDI RX
 * PLL, switches the RX clock source from Rawclk to PCDclk, and finally
 * enables the CPU FDI TX PLL if it is not already on.  Each step is
 * followed by a posting read plus the documented warmup delay.
 */
static void ironlake_fdi_pll_enable(struct drm_crtc *crtc)
{
        struct drm_device *dev = crtc->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        int pipe = intel_crtc->pipe;
        u32 reg, temp;

        /* Write the TU size bits so error detection works */
        I915_WRITE(FDI_RX_TUSIZE1(pipe),
                   I915_READ(PIPE_DATA_M1(pipe)) & TU_SIZE_MASK);

        /* enable PCH FDI RX PLL, wait warmup plus DMI latency */
        reg = FDI_RX_CTL(pipe);
        temp = I915_READ(reg);
        /* clear lane count (21:19) and bpc (18:16) fields */
        temp &= ~((0x7 << 19) | (0x7 << 16));
        temp |= (intel_crtc->fdi_lanes - 1) << 19;
        /* mirror the pipe's bpc setting into the FDI RX bpc field */
        temp |= (I915_READ(PIPECONF(pipe)) & PIPE_BPC_MASK) << 11;
        I915_WRITE(reg, temp | FDI_RX_PLL_ENABLE);

        POSTING_READ(reg);
        udelay(200);

        /* Switch from Rawclk to PCDclk */
        temp = I915_READ(reg);
        I915_WRITE(reg, temp | FDI_PCDCLK);

        POSTING_READ(reg);
        udelay(200);

        /* Enable CPU FDI TX PLL, always on for Ironlake */
        reg = FDI_TX_CTL(pipe);
        temp = I915_READ(reg);
        if ((temp & FDI_TX_PLL_ENABLE) == 0) {
                I915_WRITE(reg, temp | FDI_TX_PLL_ENABLE);

                POSTING_READ(reg);
                udelay(100);
        }
}
2921
/*
 * cpt_phase_pointer_disable - undo cpt_phase_pointer_enable() for @pipe.
 *
 * Mirror image of the enable path: clear the enable bit first, then
 * clear the override bit to relock the field.
 */
static void cpt_phase_pointer_disable(struct drm_device *dev, int pipe)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        u32 flags = I915_READ(SOUTH_CHICKEN1);

        flags &= ~(FDI_PHASE_SYNC_EN(pipe));
        I915_WRITE(SOUTH_CHICKEN1, flags); /* once to disable... */
        flags &= ~(FDI_PHASE_SYNC_OVR(pipe));
        I915_WRITE(SOUTH_CHICKEN1, flags); /* then again to lock */
        POSTING_READ(SOUTH_CHICKEN1);
}
2933 static void ironlake_fdi_disable(struct drm_crtc *crtc)
2934 {
2935         struct drm_device *dev = crtc->dev;
2936         struct drm_i915_private *dev_priv = dev->dev_private;
2937         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2938         int pipe = intel_crtc->pipe;
2939         u32 reg, temp;
2940
2941         /* disable CPU FDI tx and PCH FDI rx */
2942         reg = FDI_TX_CTL(pipe);
2943         temp = I915_READ(reg);
2944         I915_WRITE(reg, temp & ~FDI_TX_ENABLE);
2945         POSTING_READ(reg);
2946
2947         reg = FDI_RX_CTL(pipe);
2948         temp = I915_READ(reg);
2949         temp &= ~(0x7 << 16);
2950         temp |= (I915_READ(PIPECONF(pipe)) & PIPE_BPC_MASK) << 11;
2951         I915_WRITE(reg, temp & ~FDI_RX_ENABLE);
2952
2953         POSTING_READ(reg);
2954         udelay(100);
2955
2956         /* Ironlake workaround, disable clock pointer after downing FDI */
2957         if (HAS_PCH_IBX(dev)) {
2958                 I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR);
2959                 I915_WRITE(FDI_RX_CHICKEN(pipe),
2960                            I915_READ(FDI_RX_CHICKEN(pipe) &
2961                                      ~FDI_RX_PHASE_SYNC_POINTER_EN));
2962         } else if (HAS_PCH_CPT(dev)) {
2963                 cpt_phase_pointer_disable(dev, pipe);
2964         }
2965
2966         /* still set train pattern 1 */
2967         reg = FDI_TX_CTL(pipe);
2968         temp = I915_READ(reg);
2969         temp &= ~FDI_LINK_TRAIN_NONE;
2970         temp |= FDI_LINK_TRAIN_PATTERN_1;
2971         I915_WRITE(reg, temp);
2972
2973         reg = FDI_RX_CTL(pipe);
2974         temp = I915_READ(reg);
2975         if (HAS_PCH_CPT(dev)) {
2976                 temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
2977                 temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
2978         } else {
2979                 temp &= ~FDI_LINK_TRAIN_NONE;
2980                 temp |= FDI_LINK_TRAIN_PATTERN_1;
2981         }
2982         /* BPC in FDI rx is consistent with that in PIPECONF */
2983         temp &= ~(0x07 << 16);
2984         temp |= (I915_READ(PIPECONF(pipe)) & PIPE_BPC_MASK) << 11;
2985         I915_WRITE(reg, temp);
2986
2987         POSTING_READ(reg);
2988         udelay(100);
2989 }
2990
2991 /*
2992  * When we disable a pipe, we need to clear any pending scanline wait events
2993  * to avoid hanging the ring, which we assume we are waiting on.
2994  */
2995 static void intel_clear_scanline_wait(struct drm_device *dev)
2996 {
2997         struct drm_i915_private *dev_priv = dev->dev_private;
2998         struct intel_ring_buffer *ring;
2999         u32 tmp;
3000
3001         if (IS_GEN2(dev))
3002                 /* Can't break the hang on i8xx */
3003                 return;
3004
3005         ring = LP_RING(dev_priv);
3006         tmp = I915_READ_CTL(ring);
3007         if (tmp & RING_WAIT)
3008                 I915_WRITE_CTL(ring, tmp);
3009 }
3010
3011 static void intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc)
3012 {
3013         struct drm_i915_gem_object *obj;
3014         struct drm_i915_private *dev_priv;
3015
3016         if (crtc->fb == NULL)
3017                 return;
3018
3019         obj = to_intel_framebuffer(crtc->fb)->obj;
3020         dev_priv = crtc->dev->dev_private;
3021         wait_event(dev_priv->pending_flip_queue,
3022                    atomic_read(&obj->pending_flip) == 0);
3023 }
3024
3025 static bool intel_crtc_driving_pch(struct drm_crtc *crtc)
3026 {
3027         struct drm_device *dev = crtc->dev;
3028         struct drm_mode_config *mode_config = &dev->mode_config;
3029         struct intel_encoder *encoder;
3030
3031         /*
3032          * If there's a non-PCH eDP on this crtc, it must be DP_A, and that
3033          * must be driven by its own crtc; no sharing is possible.
3034          */
3035         list_for_each_entry(encoder, &mode_config->encoder_list, base.head) {
3036                 if (encoder->base.crtc != crtc)
3037                         continue;
3038
3039                 switch (encoder->type) {
3040                 case INTEL_OUTPUT_EDP:
3041                         if (!intel_encoder_is_pch_edp(&encoder->base))
3042                                 return false;
3043                         continue;
3044                 }
3045         }
3046
3047         return true;
3048 }
3049
3050 /*
3051  * Enable PCH resources required for PCH ports:
3052  *   - PCH PLLs
3053  *   - FDI training & RX/TX
3054  *   - update transcoder timings
3055  *   - DP transcoding bits
3056  *   - transcoder
3057  */
3058 static void ironlake_pch_enable(struct drm_crtc *crtc)
3059 {
3060         struct drm_device *dev = crtc->dev;
3061         struct drm_i915_private *dev_priv = dev->dev_private;
3062         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3063         int pipe = intel_crtc->pipe;
3064         u32 reg, temp, transc_sel;
3065
3066         /* For PCH output, training FDI link */
3067         dev_priv->display.fdi_link_train(crtc);
3068
3069         intel_enable_pch_pll(dev_priv, pipe);
3070
3071         if (HAS_PCH_CPT(dev)) {
3072                 transc_sel = intel_crtc->use_pll_a ? TRANSC_DPLLA_SEL :
3073                         TRANSC_DPLLB_SEL;
3074
3075                 /* Be sure PCH DPLL SEL is set */
3076                 temp = I915_READ(PCH_DPLL_SEL);
3077                 if (pipe == 0) {
3078                         temp &= ~(TRANSA_DPLLB_SEL);
3079                         temp |= (TRANSA_DPLL_ENABLE | TRANSA_DPLLA_SEL);
3080                 } else if (pipe == 1) {
3081                         temp &= ~(TRANSB_DPLLB_SEL);
3082                         temp |= (TRANSB_DPLL_ENABLE | TRANSB_DPLLB_SEL);
3083                 } else if (pipe == 2) {
3084                         temp &= ~(TRANSC_DPLLB_SEL);
3085                         temp |= (TRANSC_DPLL_ENABLE | transc_sel);
3086                 }
3087                 I915_WRITE(PCH_DPLL_SEL, temp);
3088         }
3089
3090         /* set transcoder timing, panel must allow it */
3091         assert_panel_unlocked(dev_priv, pipe);
3092         I915_WRITE(TRANS_HTOTAL(pipe), I915_READ(HTOTAL(pipe)));
3093         I915_WRITE(TRANS_HBLANK(pipe), I915_READ(HBLANK(pipe)));
3094         I915_WRITE(TRANS_HSYNC(pipe),  I915_READ(HSYNC(pipe)));
3095
3096         I915_WRITE(TRANS_VTOTAL(pipe), I915_READ(VTOTAL(pipe)));
3097         I915_WRITE(TRANS_VBLANK(pipe), I915_READ(VBLANK(pipe)));
3098         I915_WRITE(TRANS_VSYNC(pipe),  I915_READ(VSYNC(pipe)));
3099         I915_WRITE(TRANS_VSYNCSHIFT(pipe),  I915_READ(VSYNCSHIFT(pipe)));
3100
3101         intel_fdi_normal_train(crtc);
3102
3103         /* For PCH DP, enable TRANS_DP_CTL */
3104         if (HAS_PCH_CPT(dev) &&
3105             (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT) ||
3106              intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP))) {
3107                 u32 bpc = (I915_READ(PIPECONF(pipe)) & PIPE_BPC_MASK) >> 5;
3108                 reg = TRANS_DP_CTL(pipe);
3109                 temp = I915_READ(reg);
3110                 temp &= ~(TRANS_DP_PORT_SEL_MASK |
3111                           TRANS_DP_SYNC_MASK |
3112                           TRANS_DP_BPC_MASK);
3113                 temp |= (TRANS_DP_OUTPUT_ENABLE |
3114                          TRANS_DP_ENH_FRAMING);
3115                 temp |= bpc << 9; /* same format but at 11:9 */
3116
3117                 if (crtc->mode.flags & DRM_MODE_FLAG_PHSYNC)
3118                         temp |= TRANS_DP_HSYNC_ACTIVE_HIGH;
3119                 if (crtc->mode.flags & DRM_MODE_FLAG_PVSYNC)
3120                         temp |= TRANS_DP_VSYNC_ACTIVE_HIGH;
3121
3122                 switch (intel_trans_dp_port_sel(crtc)) {
3123                 case PCH_DP_B:
3124                         temp |= TRANS_DP_PORT_SEL_B;
3125                         break;
3126                 case PCH_DP_C:
3127                         temp |= TRANS_DP_PORT_SEL_C;
3128                         break;
3129                 case PCH_DP_D:
3130                         temp |= TRANS_DP_PORT_SEL_D;
3131                         break;
3132                 default:
3133                         DRM_DEBUG_KMS("Wrong PCH DP port return. Guess port B\n");
3134                         temp |= TRANS_DP_PORT_SEL_B;
3135                         break;
3136                 }
3137
3138                 I915_WRITE(reg, temp);
3139         }
3140
3141         intel_enable_transcoder(dev_priv, pipe);
3142 }
3143
/*
 * intel_cpt_verify_modeset - verify the pipe started scanning out after
 * a CPT modeset.
 *
 * Checks that the pipe's scanline counter (PIPEDSL) is advancing; if it
 * is stuck, retries once with auto-train generator stall disabled via
 * TRANS_CHICKEN2 before reporting a hard failure.
 */
void intel_cpt_verify_modeset(struct drm_device *dev, int pipe)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        int dslreg = PIPEDSL(pipe), tc2reg = TRANS_CHICKEN2(pipe);
        u32 temp;

        temp = I915_READ(dslreg);
        udelay(500);
        if (wait_for(I915_READ(dslreg) != temp, 5)) {
                /* Without this, mode sets may fail silently on FDI */
                I915_WRITE(tc2reg, TRANS_AUTOTRAIN_GEN_STALL_DIS);
                udelay(250);
                I915_WRITE(tc2reg, 0);
                if (wait_for(I915_READ(dslreg) != temp, 5))
                        DRM_ERROR("mode set failed: pipe %d stuck\n", pipe);
        }
}
3161
/*
 * ironlake_crtc_enable - bring up @crtc: FDI (if driving PCH ports),
 * panel fitter, LUT, pipe, plane, PCH resources, FBC and cursor.
 *
 * No-op if the crtc is already active.
 */
static void ironlake_crtc_enable(struct drm_crtc *crtc)
{
        struct drm_device *dev = crtc->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        int pipe = intel_crtc->pipe;
        int plane = intel_crtc->plane;
        u32 temp;
        bool is_pch_port;

        if (intel_crtc->active)
                return;

        intel_crtc->active = true;
        intel_update_watermarks(dev);

        /* make sure the LVDS port is powered before touching the pipe */
        if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
                temp = I915_READ(PCH_LVDS);
                if ((temp & LVDS_PORT_EN) == 0)
                        I915_WRITE(PCH_LVDS, temp | LVDS_PORT_EN);
        }

        is_pch_port = intel_crtc_driving_pch(crtc);

        if (is_pch_port)
                ironlake_fdi_pll_enable(crtc);
        else
                ironlake_fdi_disable(crtc);

        /* Enable panel fitting for LVDS */
        if (dev_priv->pch_pf_size &&
            (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) || HAS_eDP)) {
                /* Force use of hard-coded filter coefficients
                 * as some pre-programmed values are broken,
                 * e.g. x201.
                 */
                I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3);
                I915_WRITE(PF_WIN_POS(pipe), dev_priv->pch_pf_pos);
                I915_WRITE(PF_WIN_SZ(pipe), dev_priv->pch_pf_size);
        }

        /*
         * On ILK+ LUT must be loaded before the pipe is running but with
         * clocks enabled
         */
        intel_crtc_load_lut(crtc);

        intel_enable_pipe(dev_priv, pipe, is_pch_port);
        intel_enable_plane(dev_priv, plane, pipe);

        if (is_pch_port)
                ironlake_pch_enable(crtc);

        /* struct_mutex protects the FBC state */
        mutex_lock(&dev->struct_mutex);
        intel_update_fbc(dev);
        mutex_unlock(&dev->struct_mutex);

        intel_crtc_update_cursor(crtc, true);
}
3221
/*
 * Shut down a running pipe on ILK+ hardware together with its PCH side:
 * plane, pipe, panel fitter, FDI link, PCH ports, transcoder and DPLL —
 * roughly the reverse order of the enable sequence.
 */
static void ironlake_crtc_disable(struct drm_crtc *crtc)
{
        struct drm_device *dev = crtc->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        int pipe = intel_crtc->pipe;
        int plane = intel_crtc->plane;
        u32 reg, temp;

        /* Already off: nothing to do. */
        if (!intel_crtc->active)
                return;

        /* Let any queued page flips finish before tearing the pipe down. */
        intel_crtc_wait_for_pending_flips(crtc);
        drm_vblank_off(dev, pipe);
        intel_crtc_update_cursor(crtc, false);

        intel_disable_plane(dev_priv, plane, pipe);

        /* Stop FBC if it was compressing from this plane. */
        if (dev_priv->cfb_plane == plane)
                intel_disable_fbc(dev);

        intel_disable_pipe(dev_priv, pipe);

        /* Disable PF (panel fitter) */
        I915_WRITE(PF_CTL(pipe), 0);
        I915_WRITE(PF_WIN_SZ(pipe), 0);

        ironlake_fdi_disable(crtc);

        /* This is a horrible layering violation; we should be doing this in
         * the connector/encoder ->prepare instead, but we don't always have
         * enough information there about the config to know whether it will
         * actually be necessary or just cause undesired flicker.
         */
        intel_disable_pch_ports(dev_priv, pipe);

        intel_disable_transcoder(dev_priv, pipe);

        if (HAS_PCH_CPT(dev)) {
                /* disable TRANS_DP_CTL */
                reg = TRANS_DP_CTL(pipe);
                temp = I915_READ(reg);
                temp &= ~(TRANS_DP_OUTPUT_ENABLE | TRANS_DP_PORT_SEL_MASK);
                temp |= TRANS_DP_PORT_SEL_NONE;
                I915_WRITE(reg, temp);

                /* disable DPLL_SEL */
                temp = I915_READ(PCH_DPLL_SEL);
                switch (pipe) {
                case 0:
                        temp &= ~(TRANSA_DPLL_ENABLE | TRANSA_DPLLB_SEL);
                        break;
                case 1:
                        temp &= ~(TRANSB_DPLL_ENABLE | TRANSB_DPLLB_SEL);
                        break;
                case 2:
                        /* C shares PLL A or B */
                        temp &= ~(TRANSC_DPLL_ENABLE | TRANSC_DPLLB_SEL);
                        break;
                default:
                        BUG(); /* wtf */
                }
                I915_WRITE(PCH_DPLL_SEL, temp);
        }

        /* disable PCH DPLL */
        if (!intel_crtc->no_pll)
                intel_disable_pch_pll(dev_priv, pipe);

        /* Switch from PCDclk to Rawclk */
        reg = FDI_RX_CTL(pipe);
        temp = I915_READ(reg);
        I915_WRITE(reg, temp & ~FDI_PCDCLK);

        /* Disable CPU FDI TX PLL */
        reg = FDI_TX_CTL(pipe);
        temp = I915_READ(reg);
        I915_WRITE(reg, temp & ~FDI_TX_PLL_ENABLE);

        /* Flush the write and give the PLL time to spin down. */
        POSTING_READ(reg);
        udelay(100);

        reg = FDI_RX_CTL(pipe);
        temp = I915_READ(reg);
        I915_WRITE(reg, temp & ~FDI_RX_PLL_ENABLE);

        /* Wait for the clocks to turn off. */
        POSTING_READ(reg);
        udelay(100);

        intel_crtc->active = false;
        intel_update_watermarks(dev);

        /* FBC and scanline-wait bookkeeping are updated under struct_mutex. */
        mutex_lock(&dev->struct_mutex);
        intel_update_fbc(dev);
        intel_clear_scanline_wait(dev);
        mutex_unlock(&dev->struct_mutex);
}
3320
3321 static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode)
3322 {
3323         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3324         int pipe = intel_crtc->pipe;
3325         int plane = intel_crtc->plane;
3326
3327         /* XXX: When our outputs are all unaware of DPMS modes other than off
3328          * and on, we should map those modes to DRM_MODE_DPMS_OFF in the CRTC.
3329          */
3330         switch (mode) {
3331         case DRM_MODE_DPMS_ON:
3332         case DRM_MODE_DPMS_STANDBY:
3333         case DRM_MODE_DPMS_SUSPEND:
3334                 DRM_DEBUG_KMS("crtc %d/%d dpms on\n", pipe, plane);
3335                 ironlake_crtc_enable(crtc);
3336                 break;
3337
3338         case DRM_MODE_DPMS_OFF:
3339                 DRM_DEBUG_KMS("crtc %d/%d dpms off\n", pipe, plane);
3340                 ironlake_crtc_disable(crtc);
3341                 break;
3342         }
3343 }
3344
3345 static void intel_crtc_dpms_overlay(struct intel_crtc *intel_crtc, bool enable)
3346 {
3347         if (!enable && intel_crtc->overlay) {
3348                 struct drm_device *dev = intel_crtc->base.dev;
3349                 struct drm_i915_private *dev_priv = dev->dev_private;
3350
3351                 mutex_lock(&dev->struct_mutex);
3352                 dev_priv->mm.interruptible = false;
3353                 (void) intel_overlay_switch_off(intel_crtc->overlay);
3354                 dev_priv->mm.interruptible = true;
3355                 mutex_unlock(&dev->struct_mutex);
3356         }
3357
3358         /* Let userspace switch the overlay on again. In most cases userspace
3359          * has to recompute where to put it anyway.
3360          */
3361 }
3362
/*
 * Bring up a pipe on gen2/3/4 (non-PCH) hardware: PLL, pipe and plane in
 * order, then restore LUT, FBC, overlay and cursor state.
 */
static void i9xx_crtc_enable(struct drm_crtc *crtc)
{
        struct drm_device *dev = crtc->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        int pipe = intel_crtc->pipe;
        int plane = intel_crtc->plane;

        /* Already running: nothing to do. */
        if (intel_crtc->active)
                return;

        intel_crtc->active = true;
        intel_update_watermarks(dev);

        /* Power-up order matters: PLL first, then pipe, then plane. */
        intel_enable_pll(dev_priv, pipe);
        intel_enable_pipe(dev_priv, pipe, false);
        intel_enable_plane(dev_priv, plane, pipe);

        intel_crtc_load_lut(crtc);
        intel_update_fbc(dev);

        /* Give the overlay scaler a chance to enable if it's on this pipe */
        intel_crtc_dpms_overlay(intel_crtc, true);
        intel_crtc_update_cursor(crtc, true);
}
3388
/*
 * Reverse of i9xx_crtc_enable(): flush pending flips, tear down overlay
 * and cursor, then plane, pipe and PLL, and refresh FBC/watermark state.
 */
static void i9xx_crtc_disable(struct drm_crtc *crtc)
{
        struct drm_device *dev = crtc->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        int pipe = intel_crtc->pipe;
        int plane = intel_crtc->plane;

        /* Already off: nothing to do. */
        if (!intel_crtc->active)
                return;

        /* Give the overlay scaler a chance to disable if it's on this pipe */
        intel_crtc_wait_for_pending_flips(crtc);
        drm_vblank_off(dev, pipe);
        intel_crtc_dpms_overlay(intel_crtc, false);
        intel_crtc_update_cursor(crtc, false);

        /* Stop FBC if it was compressing from this plane. */
        if (dev_priv->cfb_plane == plane)
                intel_disable_fbc(dev);

        /* Power-down order: plane, then pipe, then PLL. */
        intel_disable_plane(dev_priv, plane, pipe);
        intel_disable_pipe(dev_priv, pipe);
        intel_disable_pll(dev_priv, pipe);

        intel_crtc->active = false;
        intel_update_fbc(dev);
        intel_update_watermarks(dev);
        intel_clear_scanline_wait(dev);
}
3418
3419 static void i9xx_crtc_dpms(struct drm_crtc *crtc, int mode)
3420 {
3421         /* XXX: When our outputs are all unaware of DPMS modes other than off
3422          * and on, we should map those modes to DRM_MODE_DPMS_OFF in the CRTC.
3423          */
3424         switch (mode) {
3425         case DRM_MODE_DPMS_ON:
3426         case DRM_MODE_DPMS_STANDBY:
3427         case DRM_MODE_DPMS_SUSPEND:
3428                 i9xx_crtc_enable(crtc);
3429                 break;
3430         case DRM_MODE_DPMS_OFF:
3431                 i9xx_crtc_disable(crtc);
3432                 break;
3433         }
3434 }
3435
3436 /**
3437  * Sets the power management mode of the pipe and plane.
3438  */
3439 static void intel_crtc_dpms(struct drm_crtc *crtc, int mode)
3440 {
3441         struct drm_device *dev = crtc->dev;
3442         struct drm_i915_private *dev_priv = dev->dev_private;
3443         struct drm_i915_master_private *master_priv;
3444         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3445         int pipe = intel_crtc->pipe;
3446         bool enabled;
3447
3448         if (intel_crtc->dpms_mode == mode)
3449                 return;
3450
3451         intel_crtc->dpms_mode = mode;
3452
3453         dev_priv->display.dpms(crtc, mode);
3454
3455         if (!dev->primary->master)
3456                 return;
3457
3458         master_priv = dev->primary->master->driver_priv;
3459         if (!master_priv->sarea_priv)
3460                 return;
3461
3462         enabled = crtc->enabled && mode != DRM_MODE_DPMS_OFF;
3463
3464         switch (pipe) {
3465         case 0:
3466                 master_priv->sarea_priv->pipeA_w = enabled ? crtc->mode.hdisplay : 0;
3467                 master_priv->sarea_priv->pipeA_h = enabled ? crtc->mode.vdisplay : 0;
3468                 break;
3469         case 1:
3470                 master_priv->sarea_priv->pipeB_w = enabled ? crtc->mode.hdisplay : 0;
3471                 master_priv->sarea_priv->pipeB_h = enabled ? crtc->mode.vdisplay : 0;
3472                 break;
3473         default:
3474                 DRM_ERROR("Can't update pipe %c in SAREA\n", pipe_name(pipe));
3475                 break;
3476         }
3477 }
3478
3479 static void intel_crtc_disable(struct drm_crtc *crtc)
3480 {
3481         struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
3482         struct drm_device *dev = crtc->dev;
3483
3484         crtc_funcs->dpms(crtc, DRM_MODE_DPMS_OFF);
3485         assert_plane_disabled(dev->dev_private, to_intel_crtc(crtc)->plane);
3486         assert_pipe_disabled(dev->dev_private, to_intel_crtc(crtc)->pipe);
3487
3488         if (crtc->fb) {
3489                 mutex_lock(&dev->struct_mutex);
3490                 intel_unpin_fb_obj(to_intel_framebuffer(crtc->fb)->obj);
3491                 mutex_unlock(&dev->struct_mutex);
3492         }
3493 }
3494
3495 /* Prepare for a mode set.
3496  *
3497  * Note we could be a lot smarter here.  We need to figure out which outputs
3498  * will be enabled, which disabled (in short, how the config will changes)
3499  * and perform the minimum necessary steps to accomplish that, e.g. updating
3500  * watermarks, FBC configuration, making sure PLLs are programmed correctly,
3501  * panel fitting is in the proper state, etc.
3502  */
/* crtc helper ->prepare hook: blank the crtc before a mode set. */
static void i9xx_crtc_prepare(struct drm_crtc *crtc)
{
        i9xx_crtc_disable(crtc);
}
3507
/* crtc helper ->commit hook: light the crtc back up after a mode set. */
static void i9xx_crtc_commit(struct drm_crtc *crtc)
{
        i9xx_crtc_enable(crtc);
}
3512
/* crtc helper ->prepare hook (ILK+): blank the crtc before a mode set. */
static void ironlake_crtc_prepare(struct drm_crtc *crtc)
{
        ironlake_crtc_disable(crtc);
}
3517
/* crtc helper ->commit hook (ILK+): re-enable the crtc after a mode set. */
static void ironlake_crtc_commit(struct drm_crtc *crtc)
{
        ironlake_crtc_enable(crtc);
}
3522
/* Generic encoder ->prepare hook: power the encoder down for a mode set. */
void intel_encoder_prepare(struct drm_encoder *encoder)
{
        struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private;
        /* lvds has its own version of prepare see intel_lvds_prepare */
        encoder_funcs->dpms(encoder, DRM_MODE_DPMS_OFF);
}
3529
3530 void intel_encoder_commit(struct drm_encoder *encoder)
3531 {
3532         struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private;
3533         struct drm_device *dev = encoder->dev;
3534         struct intel_encoder *intel_encoder = to_intel_encoder(encoder);
3535         struct intel_crtc *intel_crtc = to_intel_crtc(intel_encoder->base.crtc);
3536
3537         /* lvds has its own version of commit see intel_lvds_commit */
3538         encoder_funcs->dpms(encoder, DRM_MODE_DPMS_ON);
3539
3540         if (HAS_PCH_CPT(dev))
3541                 intel_cpt_verify_modeset(dev, intel_crtc->pipe);
3542 }
3543
/*
 * Common ->destroy hook for intel encoders: unregister the DRM encoder
 * and free the containing intel_encoder allocation.
 */
void intel_encoder_destroy(struct drm_encoder *encoder)
{
        struct intel_encoder *intel_encoder = to_intel_encoder(encoder);

        drm_encoder_cleanup(encoder);
        kfree(intel_encoder);
}
3551
3552 static bool intel_crtc_mode_fixup(struct drm_crtc *crtc,
3553                                   struct drm_display_mode *mode,
3554                                   struct drm_display_mode *adjusted_mode)
3555 {
3556         struct drm_device *dev = crtc->dev;
3557
3558         if (HAS_PCH_SPLIT(dev)) {
3559                 /* FDI link clock is fixed at 2.7G */
3560                 if (mode->clock * 3 > IRONLAKE_FDI_FREQ * 4)
3561                         return false;
3562         }
3563
3564         /* All interlaced capable intel hw wants timings in frames. */
3565         drm_mode_set_crtcinfo(adjusted_mode, 0);
3566
3567         return true;
3568 }
3569
/* Core display clock, in kHz: hard-coded 400 MHz pending a real readout. */
static int valleyview_get_display_clock_speed(struct drm_device *dev)
{
        return 400000; /* FIXME */
}
3574
/* 945: core display clock is a fixed 400 MHz (returned in kHz). */
static int i945_get_display_clock_speed(struct drm_device *dev)
{
        return 400000;
}
3579
/* 915: core display clock is a fixed 333 MHz (returned in kHz). */
static int i915_get_display_clock_speed(struct drm_device *dev)
{
        return 333000;
}
3584
/* Fallback for remaining i9xx variants: fixed 200 MHz (returned in kHz). */
static int i9xx_misc_get_display_clock_speed(struct drm_device *dev)
{
        return 200000;
}
3589
3590 static int i915gm_get_display_clock_speed(struct drm_device *dev)
3591 {
3592         u16 gcfgc = 0;
3593
3594         pci_read_config_word(dev->pdev, GCFGC, &gcfgc);
3595
3596         if (gcfgc & GC_LOW_FREQUENCY_ENABLE)
3597                 return 133000;
3598         else {
3599                 switch (gcfgc & GC_DISPLAY_CLOCK_MASK) {
3600                 case GC_DISPLAY_CLOCK_333_MHZ:
3601                         return 333000;
3602                 default:
3603                 case GC_DISPLAY_CLOCK_190_200_MHZ:
3604                         return 190000;
3605                 }
3606         }
3607 }
3608
/* 865: core display clock is a fixed 266 MHz (returned in kHz). */
static int i865_get_display_clock_speed(struct drm_device *dev)
{
        return 266000;
}
3613
static int i855_get_display_clock_speed(struct drm_device *dev)
{
        u16 hpllcc = 0;
        /* Assume that the hardware is in the high speed state.  This
         * should be the default.
         */
        /* NOTE(review): hpllcc is never read from the hardware, so this
         * switch always sees 0 — presumably GC_CLOCK_133_200 — and returns
         * 200000.  Confirm whether an HPLLCC config-space read is missing.
         */
        switch (hpllcc & GC_CLOCK_CONTROL_MASK) {
        case GC_CLOCK_133_200:
        case GC_CLOCK_100_200:
                return 200000;
        case GC_CLOCK_166_250:
                return 250000;
        case GC_CLOCK_100_133:
                return 133000;
        }

        /* Shouldn't happen */
        return 0;
}
3633
/* 830: core display clock is a fixed 133 MHz (returned in kHz). */
static int i830_get_display_clock_speed(struct drm_device *dev)
{
        return 133000;
}
3638
/* M/N divider values programmed into the FDI data and link registers. */
struct fdi_m_n {
        u32        tu;      /* transfer unit size */
        u32        gmch_m;  /* data M: payload bits (bpp * pixel clock) */
        u32        gmch_n;  /* data N: link capacity (link clock * lanes * 8) */
        u32        link_m;  /* link M: pixel clock */
        u32        link_n;  /* link N: link clock */
};
3646
/*
 * Scale a ratio down until both terms fit in 24 bits (the width of the
 * M/N register fields); halving num and den together roughly preserves
 * the ratio.
 */
static void
fdi_reduce_ratio(u32 *num, u32 *den)
{
        for (;;) {
                if (*num <= 0xffffff && *den <= 0xffffff)
                        break;
                *num >>= 1;
                *den >>= 1;
        }
}
3655
/*
 * Compute the FDI data (gmch) and link M/N ratios for the given pixel
 * format, lane count and clocks, reduced to fit the 24-bit registers.
 */
static void
ironlake_compute_m_n(int bits_per_pixel, int nlanes, int pixel_clock,
                     int link_clock, struct fdi_m_n *m_n)
{
        m_n->tu = 64; /* default size */

        /* Data M/N: payload bits vs. total link capacity. */
        /* BUG_ON(pixel_clock > INT_MAX / 36); */
        m_n->gmch_m = bits_per_pixel * pixel_clock;
        m_n->gmch_n = link_clock * nlanes * 8;
        fdi_reduce_ratio(&m_n->gmch_m, &m_n->gmch_n);

        /* Link M/N: pixel clock vs. link clock. */
        m_n->link_m = pixel_clock;
        m_n->link_n = link_clock;
        fdi_reduce_ratio(&m_n->link_m, &m_n->link_n);
}
3671
3672
/* Per-platform FIFO/watermark parameters used by intel_calculate_wm(). */
struct intel_watermark_params {
        unsigned long fifo_size;       /* total FIFO size available */
        unsigned long max_wm;          /* upper clamp on the computed level */
        unsigned long default_wm;      /* fallback when the level comes out <= 0 */
        unsigned long guard_size;      /* extra entries reserved above the level */
        unsigned long cacheline_size;  /* FIFO fetch granularity */
};
3680
3681 /* Pineview has different values for various configs */
3682 static const struct intel_watermark_params pineview_display_wm = {
3683         PINEVIEW_DISPLAY_FIFO,
3684         PINEVIEW_MAX_WM,
3685         PINEVIEW_DFT_WM,
3686         PINEVIEW_GUARD_WM,
3687         PINEVIEW_FIFO_LINE_SIZE
3688 };
3689 static const struct intel_watermark_params pineview_display_hplloff_wm = {
3690         PINEVIEW_DISPLAY_FIFO,
3691         PINEVIEW_MAX_WM,
3692         PINEVIEW_DFT_HPLLOFF_WM,
3693         PINEVIEW_GUARD_WM,
3694         PINEVIEW_FIFO_LINE_SIZE
3695 };
3696 static const struct intel_watermark_params pineview_cursor_wm = {
3697         PINEVIEW_CURSOR_FIFO,
3698         PINEVIEW_CURSOR_MAX_WM,
3699         PINEVIEW_CURSOR_DFT_WM,
3700         PINEVIEW_CURSOR_GUARD_WM,
3701         PINEVIEW_FIFO_LINE_SIZE,
3702 };
3703 static const struct intel_watermark_params pineview_cursor_hplloff_wm = {
3704         PINEVIEW_CURSOR_FIFO,
3705         PINEVIEW_CURSOR_MAX_WM,
3706         PINEVIEW_CURSOR_DFT_WM,
3707         PINEVIEW_CURSOR_GUARD_WM,
3708         PINEVIEW_FIFO_LINE_SIZE
3709 };
3710 static const struct intel_watermark_params g4x_wm_info = {
3711         G4X_FIFO_SIZE,
3712         G4X_MAX_WM,
3713         G4X_MAX_WM,
3714         2,
3715         G4X_FIFO_LINE_SIZE,
3716 };
3717 static const struct intel_watermark_params g4x_cursor_wm_info = {
3718         I965_CURSOR_FIFO,
3719         I965_CURSOR_MAX_WM,
3720         I965_CURSOR_DFT_WM,
3721         2,
3722         G4X_FIFO_LINE_SIZE,
3723 };
3724 static const struct intel_watermark_params valleyview_wm_info = {
3725         VALLEYVIEW_FIFO_SIZE,
3726         VALLEYVIEW_MAX_WM,
3727         VALLEYVIEW_MAX_WM,
3728         2,
3729         G4X_FIFO_LINE_SIZE,
3730 };
3731 static const struct intel_watermark_params valleyview_cursor_wm_info = {
3732         I965_CURSOR_FIFO,
3733         VALLEYVIEW_CURSOR_MAX_WM,
3734         I965_CURSOR_DFT_WM,
3735         2,
3736         G4X_FIFO_LINE_SIZE,
3737 };
3738 static const struct intel_watermark_params i965_cursor_wm_info = {
3739         I965_CURSOR_FIFO,
3740         I965_CURSOR_MAX_WM,
3741         I965_CURSOR_DFT_WM,
3742         2,
3743         I915_FIFO_LINE_SIZE,
3744 };
3745 static const struct intel_watermark_params i945_wm_info = {
3746         I945_FIFO_SIZE,
3747         I915_MAX_WM,
3748         1,
3749         2,
3750         I915_FIFO_LINE_SIZE
3751 };
3752 static const struct intel_watermark_params i915_wm_info = {
3753         I915_FIFO_SIZE,
3754         I915_MAX_WM,
3755         1,
3756         2,
3757         I915_FIFO_LINE_SIZE
3758 };
3759 static const struct intel_watermark_params i855_wm_info = {
3760         I855GM_FIFO_SIZE,
3761         I915_MAX_WM,
3762         1,
3763         2,
3764         I830_FIFO_LINE_SIZE
3765 };
3766 static const struct intel_watermark_params i830_wm_info = {
3767         I830_FIFO_SIZE,
3768         I915_MAX_WM,
3769         1,
3770         2,
3771         I830_FIFO_LINE_SIZE
3772 };
3773
3774 static const struct intel_watermark_params ironlake_display_wm_info = {
3775         ILK_DISPLAY_FIFO,
3776         ILK_DISPLAY_MAXWM,
3777         ILK_DISPLAY_DFTWM,
3778         2,
3779         ILK_FIFO_LINE_SIZE
3780 };
3781 static const struct intel_watermark_params ironlake_cursor_wm_info = {
3782         ILK_CURSOR_FIFO,
3783         ILK_CURSOR_MAXWM,
3784         ILK_CURSOR_DFTWM,
3785         2,
3786         ILK_FIFO_LINE_SIZE
3787 };
3788 static const struct intel_watermark_params ironlake_display_srwm_info = {
3789         ILK_DISPLAY_SR_FIFO,
3790         ILK_DISPLAY_MAX_SRWM,
3791         ILK_DISPLAY_DFT_SRWM,
3792         2,
3793         ILK_FIFO_LINE_SIZE
3794 };
3795 static const struct intel_watermark_params ironlake_cursor_srwm_info = {
3796         ILK_CURSOR_SR_FIFO,
3797         ILK_CURSOR_MAX_SRWM,
3798         ILK_CURSOR_DFT_SRWM,
3799         2,
3800         ILK_FIFO_LINE_SIZE
3801 };
3802
3803 static const struct intel_watermark_params sandybridge_display_wm_info = {
3804         SNB_DISPLAY_FIFO,
3805         SNB_DISPLAY_MAXWM,
3806         SNB_DISPLAY_DFTWM,
3807         2,
3808         SNB_FIFO_LINE_SIZE
3809 };
3810 static const struct intel_watermark_params sandybridge_cursor_wm_info = {
3811         SNB_CURSOR_FIFO,
3812         SNB_CURSOR_MAXWM,
3813         SNB_CURSOR_DFTWM,
3814         2,
3815         SNB_FIFO_LINE_SIZE
3816 };
3817 static const struct intel_watermark_params sandybridge_display_srwm_info = {
3818         SNB_DISPLAY_SR_FIFO,
3819         SNB_DISPLAY_MAX_SRWM,
3820         SNB_DISPLAY_DFT_SRWM,
3821         2,
3822         SNB_FIFO_LINE_SIZE
3823 };
3824 static const struct intel_watermark_params sandybridge_cursor_srwm_info = {
3825         SNB_CURSOR_SR_FIFO,
3826         SNB_CURSOR_MAX_SRWM,
3827         SNB_CURSOR_DFT_SRWM,
3828         2,
3829         SNB_FIFO_LINE_SIZE
3830 };
3831
3832
3833 /**
3834  * intel_calculate_wm - calculate watermark level
3835  * @clock_in_khz: pixel clock
3836  * @wm: chip FIFO params
3837  * @pixel_size: display pixel size
3838  * @latency_ns: memory latency for the platform
3839  *
3840  * Calculate the watermark level (the level at which the display plane will
3841  * start fetching from memory again).  Each chip has a different display
3842  * FIFO size and allocation, so the caller needs to figure that out and pass
3843  * in the correct intel_watermark_params structure.
3844  *
3845  * As the pixel clock runs, the FIFO will be drained at a rate that depends
3846  * on the pixel size.  When it reaches the watermark level, it'll start
3847  * fetching FIFO line sized based chunks from memory until the FIFO fills
3848  * past the watermark point.  If the FIFO drains completely, a FIFO underrun
3849  * will occur, and a display engine hang could result.
3850  */
3851 static unsigned long intel_calculate_wm(unsigned long clock_in_khz,
3852                                         const struct intel_watermark_params *wm,
3853                                         int fifo_size,
3854                                         int pixel_size,
3855                                         unsigned long latency_ns)
3856 {
3857         long entries_required, wm_size;
3858
3859         /*
3860          * Note: we need to make sure we don't overflow for various clock &
3861          * latency values.
3862          * clocks go from a few thousand to several hundred thousand.
3863          * latency is usually a few thousand
3864          */
3865         entries_required = ((clock_in_khz / 1000) * pixel_size * latency_ns) /
3866                 1000;
3867         entries_required = DIV_ROUND_UP(entries_required, wm->cacheline_size);
3868
3869         DRM_DEBUG_KMS("FIFO entries required for mode: %ld\n", entries_required);
3870
3871         wm_size = fifo_size - (entries_required + wm->guard_size);
3872
3873         DRM_DEBUG_KMS("FIFO watermark level: %ld\n", wm_size);
3874
3875         /* Don't promote wm_size to unsigned... */
3876         if (wm_size > (long)wm->max_wm)
3877                 wm_size = wm->max_wm;
3878         if (wm_size <= 0)
3879                 wm_size = wm->default_wm;
3880         return wm_size;
3881 }
3882
/* One row of the CxSR latency lookup table, keyed by platform variant
 * and FSB/memory frequency; latencies are fed to intel_calculate_wm()
 * as its latency_ns parameter. */
struct cxsr_latency {
        int is_desktop;                     /* desktop vs. mobile variant */
        int is_ddr3;                        /* DDR3 vs. DDR2 */
        unsigned long fsb_freq;             /* FSB frequency (MHz) */
        unsigned long mem_freq;             /* memory frequency (MHz) */
        unsigned long display_sr;           /* display self-refresh latency */
        unsigned long display_hpll_disable; /* display latency with HPLL off */
        unsigned long cursor_sr;            /* cursor self-refresh latency */
        unsigned long cursor_hpll_disable;  /* cursor latency with HPLL off */
};
3893
/*
 * CxSR latency table, matched by intel_get_cxsr_latency().
 * Columns: is_desktop, is_ddr3, fsb_freq, mem_freq,
 *          display_sr, display_hpll_disable, cursor_sr, cursor_hpll_disable.
 */
static const struct cxsr_latency cxsr_latency_table[] = {
        {1, 0, 800, 400, 3382, 33382, 3983, 33983},    /* DDR2-400 SC */
        {1, 0, 800, 667, 3354, 33354, 3807, 33807},    /* DDR2-667 SC */
        {1, 0, 800, 800, 3347, 33347, 3763, 33763},    /* DDR2-800 SC */
        {1, 1, 800, 667, 6420, 36420, 6873, 36873},    /* DDR3-667 SC */
        {1, 1, 800, 800, 5902, 35902, 6318, 36318},    /* DDR3-800 SC */

        {1, 0, 667, 400, 3400, 33400, 4021, 34021},    /* DDR2-400 SC */
        {1, 0, 667, 667, 3372, 33372, 3845, 33845},    /* DDR2-667 SC */
        {1, 0, 667, 800, 3386, 33386, 3822, 33822},    /* DDR2-800 SC */
        {1, 1, 667, 667, 6438, 36438, 6911, 36911},    /* DDR3-667 SC */
        {1, 1, 667, 800, 5941, 35941, 6377, 36377},    /* DDR3-800 SC */

        {1, 0, 400, 400, 3472, 33472, 4173, 34173},    /* DDR2-400 SC */
        {1, 0, 400, 667, 3443, 33443, 3996, 33996},    /* DDR2-667 SC */
        {1, 0, 400, 800, 3430, 33430, 3946, 33946},    /* DDR2-800 SC */
        {1, 1, 400, 667, 6509, 36509, 7062, 37062},    /* DDR3-667 SC */
        {1, 1, 400, 800, 5985, 35985, 6501, 36501},    /* DDR3-800 SC */

        {0, 0, 800, 400, 3438, 33438, 4065, 34065},    /* DDR2-400 SC */
        {0, 0, 800, 667, 3410, 33410, 3889, 33889},    /* DDR2-667 SC */
        {0, 0, 800, 800, 3403, 33403, 3845, 33845},    /* DDR2-800 SC */
        {0, 1, 800, 667, 6476, 36476, 6955, 36955},    /* DDR3-667 SC */
        {0, 1, 800, 800, 5958, 35958, 6400, 36400},    /* DDR3-800 SC */

        /* NOTE(review): 34106 breaks the cursor_sr+30000 pattern seen in
         * every other row (expected 34103) — confirm against the spec. */
        {0, 0, 667, 400, 3456, 33456, 4103, 34106},    /* DDR2-400 SC */
        {0, 0, 667, 667, 3428, 33428, 3927, 33927},    /* DDR2-667 SC */
        {0, 0, 667, 800, 3443, 33443, 3905, 33905},    /* DDR2-800 SC */
        {0, 1, 667, 667, 6494, 36494, 6993, 36993},    /* DDR3-667 SC */
        {0, 1, 667, 800, 5998, 35998, 6460, 36460},    /* DDR3-800 SC */

        {0, 0, 400, 400, 3528, 33528, 4255, 34255},    /* DDR2-400 SC */
        {0, 0, 400, 667, 3500, 33500, 4079, 34079},    /* DDR2-667 SC */
        {0, 0, 400, 800, 3487, 33487, 4029, 34029},    /* DDR2-800 SC */
        {0, 1, 400, 667, 6566, 36566, 7145, 37145},    /* DDR3-667 SC */
        {0, 1, 400, 800, 6042, 36042, 6584, 36584},    /* DDR3-800 SC */
};
3931
3932 static const struct cxsr_latency *intel_get_cxsr_latency(int is_desktop,
3933                                                          int is_ddr3,
3934                                                          int fsb,
3935                                                          int mem)
3936 {
3937         const struct cxsr_latency *latency;
3938         int i;
3939
3940         if (fsb == 0 || mem == 0)
3941                 return NULL;
3942
3943         for (i = 0; i < ARRAY_SIZE(cxsr_latency_table); i++) {
3944                 latency = &cxsr_latency_table[i];
3945                 if (is_desktop == latency->is_desktop &&
3946                     is_ddr3 == latency->is_ddr3 &&
3947                     fsb == latency->fsb_freq && mem == latency->mem_freq)
3948                         return latency;
3949         }
3950
3951         DRM_DEBUG_KMS("Unknown FSB/MEM found, disable CxSR\n");
3952
3953         return NULL;
3954 }
3955
/* Turn off Pineview self-refresh by clearing the enable bit in DSPFW3. */
static void pineview_disable_cxsr(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;

        /* deactivate cxsr */
        I915_WRITE(DSPFW3, I915_READ(DSPFW3) & ~PINEVIEW_SELF_REFRESH_EN);
}
3963
/*
 * Latency for FIFO fetches is dependent on several factors:
 *   - memory configuration (speed, channels)
 *   - chipset
 *   - current MCH state
 * It can be fairly high in some situations, so here we assume a fairly
 * pessimal value.  It's a tradeoff between extra memory fetches (if we
 * set this value too high, the FIFO will fetch frequently to stay full)
 * and power consumption (set it too low to save power and we might see
 * FIFO underruns and display "flicker").
 *
 * A value of 5us seems to be a good balance; safe for very low end
 * platforms but not overly aggressive on lower latency configs.
 */
static const int latency_ns = 5000; /* default FIFO fetch latency, in ns */
3979
3980 static int i9xx_get_fifo_size(struct drm_device *dev, int plane)
3981 {
3982         struct drm_i915_private *dev_priv = dev->dev_private;
3983         uint32_t dsparb = I915_READ(DSPARB);
3984         int size;
3985
3986         size = dsparb & 0x7f;
3987         if (plane)
3988                 size = ((dsparb >> DSPARB_CSTART_SHIFT) & 0x7f) - size;
3989
3990         DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
3991                       plane ? "B" : "A", size);
3992
3993         return size;
3994 }
3995
3996 static int i85x_get_fifo_size(struct drm_device *dev, int plane)
3997 {
3998         struct drm_i915_private *dev_priv = dev->dev_private;
3999         uint32_t dsparb = I915_READ(DSPARB);
4000         int size;
4001
4002         size = dsparb & 0x1ff;
4003         if (plane)
4004                 size = ((dsparb >> DSPARB_BEND_SHIFT) & 0x1ff) - size;
4005         size >>= 1; /* Convert to cachelines */
4006
4007         DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
4008                       plane ? "B" : "A", size);
4009
4010         return size;
4011 }
4012
4013 static int i845_get_fifo_size(struct drm_device *dev, int plane)
4014 {
4015         struct drm_i915_private *dev_priv = dev->dev_private;
4016         uint32_t dsparb = I915_READ(DSPARB);
4017         int size;
4018
4019         size = dsparb & 0x7f;
4020         size >>= 2; /* Convert to cachelines */
4021
4022         DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
4023                       plane ? "B" : "A",
4024                       size);
4025
4026         return size;
4027 }
4028
4029 static int i830_get_fifo_size(struct drm_device *dev, int plane)
4030 {
4031         struct drm_i915_private *dev_priv = dev->dev_private;
4032         uint32_t dsparb = I915_READ(DSPARB);
4033         int size;
4034
4035         size = dsparb & 0x7f;
4036         size >>= 1; /* Convert to cachelines */
4037
4038         DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
4039                       plane ? "B" : "A", size);
4040
4041         return size;
4042 }
4043
4044 static struct drm_crtc *single_enabled_crtc(struct drm_device *dev)
4045 {
4046         struct drm_crtc *crtc, *enabled = NULL;
4047
4048         list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
4049                 if (crtc->enabled && crtc->fb) {
4050                         if (enabled)
4051                                 return NULL;
4052                         enabled = crtc;
4053                 }
4054         }
4055
4056         return enabled;
4057 }
4058
/*
 * Pineview self-refresh (CxSR) watermark setup.  With exactly one active
 * crtc, program the DSPFW self-refresh and HPLL-off watermarks from the
 * latency row matching dev_priv->fsb_freq/mem_freq and enable CxSR;
 * otherwise disable it.
 */
static void pineview_update_wm(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_crtc *crtc;
        const struct cxsr_latency *latency;
        u32 reg;
        unsigned long wm;

        latency = intel_get_cxsr_latency(IS_PINEVIEW_G(dev), dev_priv->is_ddr3,
                                         dev_priv->fsb_freq, dev_priv->mem_freq);
        if (!latency) {
                DRM_DEBUG_KMS("Unknown FSB/MEM found, disable CxSR\n");
                pineview_disable_cxsr(dev);
                return;
        }

        crtc = single_enabled_crtc(dev);
        if (crtc) {
                int clock = crtc->mode.clock;
                int pixel_size = crtc->fb->bits_per_pixel / 8;

                /* Display SR */
                wm = intel_calculate_wm(clock, &pineview_display_wm,
                                        pineview_display_wm.fifo_size,
                                        pixel_size, latency->display_sr);
                reg = I915_READ(DSPFW1);
                reg &= ~DSPFW_SR_MASK;
                reg |= wm << DSPFW_SR_SHIFT;
                I915_WRITE(DSPFW1, reg);
                DRM_DEBUG_KMS("DSPFW1 register is %x\n", reg);

                /* cursor SR */
                /* NOTE(review): uses the display FIFO size (and display
                 * pixel_size) for the cursor calculation — confirm this is
                 * intentional rather than pineview_cursor_wm.fifo_size. */
                wm = intel_calculate_wm(clock, &pineview_cursor_wm,
                                        pineview_display_wm.fifo_size,
                                        pixel_size, latency->cursor_sr);
                reg = I915_READ(DSPFW3);
                reg &= ~DSPFW_CURSOR_SR_MASK;
                reg |= (wm & 0x3f) << DSPFW_CURSOR_SR_SHIFT;
                I915_WRITE(DSPFW3, reg);

                /* Display HPLL off SR */
                wm = intel_calculate_wm(clock, &pineview_display_hplloff_wm,
                                        pineview_display_hplloff_wm.fifo_size,
                                        pixel_size, latency->display_hpll_disable);
                reg = I915_READ(DSPFW3);
                reg &= ~DSPFW_HPLL_SR_MASK;
                reg |= wm & DSPFW_HPLL_SR_MASK;
                I915_WRITE(DSPFW3, reg);

                /* cursor HPLL off SR */
                wm = intel_calculate_wm(clock, &pineview_cursor_hplloff_wm,
                                        pineview_display_hplloff_wm.fifo_size,
                                        pixel_size, latency->cursor_hpll_disable);
                reg = I915_READ(DSPFW3);
                reg &= ~DSPFW_HPLL_CURSOR_MASK;
                reg |= (wm & 0x3f) << DSPFW_HPLL_CURSOR_SHIFT;
                I915_WRITE(DSPFW3, reg);
                DRM_DEBUG_KMS("DSPFW3 register is %x\n", reg);

                /* activate cxsr */
                I915_WRITE(DSPFW3,
                           I915_READ(DSPFW3) | PINEVIEW_SELF_REFRESH_EN);
                DRM_DEBUG_KMS("Self-refresh is enabled\n");
        } else {
                pineview_disable_cxsr(dev);
                DRM_DEBUG_KMS("Self-refresh is disabled\n");
        }
}
4127
4128 static bool g4x_compute_wm0(struct drm_device *dev,
4129                             int plane,
4130                             const struct intel_watermark_params *display,
4131                             int display_latency_ns,
4132                             const struct intel_watermark_params *cursor,
4133                             int cursor_latency_ns,
4134                             int *plane_wm,
4135                             int *cursor_wm)
4136 {
4137         struct drm_crtc *crtc;
4138         int htotal, hdisplay, clock, pixel_size;
4139         int line_time_us, line_count;
4140         int entries, tlb_miss;
4141
4142         crtc = intel_get_crtc_for_plane(dev, plane);
4143         if (crtc->fb == NULL || !crtc->enabled) {
4144                 *cursor_wm = cursor->guard_size;
4145                 *plane_wm = display->guard_size;
4146                 return false;
4147         }
4148
4149         htotal = crtc->mode.htotal;
4150         hdisplay = crtc->mode.hdisplay;
4151         clock = crtc->mode.clock;
4152         pixel_size = crtc->fb->bits_per_pixel / 8;
4153
4154         /* Use the small buffer method to calculate plane watermark */
4155         entries = ((clock * pixel_size / 1000) * display_latency_ns) / 1000;
4156         tlb_miss = display->fifo_size*display->cacheline_size - hdisplay * 8;
4157         if (tlb_miss > 0)
4158                 entries += tlb_miss;
4159         entries = DIV_ROUND_UP(entries, display->cacheline_size);
4160         *plane_wm = entries + display->guard_size;
4161         if (*plane_wm > (int)display->max_wm)
4162                 *plane_wm = display->max_wm;
4163
4164         /* Use the large buffer method to calculate cursor watermark */
4165         line_time_us = ((htotal * 1000) / clock);
4166         line_count = (cursor_latency_ns / line_time_us + 1000) / 1000;
4167         entries = line_count * 64 * pixel_size;
4168         tlb_miss = cursor->fifo_size*cursor->cacheline_size - hdisplay * 8;
4169         if (tlb_miss > 0)
4170                 entries += tlb_miss;
4171         entries = DIV_ROUND_UP(entries, cursor->cacheline_size);
4172         *cursor_wm = entries + cursor->guard_size;
4173         if (*cursor_wm > (int)cursor->max_wm)
4174                 *cursor_wm = (int)cursor->max_wm;
4175
4176         return true;
4177 }
4178
4179 /*
4180  * Check the wm result.
4181  *
 * If any calculated watermark value is larger than the maximum value that
4183  * can be programmed into the associated watermark register, that watermark
4184  * must be disabled.
4185  */
4186 static bool g4x_check_srwm(struct drm_device *dev,
4187                            int display_wm, int cursor_wm,
4188                            const struct intel_watermark_params *display,
4189                            const struct intel_watermark_params *cursor)
4190 {
4191         DRM_DEBUG_KMS("SR watermark: display plane %d, cursor %d\n",
4192                       display_wm, cursor_wm);
4193
4194         if (display_wm > display->max_wm) {
4195                 DRM_DEBUG_KMS("display watermark is too large(%d/%ld), disabling\n",
4196                               display_wm, display->max_wm);
4197                 return false;
4198         }
4199
4200         if (cursor_wm > cursor->max_wm) {
4201                 DRM_DEBUG_KMS("cursor watermark is too large(%d/%ld), disabling\n",
4202                               cursor_wm, cursor->max_wm);
4203                 return false;
4204         }
4205
4206         if (!(display_wm || cursor_wm)) {
4207                 DRM_DEBUG_KMS("SR latency is 0, disabling\n");
4208                 return false;
4209         }
4210
4211         return true;
4212 }
4213
4214 static bool g4x_compute_srwm(struct drm_device *dev,
4215                              int plane,
4216                              int latency_ns,
4217                              const struct intel_watermark_params *display,
4218                              const struct intel_watermark_params *cursor,
4219                              int *display_wm, int *cursor_wm)
4220 {
4221         struct drm_crtc *crtc;
4222         int hdisplay, htotal, pixel_size, clock;
4223         unsigned long line_time_us;
4224         int line_count, line_size;
4225         int small, large;
4226         int entries;
4227
4228         if (!latency_ns) {
4229                 *display_wm = *cursor_wm = 0;
4230                 return false;
4231         }
4232
4233         crtc = intel_get_crtc_for_plane(dev, plane);
4234         hdisplay = crtc->mode.hdisplay;
4235         htotal = crtc->mode.htotal;
4236         clock = crtc->mode.clock;
4237         pixel_size = crtc->fb->bits_per_pixel / 8;
4238
4239         line_time_us = (htotal * 1000) / clock;
4240         line_count = (latency_ns / line_time_us + 1000) / 1000;
4241         line_size = hdisplay * pixel_size;
4242
4243         /* Use the minimum of the small and large buffer method for primary */
4244         small = ((clock * pixel_size / 1000) * latency_ns) / 1000;
4245         large = line_count * line_size;
4246
4247         entries = DIV_ROUND_UP(min(small, large), display->cacheline_size);
4248         *display_wm = entries + display->guard_size;
4249
4250         /* calculate the self-refresh watermark for display cursor */
4251         entries = line_count * pixel_size * 64;
4252         entries = DIV_ROUND_UP(entries, cursor->cacheline_size);
4253         *cursor_wm = entries + cursor->guard_size;
4254
4255         return g4x_check_srwm(dev,
4256                               *display_wm, *cursor_wm,
4257                               display, cursor);
4258 }
4259
4260 static bool vlv_compute_drain_latency(struct drm_device *dev,
4261                                      int plane,
4262                                      int *plane_prec_mult,
4263                                      int *plane_dl,
4264                                      int *cursor_prec_mult,
4265                                      int *cursor_dl)
4266 {
4267         struct drm_crtc *crtc;
4268         int clock, pixel_size;
4269         int entries;
4270
4271         crtc = intel_get_crtc_for_plane(dev, plane);
4272         if (crtc->fb == NULL || !crtc->enabled)
4273                 return false;
4274
4275         clock = crtc->mode.clock;       /* VESA DOT Clock */
4276         pixel_size = crtc->fb->bits_per_pixel / 8;      /* BPP */
4277
4278         entries = (clock / 1000) * pixel_size;
4279         *plane_prec_mult = (entries > 256) ?
4280                 DRAIN_LATENCY_PRECISION_32 : DRAIN_LATENCY_PRECISION_16;
4281         *plane_dl = (64 * (*plane_prec_mult) * 4) / ((clock / 1000) *
4282                                                      pixel_size);
4283
4284         entries = (clock / 1000) * 4;   /* BPP is always 4 for cursor */
4285         *cursor_prec_mult = (entries > 256) ?
4286                 DRAIN_LATENCY_PRECISION_32 : DRAIN_LATENCY_PRECISION_16;
4287         *cursor_dl = (64 * (*cursor_prec_mult) * 4) / ((clock / 1000) * 4);
4288
4289         return true;
4290 }
4291
4292 /*
4293  * Update drain latency registers of memory arbiter
4294  *
4295  * Valleyview SoC has a new memory arbiter and needs drain latency registers
4296  * to be programmed. Each plane has a drain latency multiplier and a drain
4297  * latency value.
4298  */
4299
static void vlv_update_drain_latency(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        int planea_prec, planea_dl, planeb_prec, planeb_dl;
        int cursora_prec, cursora_dl, cursorb_prec, cursorb_dl;
        int plane_prec_mult, cursor_prec_mult; /* Precision multiplier is
                                                        either 16 or 32 */

        /* For plane A, Cursor A */
        if (vlv_compute_drain_latency(dev, 0, &plane_prec_mult, &planea_dl,
                                      &cursor_prec_mult, &cursora_dl)) {
                /* Map the raw precision multipliers to the DDL1 field encodings. */
                cursora_prec = (cursor_prec_mult == DRAIN_LATENCY_PRECISION_32) ?
                        DDL_CURSORA_PRECISION_32 : DDL_CURSORA_PRECISION_16;
                planea_prec = (plane_prec_mult == DRAIN_LATENCY_PRECISION_32) ?
                        DDL_PLANEA_PRECISION_32 : DDL_PLANEA_PRECISION_16;

                I915_WRITE(VLV_DDL1, cursora_prec |
                                (cursora_dl << DDL_CURSORA_SHIFT) |
                                planea_prec | planea_dl);
        }

        /* For plane B, Cursor B */
        if (vlv_compute_drain_latency(dev, 1, &plane_prec_mult, &planeb_dl,
                                      &cursor_prec_mult, &cursorb_dl)) {
                /* Same translation as above, for the DDL2 (pipe B) fields. */
                cursorb_prec = (cursor_prec_mult == DRAIN_LATENCY_PRECISION_32) ?
                        DDL_CURSORB_PRECISION_32 : DDL_CURSORB_PRECISION_16;
                planeb_prec = (plane_prec_mult == DRAIN_LATENCY_PRECISION_32) ?
                        DDL_PLANEB_PRECISION_32 : DDL_PLANEB_PRECISION_16;

                I915_WRITE(VLV_DDL2, cursorb_prec |
                                (cursorb_dl << DDL_CURSORB_SHIFT) |
                                planeb_prec | planeb_dl);
        }
}
4334
/* True when exactly one bit is set in the enabled-planes mask. */
#define single_plane_enabled(mask) is_power_of_2(mask)
4336
/*
 * Recompute and program FIFO watermarks for both Valleyview pipes, and
 * enable the self-refresh power-down only when a single plane is active.
 */
static void valleyview_update_wm(struct drm_device *dev)
{
        static const int sr_latency_ns = 12000; /* latency used for SR watermarks */
        struct drm_i915_private *dev_priv = dev->dev_private;
        int planea_wm, planeb_wm, cursora_wm, cursorb_wm;
        int plane_sr, cursor_sr;
        unsigned int enabled = 0; /* bitmask of planes with a valid wm0 */

        vlv_update_drain_latency(dev);

        if (g4x_compute_wm0(dev, 0,
                            &valleyview_wm_info, latency_ns,
                            &valleyview_cursor_wm_info, latency_ns,
                            &planea_wm, &cursora_wm))
                enabled |= 1;

        if (g4x_compute_wm0(dev, 1,
                            &valleyview_wm_info, latency_ns,
                            &valleyview_cursor_wm_info, latency_ns,
                            &planeb_wm, &cursorb_wm))
                enabled |= 2;

        /* Self-refresh watermarks apply only with exactly one plane enabled. */
        plane_sr = cursor_sr = 0;
        if (single_plane_enabled(enabled) &&
            g4x_compute_srwm(dev, ffs(enabled) - 1,
                             sr_latency_ns,
                             &valleyview_wm_info,
                             &valleyview_cursor_wm_info,
                             &plane_sr, &cursor_sr))
                I915_WRITE(FW_BLC_SELF_VLV, FW_CSPWRDWNEN);
        else
                I915_WRITE(FW_BLC_SELF_VLV,
                           I915_READ(FW_BLC_SELF_VLV) & ~FW_CSPWRDWNEN);

        DRM_DEBUG_KMS("Setting FIFO watermarks - A: plane=%d, cursor=%d, B: plane=%d, cursor=%d, SR: plane=%d, cursor=%d\n",
                      planea_wm, cursora_wm,
                      planeb_wm, cursorb_wm,
                      plane_sr, cursor_sr);

        /* Program the computed values into the DSPFW registers. */
        I915_WRITE(DSPFW1,
                   (plane_sr << DSPFW_SR_SHIFT) |
                   (cursorb_wm << DSPFW_CURSORB_SHIFT) |
                   (planeb_wm << DSPFW_PLANEB_SHIFT) |
                   planea_wm);
        I915_WRITE(DSPFW2,
                   (I915_READ(DSPFW2) & DSPFW_CURSORA_MASK) |
                   (cursora_wm << DSPFW_CURSORA_SHIFT));
        I915_WRITE(DSPFW3,
                   (I915_READ(DSPFW3) | (cursor_sr << DSPFW_CURSOR_SR_SHIFT)));
}
4387
/*
 * Recompute and program FIFO watermarks for both G4x pipes; self-refresh
 * is only enabled when a single plane is active.
 */
static void g4x_update_wm(struct drm_device *dev)
{
        static const int sr_latency_ns = 12000; /* latency used for SR watermarks */
        struct drm_i915_private *dev_priv = dev->dev_private;
        int planea_wm, planeb_wm, cursora_wm, cursorb_wm;
        int plane_sr, cursor_sr;
        unsigned int enabled = 0; /* bitmask of planes with a valid wm0 */

        if (g4x_compute_wm0(dev, 0,
                            &g4x_wm_info, latency_ns,
                            &g4x_cursor_wm_info, latency_ns,
                            &planea_wm, &cursora_wm))
                enabled |= 1;

        if (g4x_compute_wm0(dev, 1,
                            &g4x_wm_info, latency_ns,
                            &g4x_cursor_wm_info, latency_ns,
                            &planeb_wm, &cursorb_wm))
                enabled |= 2;

        /* Self-refresh watermarks apply only with exactly one plane enabled. */
        plane_sr = cursor_sr = 0;
        if (single_plane_enabled(enabled) &&
            g4x_compute_srwm(dev, ffs(enabled) - 1,
                             sr_latency_ns,
                             &g4x_wm_info,
                             &g4x_cursor_wm_info,
                             &plane_sr, &cursor_sr))
                I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN);
        else
                I915_WRITE(FW_BLC_SELF,
                           I915_READ(FW_BLC_SELF) & ~FW_BLC_SELF_EN);

        DRM_DEBUG_KMS("Setting FIFO watermarks - A: plane=%d, cursor=%d, B: plane=%d, cursor=%d, SR: plane=%d, cursor=%d\n",
                      planea_wm, cursora_wm,
                      planeb_wm, cursorb_wm,
                      plane_sr, cursor_sr);

        /* Program the computed values into the DSPFW registers. */
        I915_WRITE(DSPFW1,
                   (plane_sr << DSPFW_SR_SHIFT) |
                   (cursorb_wm << DSPFW_CURSORB_SHIFT) |
                   (planeb_wm << DSPFW_PLANEB_SHIFT) |
                   planea_wm);
        I915_WRITE(DSPFW2,
                   (I915_READ(DSPFW2) & DSPFW_CURSORA_MASK) |
                   (cursora_wm << DSPFW_CURSORA_SHIFT));
        /* HPLL off in SR has some issues on G4x... disable it */
        I915_WRITE(DSPFW3,
                   (I915_READ(DSPFW3) & ~DSPFW_HPLL_SR_EN) |
                   (cursor_sr << DSPFW_CURSOR_SR_SHIFT));
}
4438
/*
 * Program i965-class watermarks: fixed 8-entry FIFO watermarks for the
 * planes plus a computed self-refresh watermark when exactly one CRTC is
 * enabled.
 */
static void i965_update_wm(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_crtc *crtc;
        int srwm = 1;           /* fallback SR watermark */
        int cursor_sr = 16;     /* fallback cursor SR watermark */

        /* Calc sr entries for one plane configs */
        crtc = single_enabled_crtc(dev);
        if (crtc) {
                /* self-refresh has much higher latency */
                static const int sr_latency_ns = 12000;
                int clock = crtc->mode.clock;
                int htotal = crtc->mode.htotal;
                int hdisplay = crtc->mode.hdisplay;
                int pixel_size = crtc->fb->bits_per_pixel / 8;
                unsigned long line_time_us;
                int entries;

                line_time_us = ((htotal * 1000) / clock);

                /* Use ns/us then divide to preserve precision */
                entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
                        pixel_size * hdisplay;
                entries = DIV_ROUND_UP(entries, I915_FIFO_LINE_SIZE);
                srwm = I965_FIFO_SIZE - entries;
                if (srwm < 0)
                        srwm = 1;
                srwm &= 0x1ff;
                DRM_DEBUG_KMS("self-refresh entries: %d, wm: %d\n",
                              entries, srwm);

                /* Cursor SR: same formula, with the 64-wide cursor fetch. */
                entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
                        pixel_size * 64;
                entries = DIV_ROUND_UP(entries,
                                          i965_cursor_wm_info.cacheline_size);
                cursor_sr = i965_cursor_wm_info.fifo_size -
                        (entries + i965_cursor_wm_info.guard_size);

                if (cursor_sr > i965_cursor_wm_info.max_wm)
                        cursor_sr = i965_cursor_wm_info.max_wm;

                DRM_DEBUG_KMS("self-refresh watermark: display plane %d "
                              "cursor %d\n", srwm, cursor_sr);

                if (IS_CRESTLINE(dev))
                        I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN);
        } else {
                /* Turn off self refresh if both pipes are enabled */
                if (IS_CRESTLINE(dev))
                        I915_WRITE(FW_BLC_SELF, I915_READ(FW_BLC_SELF)
                                   & ~FW_BLC_SELF_EN);
        }

        DRM_DEBUG_KMS("Setting FIFO watermarks - A: 8, B: 8, C: 8, SR %d\n",
                      srwm);

        /* 965 has limitations... */
        I915_WRITE(DSPFW1, (srwm << DSPFW_SR_SHIFT) |
                   (8 << 16) | (8 << 8) | (8 << 0));
        I915_WRITE(DSPFW2, (8 << 8) | (8 << 0));
        /* update cursor SR watermark */
        I915_WRITE(DSPFW3, (cursor_sr << DSPFW_CURSOR_SR_SHIFT));
}
4503
/*
 * Program FIFO watermarks for gen2/3 parts (i855/i915/i945 classes),
 * including memory self-refresh when only one plane is active.
 */
static void i9xx_update_wm(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        const struct intel_watermark_params *wm_info;
        uint32_t fwater_lo;
        uint32_t fwater_hi;
        int cwm, srwm = 1;
        int fifo_size;
        int planea_wm, planeb_wm;
        struct drm_crtc *crtc, *enabled = NULL; /* single enabled crtc, if any */

        /* Pick the watermark parameters for this generation. */
        if (IS_I945GM(dev))
                wm_info = &i945_wm_info;
        else if (!IS_GEN2(dev))
                wm_info = &i915_wm_info;
        else
                wm_info = &i855_wm_info;

        fifo_size = dev_priv->display.get_fifo_size(dev, 0);
        crtc = intel_get_crtc_for_plane(dev, 0);
        if (crtc->enabled && crtc->fb) {
                planea_wm = intel_calculate_wm(crtc->mode.clock,
                                               wm_info, fifo_size,
                                               crtc->fb->bits_per_pixel / 8,
                                               latency_ns);
                enabled = crtc;
        } else
                planea_wm = fifo_size - wm_info->guard_size;

        fifo_size = dev_priv->display.get_fifo_size(dev, 1);
        crtc = intel_get_crtc_for_plane(dev, 1);
        if (crtc->enabled && crtc->fb) {
                planeb_wm = intel_calculate_wm(crtc->mode.clock,
                                               wm_info, fifo_size,
                                               crtc->fb->bits_per_pixel / 8,
                                               latency_ns);
                if (enabled == NULL)
                        enabled = crtc;
                else
                        enabled = NULL; /* both pipes active: no single-plane config */
        } else
                planeb_wm = fifo_size - wm_info->guard_size;

        DRM_DEBUG_KMS("FIFO watermarks - A: %d, B: %d\n", planea_wm, planeb_wm);

        /*
         * Overlay gets an aggressive default since video jitter is bad.
         */
        cwm = 2;

        /* Play safe and disable self-refresh before adjusting watermarks. */
        if (IS_I945G(dev) || IS_I945GM(dev))
                I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN_MASK | 0);
        else if (IS_I915GM(dev))
                I915_WRITE(INSTPM, I915_READ(INSTPM) & ~INSTPM_SELF_EN);

        /* Calc sr entries for one plane configs */
        if (HAS_FW_BLC(dev) && enabled) {
                /* self-refresh has much higher latency */
                static const int sr_latency_ns = 6000;
                int clock = enabled->mode.clock;
                int htotal = enabled->mode.htotal;
                int hdisplay = enabled->mode.hdisplay;
                int pixel_size = enabled->fb->bits_per_pixel / 8;
                unsigned long line_time_us;
                int entries;

                line_time_us = (htotal * 1000) / clock;

                /* Use ns/us then divide to preserve precision */
                entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
                        pixel_size * hdisplay;
                entries = DIV_ROUND_UP(entries, wm_info->cacheline_size);
                DRM_DEBUG_KMS("self-refresh entries: %d\n", entries);
                srwm = wm_info->fifo_size - entries;
                if (srwm < 0)
                        srwm = 1;

                if (IS_I945G(dev) || IS_I945GM(dev))
                        I915_WRITE(FW_BLC_SELF,
                                   FW_BLC_SELF_FIFO_MASK | (srwm & 0xff));
                else if (IS_I915GM(dev))
                        I915_WRITE(FW_BLC_SELF, srwm & 0x3f);
        }

        DRM_DEBUG_KMS("Setting FIFO watermarks - A: %d, B: %d, C: %d, SR %d\n",
                      planea_wm, planeb_wm, cwm, srwm);

        fwater_lo = ((planeb_wm & 0x3f) << 16) | (planea_wm & 0x3f);
        fwater_hi = (cwm & 0x1f);

        /* Set request length to 8 cachelines per fetch */
        fwater_lo = fwater_lo | (1 << 24) | (1 << 8);
        fwater_hi = fwater_hi | (1 << 8);

        I915_WRITE(FW_BLC, fwater_lo);
        I915_WRITE(FW_BLC2, fwater_hi);

        /* Re-enable self-refresh now that the watermarks are programmed. */
        if (HAS_FW_BLC(dev)) {
                if (enabled) {
                        if (IS_I945G(dev) || IS_I945GM(dev))
                                I915_WRITE(FW_BLC_SELF,
                                           FW_BLC_SELF_EN_MASK | FW_BLC_SELF_EN);
                        else if (IS_I915GM(dev))
                                I915_WRITE(INSTPM, I915_READ(INSTPM) | INSTPM_SELF_EN);
                        DRM_DEBUG_KMS("memory self refresh enabled\n");
                } else
                        DRM_DEBUG_KMS("memory self refresh disabled\n");
        }
}
4614
/* Program the plane-A FIFO watermark on i830-class hardware. */
static void i830_update_wm(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_crtc *crtc;
        uint32_t fwater_lo;
        int planea_wm;

        /* Nothing to program unless exactly one CRTC is enabled. */
        crtc = single_enabled_crtc(dev);
        if (crtc == NULL)
                return;

        planea_wm = intel_calculate_wm(crtc->mode.clock, &i830_wm_info,
                                       dev_priv->display.get_fifo_size(dev, 0),
                                       crtc->fb->bits_per_pixel / 8,
                                       latency_ns);
        /* Clear the low 12 bits of FW_BLC, then set (3<<8) and the wm. */
        fwater_lo = I915_READ(FW_BLC) & ~0xfff;
        fwater_lo |= (3<<8) | planea_wm;

        DRM_DEBUG_KMS("Setting FIFO watermarks - A: %d\n", planea_wm);

        I915_WRITE(FW_BLC, fwater_lo);
}
4637
/* Ironlake WM0 latencies, passed as the *_latency_ns wm0 arguments below. */
#define ILK_LP0_PLANE_LATENCY           700
#define ILK_LP0_CURSOR_LATENCY          1300
4640
4641 /*
4642  * Check the wm result.
4643  *
 * If any calculated watermark value is larger than the maximum value that
4645  * can be programmed into the associated watermark register, that watermark
4646  * must be disabled.
4647  */
4648 static bool ironlake_check_srwm(struct drm_device *dev, int level,
4649                                 int fbc_wm, int display_wm, int cursor_wm,
4650                                 const struct intel_watermark_params *display,
4651                                 const struct intel_watermark_params *cursor)
4652 {
4653         struct drm_i915_private *dev_priv = dev->dev_private;
4654
4655         DRM_DEBUG_KMS("watermark %d: display plane %d, fbc lines %d,"
4656                       " cursor %d\n", level, display_wm, fbc_wm, cursor_wm);
4657
4658         if (fbc_wm > SNB_FBC_MAX_SRWM) {
4659                 DRM_DEBUG_KMS("fbc watermark(%d) is too large(%d), disabling wm%d+\n",
4660                               fbc_wm, SNB_FBC_MAX_SRWM, level);
4661
4662                 /* fbc has it's own way to disable FBC WM */
4663                 I915_WRITE(DISP_ARB_CTL,
4664                            I915_READ(DISP_ARB_CTL) | DISP_FBC_WM_DIS);
4665                 return false;
4666         }
4667
4668         if (display_wm > display->max_wm) {
4669                 DRM_DEBUG_KMS("display watermark(%d) is too large(%d), disabling wm%d+\n",
4670                               display_wm, SNB_DISPLAY_MAX_SRWM, level);
4671                 return false;
4672         }
4673
4674         if (cursor_wm > cursor->max_wm) {
4675                 DRM_DEBUG_KMS("cursor watermark(%d) is too large(%d), disabling wm%d+\n",
4676                               cursor_wm, SNB_CURSOR_MAX_SRWM, level);
4677                 return false;
4678         }
4679
4680         if (!(fbc_wm || display_wm || cursor_wm)) {
4681                 DRM_DEBUG_KMS("latency %d is 0, disabling wm%d+\n", level, level);
4682                 return false;
4683         }
4684
4685         return true;
4686 }
4687
4688 /*
4689  * Compute watermark values of WM[1-3],
4690  */
4691 static bool ironlake_compute_srwm(struct drm_device *dev, int level, int plane,
4692                                   int latency_ns,
4693                                   const struct intel_watermark_params *display,
4694                                   const struct intel_watermark_params *cursor,
4695                                   int *fbc_wm, int *display_wm, int *cursor_wm)
4696 {
4697         struct drm_crtc *crtc;
4698         unsigned long line_time_us;
4699         int hdisplay, htotal, pixel_size, clock;
4700         int line_count, line_size;
4701         int small, large;
4702         int entries;
4703
4704         if (!latency_ns) {
4705                 *fbc_wm = *display_wm = *cursor_wm = 0;
4706                 return false;
4707         }
4708
4709         crtc = intel_get_crtc_for_plane(dev, plane);
4710         hdisplay = crtc->mode.hdisplay;
4711         htotal = crtc->mode.htotal;
4712         clock = crtc->mode.clock;
4713         pixel_size = crtc->fb->bits_per_pixel / 8;
4714
4715         line_time_us = (htotal * 1000) / clock;
4716         line_count = (latency_ns / line_time_us + 1000) / 1000;
4717         line_size = hdisplay * pixel_size;
4718
4719         /* Use the minimum of the small and large buffer method for primary */
4720         small = ((clock * pixel_size / 1000) * latency_ns) / 1000;
4721         large = line_count * line_size;
4722
4723         entries = DIV_ROUND_UP(min(small, large), display->cacheline_size);
4724         *display_wm = entries + display->guard_size;
4725
4726         /*
4727          * Spec says:
4728          * FBC WM = ((Final Primary WM * 64) / number of bytes per line) + 2
4729          */
4730         *fbc_wm = DIV_ROUND_UP(*display_wm * 64, line_size) + 2;
4731
4732         /* calculate the self-refresh watermark for display cursor */
4733         entries = line_count * pixel_size * 64;
4734         entries = DIV_ROUND_UP(entries, cursor->cacheline_size);
4735         *cursor_wm = entries + cursor->guard_size;
4736
4737         return ironlake_check_srwm(dev, level,
4738                                    *fbc_wm, *display_wm, *cursor_wm,
4739                                    display, cursor);
4740 }
4741
/*
 * Program the Ironlake per-pipe (WM0) watermarks and, when a single plane
 * is enabled, the WM1/WM2 self-refresh watermarks.  WM3 stays disabled.
 */
static void ironlake_update_wm(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        int fbc_wm, plane_wm, cursor_wm;
        unsigned int enabled;

        enabled = 0;
        if (g4x_compute_wm0(dev, 0,
                            &ironlake_display_wm_info,
                            ILK_LP0_PLANE_LATENCY,
                            &ironlake_cursor_wm_info,
                            ILK_LP0_CURSOR_LATENCY,
                            &plane_wm, &cursor_wm)) {
                I915_WRITE(WM0_PIPEA_ILK,
                           (plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm);
                DRM_DEBUG_KMS("FIFO watermarks For pipe A -"
                              " plane %d, " "cursor: %d\n",
                              plane_wm, cursor_wm);
                enabled |= 1;
        }

        if (g4x_compute_wm0(dev, 1,
                            &ironlake_display_wm_info,
                            ILK_LP0_PLANE_LATENCY,
                            &ironlake_cursor_wm_info,
                            ILK_LP0_CURSOR_LATENCY,
                            &plane_wm, &cursor_wm)) {
                I915_WRITE(WM0_PIPEB_ILK,
                           (plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm);
                DRM_DEBUG_KMS("FIFO watermarks For pipe B -"
                              " plane %d, cursor: %d\n",
                              plane_wm, cursor_wm);
                enabled |= 2;
        }

        /*
         * Calculate and update the self-refresh watermark only when one
         * display plane is used.
         */
        I915_WRITE(WM3_LP_ILK, 0);
        I915_WRITE(WM2_LP_ILK, 0);
        I915_WRITE(WM1_LP_ILK, 0);

        if (!single_plane_enabled(enabled))
                return;
        enabled = ffs(enabled) - 1;     /* convert bitmask to plane index */

        /* WM1 */
        if (!ironlake_compute_srwm(dev, 1, enabled,
                                   ILK_READ_WM1_LATENCY() * 500,
                                   &ironlake_display_srwm_info,
                                   &ironlake_cursor_srwm_info,
                                   &fbc_wm, &plane_wm, &cursor_wm))
                return;

        I915_WRITE(WM1_LP_ILK,
                   WM1_LP_SR_EN |
                   (ILK_READ_WM1_LATENCY() << WM1_LP_LATENCY_SHIFT) |
                   (fbc_wm << WM1_LP_FBC_SHIFT) |
                   (plane_wm << WM1_LP_SR_SHIFT) |
                   cursor_wm);

        /* WM2 */
        if (!ironlake_compute_srwm(dev, 2, enabled,
                                   ILK_READ_WM2_LATENCY() * 500,
                                   &ironlake_display_srwm_info,
                                   &ironlake_cursor_srwm_info,
                                   &fbc_wm, &plane_wm, &cursor_wm))
                return;

        I915_WRITE(WM2_LP_ILK,
                   WM2_LP_EN |
                   (ILK_READ_WM2_LATENCY() << WM1_LP_LATENCY_SHIFT) |
                   (fbc_wm << WM1_LP_FBC_SHIFT) |
                   (plane_wm << WM1_LP_SR_SHIFT) |
                   cursor_wm);

        /*
         * WM3 is unsupported on ILK, probably because we don't have latency
         * data for that power state
         */
}
4824
4825 void sandybridge_update_wm(struct drm_device *dev)
4826 {
4827         struct drm_i915_private *dev_priv = dev->dev_private;
4828         int latency = SNB_READ_WM0_LATENCY() * 100;     /* In unit 0.1us */
4829         u32 val;
4830         int fbc_wm, plane_wm, cursor_wm;
4831         unsigned int enabled;
4832
4833         enabled = 0;
4834         if (g4x_compute_wm0(dev, 0,
4835                             &sandybridge_display_wm_info, latency,
4836                             &sandybridge_cursor_wm_info, latency,
4837                             &plane_wm, &cursor_wm)) {
4838                 val = I915_READ(WM0_PIPEA_ILK);
4839                 val &= ~(WM0_PIPE_PLANE_MASK | WM0_PIPE_CURSOR_MASK);
4840                 I915_WRITE(WM0_PIPEA_ILK, val |
4841                            ((plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm));
4842                 DRM_DEBUG_KMS("FIFO watermarks For pipe A -"
4843                               " plane %d, " "cursor: %d\n",
4844                               plane_wm, cursor_wm);
4845                 enabled |= 1;
4846         }
4847
4848         if (g4x_compute_wm0(dev, 1,
4849                             &sandybridge_display_wm_info, latency,
4850                             &sandybridge_cursor_wm_info, latency,
4851                             &plane_wm, &cursor_wm)) {
4852                 val = I915_READ(WM0_PIPEB_ILK);
4853                 val &= ~(WM0_PIPE_PLANE_MASK | WM0_PIPE_CURSOR_MASK);
4854                 I915_WRITE(WM0_PIPEB_ILK, val |
4855                            ((plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm));
4856                 DRM_DEBUG_KMS("FIFO watermarks For pipe B -"
4857                               " plane %d, cursor: %d\n",
4858                               plane_wm, cursor_wm);
4859                 enabled |= 2;
4860         }
4861
4862         /* IVB has 3 pipes */
4863         if (IS_IVYBRIDGE(dev) &&
4864             g4x_compute_wm0(dev, 2,
4865                             &sandybridge_display_wm_info, latency,
4866                             &sandybridge_cursor_wm_info, latency,
4867                             &plane_wm, &cursor_wm)) {
4868                 val = I915_READ(WM0_PIPEC_IVB);
4869                 val &= ~(WM0_PIPE_PLANE_MASK | WM0_PIPE_CURSOR_MASK);
4870                 I915_WRITE(WM0_PIPEC_IVB, val |
4871                            ((plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm));
4872                 DRM_DEBUG_KMS("FIFO watermarks For pipe C -"
4873                               " plane %d, cursor: %d\n",
4874                               plane_wm, cursor_wm);
4875                 enabled |= 3;
4876         }
4877
4878         /*
4879          * Calculate and update the self-refresh watermark only when one
4880          * display plane is used.
4881          *
4882          * SNB support 3 levels of watermark.
4883          *
4884          * WM1/WM2/WM2 watermarks have to be enabled in the ascending order,
4885          * and disabled in the descending order
4886          *
4887          */
4888         I915_WRITE(WM3_LP_ILK, 0);
4889         I915_WRITE(WM2_LP_ILK, 0);
4890         I915_WRITE(WM1_LP_ILK, 0);
4891
4892         if (!single_plane_enabled(enabled) ||
4893             dev_priv->sprite_scaling_enabled)
4894                 return;
4895         enabled = ffs(enabled) - 1;
4896
4897         /* WM1 */
4898         if (!ironlake_compute_srwm(dev, 1, enabled,
4899                                    SNB_READ_WM1_LATENCY() * 500,
4900                                    &sandybridge_display_srwm_info,
4901                                    &sandybridge_cursor_srwm_info,
4902                                    &fbc_wm, &plane_wm, &cursor_wm))
4903                 return;
4904
4905         I915_WRITE(WM1_LP_ILK,
4906                    WM1_LP_SR_EN |
4907                    (SNB_READ_WM1_LATENCY() << WM1_LP_LATENCY_SHIFT) |
4908                    (fbc_wm << WM1_LP_FBC_SHIFT) |
4909                    (plane_wm << WM1_LP_SR_SHIFT) |
4910                    cursor_wm);
4911
4912         /* WM2 */
4913         if (!ironlake_compute_srwm(dev, 2, enabled,
4914                                    SNB_READ_WM2_LATENCY() * 500,
4915                                    &sandybridge_display_srwm_info,
4916                                    &sandybridge_cursor_srwm_info,
4917                                    &fbc_wm, &plane_wm, &cursor_wm))
4918                 return;
4919
4920         I915_WRITE(WM2_LP_ILK,
4921                    WM2_LP_EN |
4922                    (SNB_READ_WM2_LATENCY() << WM1_LP_LATENCY_SHIFT) |
4923                    (fbc_wm << WM1_LP_FBC_SHIFT) |
4924                    (plane_wm << WM1_LP_SR_SHIFT) |
4925                    cursor_wm);
4926
4927         /* WM3 */
4928         if (!ironlake_compute_srwm(dev, 3, enabled,
4929                                    SNB_READ_WM3_LATENCY() * 500,
4930                                    &sandybridge_display_srwm_info,
4931                                    &sandybridge_cursor_srwm_info,
4932                                    &fbc_wm, &plane_wm, &cursor_wm))
4933                 return;
4934
4935         I915_WRITE(WM3_LP_ILK,
4936                    WM3_LP_EN |
4937                    (SNB_READ_WM3_LATENCY() << WM1_LP_LATENCY_SHIFT) |
4938                    (fbc_wm << WM1_LP_FBC_SHIFT) |
4939                    (plane_wm << WM1_LP_SR_SHIFT) |
4940                    cursor_wm);
4941 }
4942
4943 static bool
4944 sandybridge_compute_sprite_wm(struct drm_device *dev, int plane,
4945                               uint32_t sprite_width, int pixel_size,
4946                               const struct intel_watermark_params *display,
4947                               int display_latency_ns, int *sprite_wm)
4948 {
4949         struct drm_crtc *crtc;
4950         int clock;
4951         int entries, tlb_miss;
4952
4953         crtc = intel_get_crtc_for_plane(dev, plane);
4954         if (crtc->fb == NULL || !crtc->enabled) {
4955                 *sprite_wm = display->guard_size;
4956                 return false;
4957         }
4958
4959         clock = crtc->mode.clock;
4960
4961         /* Use the small buffer method to calculate the sprite watermark */
4962         entries = ((clock * pixel_size / 1000) * display_latency_ns) / 1000;
4963         tlb_miss = display->fifo_size*display->cacheline_size -
4964                 sprite_width * 8;
4965         if (tlb_miss > 0)
4966                 entries += tlb_miss;
4967         entries = DIV_ROUND_UP(entries, display->cacheline_size);
4968         *sprite_wm = entries + display->guard_size;
4969         if (*sprite_wm > (int)display->max_wm)
4970                 *sprite_wm = display->max_wm;
4971
4972         return true;
4973 }
4974
4975 static bool
4976 sandybridge_compute_sprite_srwm(struct drm_device *dev, int plane,
4977                                 uint32_t sprite_width, int pixel_size,
4978                                 const struct intel_watermark_params *display,
4979                                 int latency_ns, int *sprite_wm)
4980 {
4981         struct drm_crtc *crtc;
4982         unsigned long line_time_us;
4983         int clock;
4984         int line_count, line_size;
4985         int small, large;
4986         int entries;
4987
4988         if (!latency_ns) {
4989                 *sprite_wm = 0;
4990                 return false;
4991         }
4992
4993         crtc = intel_get_crtc_for_plane(dev, plane);
4994         clock = crtc->mode.clock;
4995
4996         line_time_us = (sprite_width * 1000) / clock;
4997         line_count = (latency_ns / line_time_us + 1000) / 1000;
4998         line_size = sprite_width * pixel_size;
4999
5000         /* Use the minimum of the small and large buffer method for primary */
5001         small = ((clock * pixel_size / 1000) * latency_ns) / 1000;
5002         large = line_count * line_size;
5003
5004         entries = DIV_ROUND_UP(min(small, large), display->cacheline_size);
5005         *sprite_wm = entries + display->guard_size;
5006
5007         return *sprite_wm > 0x3ff ? false : true;
5008 }
5009
/*
 * Update the sprite watermarks for @pipe on SNB/IVB.
 *
 * Programs the sprite field of the pipe's WM0 register and the LP1
 * self-refresh sprite watermark; on Ivybridge the LP2/LP3 sprite
 * watermarks are programmed as well.  Any failed computation aborts
 * the sequence, leaving the remaining registers untouched.
 */
static void sandybridge_update_sprite_wm(struct drm_device *dev, int pipe,
                                         uint32_t sprite_width, int pixel_size)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        int latency = SNB_READ_WM0_LATENCY() * 100;     /* In unit 0.1us */
        u32 val;
        int sprite_wm, reg;
        int ret;

        /* Map the pipe to its WM0 register; only IVB has a pipe C. */
        switch (pipe) {
        case 0:
                reg = WM0_PIPEA_ILK;
                break;
        case 1:
                reg = WM0_PIPEB_ILK;
                break;
        case 2:
                reg = WM0_PIPEC_IVB;
                break;
        default:
                return; /* bad pipe */
        }

        ret = sandybridge_compute_sprite_wm(dev, pipe, sprite_width, pixel_size,
                                            &sandybridge_display_wm_info,
                                            latency, &sprite_wm);
        if (!ret) {
                DRM_DEBUG_KMS("failed to compute sprite wm for pipe %d\n",
                              pipe);
                return;
        }

        /* Read-modify-write only the sprite field of WM0. */
        val = I915_READ(reg);
        val &= ~WM0_PIPE_SPRITE_MASK;
        I915_WRITE(reg, val | (sprite_wm << WM0_PIPE_SPRITE_SHIFT));
        DRM_DEBUG_KMS("sprite watermarks For pipe %d - %d\n", pipe, sprite_wm);


        /* LP1 self-refresh sprite watermark (all SNB-class hardware). */
        ret = sandybridge_compute_sprite_srwm(dev, pipe, sprite_width,
                                              pixel_size,
                                              &sandybridge_display_srwm_info,
                                              SNB_READ_WM1_LATENCY() * 500,
                                              &sprite_wm);
        if (!ret) {
                DRM_DEBUG_KMS("failed to compute sprite lp1 wm on pipe %d\n",
                              pipe);
                return;
        }
        I915_WRITE(WM1S_LP_ILK, sprite_wm);

        /* Only IVB has two more LP watermarks for sprite */
        if (!IS_IVYBRIDGE(dev))
                return;

        ret = sandybridge_compute_sprite_srwm(dev, pipe, sprite_width,
                                              pixel_size,
                                              &sandybridge_display_srwm_info,
                                              SNB_READ_WM2_LATENCY() * 500,
                                              &sprite_wm);
        if (!ret) {
                DRM_DEBUG_KMS("failed to compute sprite lp2 wm on pipe %d\n",
                              pipe);
                return;
        }
        I915_WRITE(WM2S_LP_IVB, sprite_wm);

        ret = sandybridge_compute_sprite_srwm(dev, pipe, sprite_width,
                                              pixel_size,
                                              &sandybridge_display_srwm_info,
                                              SNB_READ_WM3_LATENCY() * 500,
                                              &sprite_wm);
        if (!ret) {
                DRM_DEBUG_KMS("failed to compute sprite lp3 wm on pipe %d\n",
                              pipe);
                return;
        }
        I915_WRITE(WM3S_LP_IVB, sprite_wm);
}
5088
5089 /**
5090  * intel_update_watermarks - update FIFO watermark values based on current modes
5091  *
5092  * Calculate watermark values for the various WM regs based on current mode
5093  * and plane configuration.
5094  *
5095  * There are several cases to deal with here:
5096  *   - normal (i.e. non-self-refresh)
5097  *   - self-refresh (SR) mode
5098  *   - lines are large relative to FIFO size (buffer can hold up to 2)
5099  *   - lines are small relative to FIFO size (buffer can hold more than 2
5100  *     lines), so need to account for TLB latency
5101  *
5102  *   The normal calculation is:
5103  *     watermark = dotclock * bytes per pixel * latency
5104  *   where latency is platform & configuration dependent (we assume pessimal
5105  *   values here).
5106  *
5107  *   The SR calculation is:
5108  *     watermark = (trunc(latency/line time)+1) * surface width *
5109  *       bytes per pixel
5110  *   where
5111  *     line time = htotal / dotclock
5112  *     surface width = hdisplay for normal plane and 64 for cursor
5113  *   and latency is assumed to be high, as above.
5114  *
5115  * The final value programmed to the register should always be rounded up,
5116  * and include an extra 2 entries to account for clock crossings.
5117  *
5118  * We don't use the sprite, so we can ignore that.  And on Crestline we have
5119  * to set the non-SR watermarks to 8.
5120  */
5121 static void intel_update_watermarks(struct drm_device *dev)
5122 {
5123         struct drm_i915_private *dev_priv = dev->dev_private;
5124
5125         if (dev_priv->display.update_wm)
5126                 dev_priv->display.update_wm(dev);
5127 }
5128
5129 void intel_update_sprite_watermarks(struct drm_device *dev, int pipe,
5130                                     uint32_t sprite_width, int pixel_size)
5131 {
5132         struct drm_i915_private *dev_priv = dev->dev_private;
5133
5134         if (dev_priv->display.update_sprite_wm)
5135                 dev_priv->display.update_sprite_wm(dev, pipe, sprite_width,
5136                                                    pixel_size);
5137 }
5138
5139 static inline bool intel_panel_use_ssc(struct drm_i915_private *dev_priv)
5140 {
5141         if (i915_panel_use_ssc >= 0)
5142                 return i915_panel_use_ssc != 0;
5143         return dev_priv->lvds_use_ssc
5144                 && !(dev_priv->quirks & QUIRK_LVDS_SSC_DISABLE);
5145 }
5146
5147 /**
5148  * intel_choose_pipe_bpp_dither - figure out what color depth the pipe should send
5149  * @crtc: CRTC structure
5150  * @mode: requested mode
5151  *
5152  * A pipe may be connected to one or more outputs.  Based on the depth of the
5153  * attached framebuffer, choose a good color depth to use on the pipe.
5154  *
5155  * If possible, match the pipe depth to the fb depth.  In some cases, this
5156  * isn't ideal, because the connected output supports a lesser or restricted
5157  * set of depths.  Resolve that here:
5158  *    LVDS typically supports only 6bpc, so clamp down in that case
5159  *    HDMI supports only 8bpc or 12bpc, so clamp to 8bpc with dither for 10bpc
5160  *    Displays may support a restricted set as well, check EDID and clamp as
5161  *      appropriate.
5162  *    DP may want to dither down to 6bpc to fit larger modes
5163  *
5164  * RETURNS:
5165  * Dithering requirement (i.e. false if display bpc and pipe bpc match,
5166  * true if they don't match).
5167  */
static bool intel_choose_pipe_bpp_dither(struct drm_crtc *crtc,
                                         unsigned int *pipe_bpp,
                                         struct drm_display_mode *mode)
{
        struct drm_device *dev = crtc->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_encoder *encoder;
        struct drm_connector *connector;
        /* display_bpc starts unbounded and is clamped downward by each
         * encoder/connector limit found below. */
        unsigned int display_bpc = UINT_MAX, bpc;

        /* Walk the encoders & connectors on this crtc, get min bpc */
        list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
                struct intel_encoder *intel_encoder = to_intel_encoder(encoder);

                if (encoder->crtc != crtc)
                        continue;

                if (intel_encoder->type == INTEL_OUTPUT_LVDS) {
                        unsigned int lvds_bpc;

                        /* A3 power state distinguishes 24-bit (8bpc)
                         * from 18-bit (6bpc) LVDS panels. */
                        if ((I915_READ(PCH_LVDS) & LVDS_A3_POWER_MASK) ==
                            LVDS_A3_POWER_UP)
                                lvds_bpc = 8;
                        else
                                lvds_bpc = 6;

                        if (lvds_bpc < display_bpc) {
                                DRM_DEBUG_KMS("clamping display bpc (was %d) to LVDS (%d)\n", display_bpc, lvds_bpc);
                                display_bpc = lvds_bpc;
                        }
                        continue;
                }

                if (intel_encoder->type == INTEL_OUTPUT_EDP) {
                        /* Use VBT settings if we have an eDP panel */
                        unsigned int edp_bpc = dev_priv->edp.bpp / 3;

                        if (edp_bpc < display_bpc) {
                                DRM_DEBUG_KMS("clamping display bpc (was %d) to eDP (%d)\n", display_bpc, edp_bpc);
                                display_bpc = edp_bpc;
                        }
                        continue;
                }

                /* Not one of the known troublemakers, check the EDID */
                list_for_each_entry(connector, &dev->mode_config.connector_list,
                                    head) {
                        if (connector->encoder != encoder)
                                continue;

                        /* Don't use an invalid EDID bpc value */
                        if (connector->display_info.bpc &&
                            connector->display_info.bpc < display_bpc) {
                                DRM_DEBUG_KMS("clamping display bpc (was %d) to EDID reported max of %d\n", display_bpc, connector->display_info.bpc);
                                display_bpc = connector->display_info.bpc;
                        }
                }

                /*
                 * HDMI is either 12 or 8, so if the display lets 10bpc sneak
                 * through, clamp it down.  (Note: >12bpc will be caught below.)
                 */
                if (intel_encoder->type == INTEL_OUTPUT_HDMI) {
                        /* 9-11 bpc is rounded up to 12; anything else
                         * (including the no-limit UINT_MAX case) drops
                         * to 8.  The fb-depth min() below still caps
                         * the final value. */
                        if (display_bpc > 8 && display_bpc < 12) {
                                DRM_DEBUG_KMS("forcing bpc to 12 for HDMI\n");
                                display_bpc = 12;
                        } else {
                                DRM_DEBUG_KMS("forcing bpc to 8 for HDMI\n");
                                display_bpc = 8;
                        }
                }
        }

        /* DP link-bandwidth pressure can force 6bpc regardless of the
         * limits discovered above. */
        if (mode->private_flags & INTEL_MODE_DP_FORCE_6BPC) {
                DRM_DEBUG_KMS("Dithering DP to 6bpc\n");
                display_bpc = 6;
        }

        /*
         * We could just drive the pipe at the highest bpc all the time and
         * enable dithering as needed, but that costs bandwidth.  So choose
         * the minimum value that expresses the full color range of the fb but
         * also stays within the max display bpc discovered above.
         */

        /* Derive the bpc the framebuffer contents actually need. */
        switch (crtc->fb->depth) {
        case 8:
                bpc = 8; /* since we go through a colormap */
                break;
        case 15:
        case 16:
                bpc = 6; /* min is 18bpp */
                break;
        case 24:
                bpc = 8;
                break;
        case 30:
                bpc = 10;
                break;
        case 48:
                bpc = 12;
                break;
        default:
                DRM_DEBUG("unsupported depth, assuming 24 bits\n");
                bpc = min((unsigned int)8, display_bpc);
                break;
        }

        display_bpc = min(display_bpc, bpc);

        DRM_DEBUG_KMS("setting pipe bpc to %d (max display bpc %d)\n",
                      bpc, display_bpc);

        *pipe_bpp = display_bpc * 3;

        /* Dithering is needed whenever the pipe depth differs from what
         * the fb can express. */
        return display_bpc != bpc;
}
5285
5286 static int i9xx_get_refclk(struct drm_crtc *crtc, int num_connectors)
5287 {
5288         struct drm_device *dev = crtc->dev;
5289         struct drm_i915_private *dev_priv = dev->dev_private;
5290         int refclk;
5291
5292         if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) &&
5293             intel_panel_use_ssc(dev_priv) && num_connectors < 2) {
5294                 refclk = dev_priv->lvds_ssc_freq * 1000;
5295                 DRM_DEBUG_KMS("using SSC reference clock of %d MHz\n",
5296                               refclk / 1000);
5297         } else if (!IS_GEN2(dev)) {
5298                 refclk = 96000;
5299         } else {
5300                 refclk = 48000;
5301         }
5302
5303         return refclk;
5304 }
5305
5306 static void i9xx_adjust_sdvo_tv_clock(struct drm_display_mode *adjusted_mode,
5307                                       intel_clock_t *clock)
5308 {
5309         /* SDVO TV has fixed PLL values depend on its clock range,
5310            this mirrors vbios setting. */
5311         if (adjusted_mode->clock >= 100000
5312             && adjusted_mode->clock < 140500) {
5313                 clock->p1 = 2;
5314                 clock->p2 = 10;
5315                 clock->n = 3;
5316                 clock->m1 = 16;
5317                 clock->m2 = 8;
5318         } else if (adjusted_mode->clock >= 140500
5319                    && adjusted_mode->clock <= 200000) {
5320                 clock->p1 = 1;
5321                 clock->p2 = 10;
5322                 clock->n = 6;
5323                 clock->m1 = 12;
5324                 clock->m2 = 8;
5325         }
5326 }
5327
5328 static void i9xx_update_pll_dividers(struct drm_crtc *crtc,
5329                                      intel_clock_t *clock,
5330                                      intel_clock_t *reduced_clock)
5331 {
5332         struct drm_device *dev = crtc->dev;
5333         struct drm_i915_private *dev_priv = dev->dev_private;
5334         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5335         int pipe = intel_crtc->pipe;
5336         u32 fp, fp2 = 0;
5337
5338         if (IS_PINEVIEW(dev)) {
5339                 fp = (1 << clock->n) << 16 | clock->m1 << 8 | clock->m2;
5340                 if (reduced_clock)
5341                         fp2 = (1 << reduced_clock->n) << 16 |
5342                                 reduced_clock->m1 << 8 | reduced_clock->m2;
5343         } else {
5344                 fp = clock->n << 16 | clock->m1 << 8 | clock->m2;
5345                 if (reduced_clock)
5346                         fp2 = reduced_clock->n << 16 | reduced_clock->m1 << 8 |
5347                                 reduced_clock->m2;
5348         }
5349
5350         I915_WRITE(FP0(pipe), fp);
5351
5352         intel_crtc->lowfreq_avail = false;
5353         if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) &&
5354             reduced_clock && i915_powersave) {
5355                 I915_WRITE(FP1(pipe), fp2);
5356                 intel_crtc->lowfreq_avail = true;
5357         } else {
5358                 I915_WRITE(FP1(pipe), fp);
5359         }
5360 }
5361
/*
 * Configure the LVDS port register for @crtc: pipe selection, data-pair
 * power for single/dual channel mode, border bits, dithering and sync
 * polarities.  Writes the final value to the LVDS register.
 */
static void intel_update_lvds(struct drm_crtc *crtc, intel_clock_t *clock,
                              struct drm_display_mode *adjusted_mode)
{
        struct drm_device *dev = crtc->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        int pipe = intel_crtc->pipe;
        u32 temp, lvds_sync = 0;

        temp = I915_READ(LVDS);
        temp |= LVDS_PORT_EN | LVDS_A0A2_CLKA_POWER_UP;
        /* Route the LVDS port to the pipe driving it. */
        if (pipe == 1) {
                temp |= LVDS_PIPEB_SELECT;
        } else {
                temp &= ~LVDS_PIPEB_SELECT;
        }
        /* set the corresponding LVDS_BORDER bit */
        temp |= dev_priv->lvds_border_bits;
        /* Set the B0-B3 data pairs corresponding to whether we're going to
         * set the DPLLs for dual-channel mode or not.
         */
        if (clock->p2 == 7)
                temp |= LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP;
        else
                temp &= ~(LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP);

        /* It would be nice to set 24 vs 18-bit mode (LVDS_A3_POWER_UP)
         * appropriately here, but we need to look more thoroughly into how
         * panels behave in the two modes.
         */
        /* set the dithering flag on LVDS as needed */
        if (INTEL_INFO(dev)->gen >= 4) {
                if (dev_priv->lvds_dither)
                        temp |= LVDS_ENABLE_DITHER;
                else
                        temp &= ~LVDS_ENABLE_DITHER;
        }
        /* Collect the sync polarities the adjusted mode asks for and
         * update the register only when they actually change. */
        if (adjusted_mode->flags & DRM_MODE_FLAG_NHSYNC)
                lvds_sync |= LVDS_HSYNC_POLARITY;
        if (adjusted_mode->flags & DRM_MODE_FLAG_NVSYNC)
                lvds_sync |= LVDS_VSYNC_POLARITY;
        if ((temp & (LVDS_HSYNC_POLARITY | LVDS_VSYNC_POLARITY))
            != lvds_sync) {
                /* '-' = negative, '+' = positive polarity; no NUL needed */
                char flags[2] = "-+";
                DRM_INFO("Changing LVDS panel from "
                         "(%chsync, %cvsync) to (%chsync, %cvsync)\n",
                         flags[!(temp & LVDS_HSYNC_POLARITY)],
                         flags[!(temp & LVDS_VSYNC_POLARITY)],
                         flags[!(lvds_sync & LVDS_HSYNC_POLARITY)],
                         flags[!(lvds_sync & LVDS_VSYNC_POLARITY)]);
                temp &= ~(LVDS_HSYNC_POLARITY | LVDS_VSYNC_POLARITY);
                temp |= lvds_sync;
        }
        I915_WRITE(LVDS, temp);
}
5417
/*
 * Program the DPLL for an i9xx-class pipe from the computed clock
 * dividers and the output types attached to @crtc.  The enable sequence
 * (write with VCO disabled, settle, power LVDS pins, enable, settle) is
 * order-sensitive.
 */
static void i9xx_update_pll(struct drm_crtc *crtc,
                            struct drm_display_mode *mode,
                            struct drm_display_mode *adjusted_mode,
                            intel_clock_t *clock, intel_clock_t *reduced_clock,
                            int num_connectors)
{
        struct drm_device *dev = crtc->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        int pipe = intel_crtc->pipe;
        u32 dpll;
        bool is_sdvo;

        /* SDVO and HDMI share the same DPLL requirements here. */
        is_sdvo = intel_pipe_has_type(crtc, INTEL_OUTPUT_SDVO) ||
                intel_pipe_has_type(crtc, INTEL_OUTPUT_HDMI);

        dpll = DPLL_VGA_MODE_DIS;

        if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
                dpll |= DPLLB_MODE_LVDS;
        else
                dpll |= DPLLB_MODE_DAC_SERIAL;
        if (is_sdvo) {
                int pixel_multiplier = intel_mode_get_pixel_multiplier(adjusted_mode);
                if (pixel_multiplier > 1) {
                        /* Only these platforms carry the multiplier in
                         * the DPLL register itself; gen4+ uses DPLL_MD
                         * at the bottom of this function. */
                        if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
                                dpll |= (pixel_multiplier - 1) << SDVO_MULTIPLIER_SHIFT_HIRES;
                }
                dpll |= DPLL_DVO_HIGH_SPEED;
        }
        if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT))
                dpll |= DPLL_DVO_HIGH_SPEED;

        /* compute bitmask from p1 value */
        if (IS_PINEVIEW(dev))
                dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW;
        else {
                dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
                /* Also encode the reduced-clock P1 for G4x downclocking. */
                if (IS_G4X(dev) && reduced_clock)
                        dpll |= (1 << (reduced_clock->p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;
        }
        switch (clock->p2) {
        case 5:
                dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5;
                break;
        case 7:
                dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7;
                break;
        case 10:
                dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10;
                break;
        case 14:
                dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14;
                break;
        }
        if (INTEL_INFO(dev)->gen >= 4)
                dpll |= (6 << PLL_LOAD_PULSE_PHASE_SHIFT);

        /* Select the reference clock input. */
        if (is_sdvo && intel_pipe_has_type(crtc, INTEL_OUTPUT_TVOUT))
                dpll |= PLL_REF_INPUT_TVCLKINBC;
        else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_TVOUT))
                /* XXX: just matching BIOS for now */
                /*      dpll |= PLL_REF_INPUT_TVCLKINBC; */
                dpll |= 3;
        else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) &&
                 intel_panel_use_ssc(dev_priv) && num_connectors < 2)
                dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
        else
                dpll |= PLL_REF_INPUT_DREFCLK;

        /* Write the divider setup with the VCO still disabled and let
         * the hardware settle before enabling. */
        dpll |= DPLL_VCO_ENABLE;
        I915_WRITE(DPLL(pipe), dpll & ~DPLL_VCO_ENABLE);
        POSTING_READ(DPLL(pipe));
        udelay(150);

        /* The LVDS pin pair needs to be on before the DPLLs are enabled.
         * This is an exception to the general rule that mode_set doesn't turn
         * things on.
         */
        if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
                intel_update_lvds(crtc, clock, adjusted_mode);

        if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT))
                intel_dp_set_m_n(crtc, mode, adjusted_mode);

        I915_WRITE(DPLL(pipe), dpll);

        /* Wait for the clocks to stabilize. */
        POSTING_READ(DPLL(pipe));
        udelay(150);

        if (INTEL_INFO(dev)->gen >= 4) {
                /* gen4+ programs the SDVO pixel multiplier via DPLL_MD. */
                u32 temp = 0;
                if (is_sdvo) {
                        temp = intel_mode_get_pixel_multiplier(adjusted_mode);
                        if (temp > 1)
                                temp = (temp - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT;
                        else
                                temp = 0;
                }
                I915_WRITE(DPLL_MD(pipe), temp);
        } else {
                /* The pixel multiplier can only be updated once the
                 * DPLL is enabled and the clocks are stable.
                 *
                 * So write it again.
                 */
                I915_WRITE(DPLL(pipe), dpll);
        }
}
5528
/*
 * Program the DPLL for a gen2 (i8xx) pipe from the computed clock
 * dividers and the output types attached to @crtc.  The enable sequence
 * (write with VCO disabled, settle, enable, settle) is order-sensitive.
 */
static void i8xx_update_pll(struct drm_crtc *crtc,
                            struct drm_display_mode *adjusted_mode,
                            intel_clock_t *clock,
                            int num_connectors)
{
        struct drm_device *dev = crtc->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        int pipe = intel_crtc->pipe;
        u32 dpll;

        dpll = DPLL_VGA_MODE_DIS;

        /* Encode the P1/P2 post dividers; LVDS uses the one-hot P1 field. */
        if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
                dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
        } else {
                if (clock->p1 == 2)
                        dpll |= PLL_P1_DIVIDE_BY_TWO;
                else
                        dpll |= (clock->p1 - 2) << DPLL_FPA01_P1_POST_DIV_SHIFT;
                if (clock->p2 == 4)
                        dpll |= PLL_P2_DIVIDE_BY_4;
        }

        /* Select the reference clock input. */
        if (intel_pipe_has_type(crtc, INTEL_OUTPUT_TVOUT))
                /* XXX: just matching BIOS for now */
                /*      dpll |= PLL_REF_INPUT_TVCLKINBC; */
                dpll |= 3;
        else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) &&
                 intel_panel_use_ssc(dev_priv) && num_connectors < 2)
                dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
        else
                dpll |= PLL_REF_INPUT_DREFCLK;

        /* Write the divider setup with the VCO still disabled and let
         * the hardware settle before enabling. */
        dpll |= DPLL_VCO_ENABLE;
        I915_WRITE(DPLL(pipe), dpll & ~DPLL_VCO_ENABLE);
        POSTING_READ(DPLL(pipe));
        udelay(150);

        I915_WRITE(DPLL(pipe), dpll);

        /* Wait for the clocks to stabilize. */
        POSTING_READ(DPLL(pipe));
        udelay(150);

        /* The LVDS pin pair needs to be on before the DPLLs are enabled.
         * This is an exception to the general rule that mode_set doesn't turn
         * things on.
         */
        if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
                intel_update_lvds(crtc, clock, adjusted_mode);

        /* The pixel multiplier can only be updated once the
         * DPLL is enabled and the clocks are stable.
         *
         * So write it again.
         */
        I915_WRITE(DPLL(pipe), dpll);
}
5588
/*
 * Full mode set for non-PCH (gen2-gen4) CRTCs.
 *
 * Scans the encoders attached to @crtc to classify the output type,
 * finds PLL divisors for @adjusted_mode (and a reduced LVDS downclock
 * when available), programs the DPLL via the per-generation helper,
 * then writes the pipe timings, pipe source size and display plane
 * registers and enables pipe and plane.
 *
 * Returns 0 on success, -EINVAL when no PLL divisors can be found for
 * the requested dotclock, or the error from intel_pipe_set_base().
 */
static int i9xx_crtc_mode_set(struct drm_crtc *crtc,
			      struct drm_display_mode *mode,
			      struct drm_display_mode *adjusted_mode,
			      int x, int y,
			      struct drm_framebuffer *old_fb)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	int plane = intel_crtc->plane;
	int refclk, num_connectors = 0;
	intel_clock_t clock, reduced_clock;
	u32 dspcntr, pipeconf, vsyncshift;
	bool ok, has_reduced_clock = false, is_sdvo = false;
	bool is_lvds = false, is_tv = false, is_dp = false;
	struct drm_mode_config *mode_config = &dev->mode_config;
	struct intel_encoder *encoder;
	const intel_limit_t *limit;
	int ret;

	/* Classify every encoder driving this CRTC; the output type
	 * influences refclk selection and DPLL programming below. */
	list_for_each_entry(encoder, &mode_config->encoder_list, base.head) {
		if (encoder->base.crtc != crtc)
			continue;

		switch (encoder->type) {
		case INTEL_OUTPUT_LVDS:
			is_lvds = true;
			break;
		case INTEL_OUTPUT_SDVO:
		case INTEL_OUTPUT_HDMI:
			is_sdvo = true;
			if (encoder->needs_tv_clock)
				is_tv = true;
			break;
		case INTEL_OUTPUT_TVOUT:
			is_tv = true;
			break;
		case INTEL_OUTPUT_DISPLAYPORT:
			is_dp = true;
			break;
		}

		num_connectors++;
	}

	refclk = i9xx_get_refclk(crtc, num_connectors);

	/*
	 * Returns a set of divisors for the desired target clock with the given
	 * refclk, or FALSE.  The returned values represent the clock equation:
	 * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
	 */
	limit = intel_limit(crtc, refclk);
	ok = limit->find_pll(limit, crtc, adjusted_mode->clock, refclk, NULL,
			     &clock);
	if (!ok) {
		DRM_ERROR("Couldn't find PLL settings for mode!\n");
		return -EINVAL;
	}

	/* Ensure that the cursor is valid for the new mode before changing... */
	intel_crtc_update_cursor(crtc, true);

	if (is_lvds && dev_priv->lvds_downclock_avail) {
		/*
		 * Ensure we match the reduced clock's P to the target clock.
		 * If the clocks don't match, we can't switch the display clock
		 * by using the FP0/FP1. In such case we will disable the LVDS
		 * downclock feature.
		*/
		has_reduced_clock = limit->find_pll(limit, crtc,
						    dev_priv->lvds_downclock,
						    refclk,
						    &clock,
						    &reduced_clock);
	}

	/* SDVO TV clocks need tweaked divisors; see the helper. */
	if (is_sdvo && is_tv)
		i9xx_adjust_sdvo_tv_clock(adjusted_mode, &clock);

	i9xx_update_pll_dividers(crtc, &clock, has_reduced_clock ?
				 &reduced_clock : NULL);

	/* gen2 and gen3+ have different DPLL layouts. */
	if (IS_GEN2(dev))
		i8xx_update_pll(crtc, adjusted_mode, &clock, num_connectors);
	else
		i9xx_update_pll(crtc, mode, adjusted_mode, &clock,
				has_reduced_clock ? &reduced_clock : NULL,
				num_connectors);

	/* setup pipeconf */
	pipeconf = I915_READ(PIPECONF(pipe));

	/* Set up the display plane register */
	dspcntr = DISPPLANE_GAMMA_ENABLE;

	if (pipe == 0)
		dspcntr &= ~DISPPLANE_SEL_PIPE_MASK;
	else
		dspcntr |= DISPPLANE_SEL_PIPE_B;

	if (pipe == 0 && INTEL_INFO(dev)->gen < 4) {
		/* Enable pixel doubling when the dot clock is > 90% of the (display)
		 * core speed.
		 *
		 * XXX: No double-wide on 915GM pipe B. Is that the only reason for the
		 * pipe == 0 check?
		 */
		if (mode->clock >
		    dev_priv->display.get_display_clock_speed(dev) * 9 / 10)
			pipeconf |= PIPECONF_DOUBLE_WIDE;
		else
			pipeconf &= ~PIPECONF_DOUBLE_WIDE;
	}

	/* default to 8bpc */
	pipeconf &= ~(PIPECONF_BPP_MASK | PIPECONF_DITHER_EN);
	if (is_dp) {
		/* DP sinks may force 6bpc + dithering to fit link bandwidth. */
		if (mode->private_flags & INTEL_MODE_DP_FORCE_6BPC) {
			pipeconf |= PIPECONF_BPP_6 |
				    PIPECONF_DITHER_EN |
				    PIPECONF_DITHER_TYPE_SP;
		}
	}

	DRM_DEBUG_KMS("Mode for pipe %c:\n", pipe == 0 ? 'A' : 'B');
	drm_mode_debug_printmodeline(mode);

	/* CxSR downclocking is only usable with the reduced LVDS clock. */
	if (HAS_PIPE_CXSR(dev)) {
		if (intel_crtc->lowfreq_avail) {
			DRM_DEBUG_KMS("enabling CxSR downclocking\n");
			pipeconf |= PIPECONF_CXSR_DOWNCLOCK;
		} else {
			DRM_DEBUG_KMS("disabling CxSR downclocking\n");
			pipeconf &= ~PIPECONF_CXSR_DOWNCLOCK;
		}
	}

	pipeconf &= ~PIPECONF_INTERLACE_MASK;
	if (!IS_GEN2(dev) &&
	    adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
		pipeconf |= PIPECONF_INTERLACE_W_FIELD_INDICATION;
		/* the chip adds 2 halflines automatically */
		adjusted_mode->crtc_vtotal -= 1;
		adjusted_mode->crtc_vblank_end -= 1;
		vsyncshift = adjusted_mode->crtc_hsync_start
			     - adjusted_mode->crtc_htotal/2;
	} else {
		pipeconf |= PIPECONF_PROGRESSIVE;
		vsyncshift = 0;
	}

	/* NOTE(review): VSYNCSHIFT write is skipped on gen3 — presumably
	 * the register is absent or different there; confirm against bspec. */
	if (!IS_GEN3(dev))
		I915_WRITE(VSYNCSHIFT(pipe), vsyncshift);

	/* Pipe timing registers: hardware wants (value - 1) encodings,
	 * low half = active/start, high half = total/end. */
	I915_WRITE(HTOTAL(pipe),
		   (adjusted_mode->crtc_hdisplay - 1) |
		   ((adjusted_mode->crtc_htotal - 1) << 16));
	I915_WRITE(HBLANK(pipe),
		   (adjusted_mode->crtc_hblank_start - 1) |
		   ((adjusted_mode->crtc_hblank_end - 1) << 16));
	I915_WRITE(HSYNC(pipe),
		   (adjusted_mode->crtc_hsync_start - 1) |
		   ((adjusted_mode->crtc_hsync_end - 1) << 16));

	I915_WRITE(VTOTAL(pipe),
		   (adjusted_mode->crtc_vdisplay - 1) |
		   ((adjusted_mode->crtc_vtotal - 1) << 16));
	I915_WRITE(VBLANK(pipe),
		   (adjusted_mode->crtc_vblank_start - 1) |
		   ((adjusted_mode->crtc_vblank_end - 1) << 16));
	I915_WRITE(VSYNC(pipe),
		   (adjusted_mode->crtc_vsync_start - 1) |
		   ((adjusted_mode->crtc_vsync_end - 1) << 16));

	/* pipesrc and dspsize control the size that is scaled from,
	 * which should always be the user's requested size.
	 */
	I915_WRITE(DSPSIZE(plane),
		   ((mode->vdisplay - 1) << 16) |
		   (mode->hdisplay - 1));
	I915_WRITE(DSPPOS(plane), 0);
	I915_WRITE(PIPESRC(pipe),
		   ((mode->hdisplay - 1) << 16) | (mode->vdisplay - 1));

	I915_WRITE(PIPECONF(pipe), pipeconf);
	POSTING_READ(PIPECONF(pipe));
	intel_enable_pipe(dev_priv, pipe, false);

	/* Let the pipe run for a frame before enabling the plane. */
	intel_wait_for_vblank(dev, pipe);

	I915_WRITE(DSPCNTR(plane), dspcntr);
	POSTING_READ(DSPCNTR(plane));
	intel_enable_plane(dev_priv, plane, pipe);

	ret = intel_pipe_set_base(crtc, x, y, old_fb);

	intel_update_watermarks(dev);

	return ret;
}
5791
/*
 * Initialize the PCH display reference clock (PCH_DREF_CONTROL) when
 * the driver loads.
 *
 * Scans all encoders to work out whether a panel (LVDS or eDP) is
 * present and whether spread-spectrum clocking (SSC) can/should be
 * used, then stages the PCH_DREF_CONTROL writes in the required order:
 * SSC source first, then the CPU-side source output, with a posting
 * read and a 200us settle delay after each write.  With no panel, SSC
 * is disabled entirely in the reverse order.
 */
void ironlake_init_pch_refclk(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_mode_config *mode_config = &dev->mode_config;
	struct intel_encoder *encoder;
	u32 temp;
	bool has_lvds = false;
	bool has_cpu_edp = false;
	bool has_pch_edp = false;
	bool has_panel = false;
	bool has_ck505 = false;
	bool can_ssc = false;

	/* We need to take the global config into account */
	list_for_each_entry(encoder, &mode_config->encoder_list,
			    base.head) {
		switch (encoder->type) {
		case INTEL_OUTPUT_LVDS:
			has_panel = true;
			has_lvds = true;
			break;
		case INTEL_OUTPUT_EDP:
			has_panel = true;
			/* eDP can hang off either the PCH or the CPU;
			 * the refclk routing differs between the two. */
			if (intel_encoder_is_pch_edp(&encoder->base))
				has_pch_edp = true;
			else
				has_cpu_edp = true;
			break;
		}
	}

	/* On IBX, SSC is only available through an external CK505
	 * clock chip (per the VBT display_clock_mode flag). */
	if (HAS_PCH_IBX(dev)) {
		has_ck505 = dev_priv->display_clock_mode;
		can_ssc = has_ck505;
	} else {
		has_ck505 = false;
		can_ssc = true;
	}

	DRM_DEBUG_KMS("has_panel %d has_lvds %d has_pch_edp %d has_cpu_edp %d has_ck505 %d\n",
		      has_panel, has_lvds, has_pch_edp, has_cpu_edp,
		      has_ck505);

	/* Ironlake: try to setup display ref clock before DPLL
	 * enabling. This is only under driver's control after
	 * PCH B stepping, previous chipset stepping should be
	 * ignoring this setting.
	 */
	temp = I915_READ(PCH_DREF_CONTROL);
	/* Always enable nonspread source */
	temp &= ~DREF_NONSPREAD_SOURCE_MASK;

	if (has_ck505)
		temp |= DREF_NONSPREAD_CK505_ENABLE;
	else
		temp |= DREF_NONSPREAD_SOURCE_ENABLE;

	if (has_panel) {
		temp &= ~DREF_SSC_SOURCE_MASK;
		temp |= DREF_SSC_SOURCE_ENABLE;

		/* SSC must be turned on before enabling the CPU output  */
		if (intel_panel_use_ssc(dev_priv) && can_ssc) {
			DRM_DEBUG_KMS("Using SSC on panel\n");
			temp |= DREF_SSC1_ENABLE;
		}

		/* Get SSC going before enabling the outputs */
		I915_WRITE(PCH_DREF_CONTROL, temp);
		POSTING_READ(PCH_DREF_CONTROL);
		udelay(200);

		temp &= ~DREF_CPU_SOURCE_OUTPUT_MASK;

		/* Enable CPU source on CPU attached eDP */
		if (has_cpu_edp) {
			if (intel_panel_use_ssc(dev_priv) && can_ssc) {
				DRM_DEBUG_KMS("Using SSC on eDP\n");
				temp |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD;
			}
			else
				temp |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD;
		} else
			temp |= DREF_CPU_SOURCE_OUTPUT_DISABLE;

		I915_WRITE(PCH_DREF_CONTROL, temp);
		POSTING_READ(PCH_DREF_CONTROL);
		udelay(200);
	} else {
		DRM_DEBUG_KMS("Disabling SSC entirely\n");

		temp &= ~DREF_CPU_SOURCE_OUTPUT_MASK;

		/* Turn off CPU output */
		temp |= DREF_CPU_SOURCE_OUTPUT_DISABLE;

		I915_WRITE(PCH_DREF_CONTROL, temp);
		POSTING_READ(PCH_DREF_CONTROL);
		udelay(200);

		/* Turn off the SSC source */
		temp &= ~DREF_SSC_SOURCE_MASK;
		temp |= DREF_SSC_SOURCE_DISABLE;

		/* Turn off SSC1 */
		temp &= ~ DREF_SSC1_ENABLE;

		I915_WRITE(PCH_DREF_CONTROL, temp);
		POSTING_READ(PCH_DREF_CONTROL);
		udelay(200);
	}
}
5907
5908 static int ironlake_get_refclk(struct drm_crtc *crtc)
5909 {
5910         struct drm_device *dev = crtc->dev;
5911         struct drm_i915_private *dev_priv = dev->dev_private;
5912         struct intel_encoder *encoder;
5913         struct drm_mode_config *mode_config = &dev->mode_config;
5914         struct intel_encoder *edp_encoder = NULL;
5915         int num_connectors = 0;
5916         bool is_lvds = false;
5917
5918         list_for_each_entry(encoder, &mode_config->encoder_list, base.head) {
5919                 if (encoder->base.crtc != crtc)
5920                         continue;
5921
5922                 switch (encoder->type) {
5923                 case INTEL_OUTPUT_LVDS:
5924                         is_lvds = true;
5925                         break;
5926                 case INTEL_OUTPUT_EDP:
5927                         edp_encoder = encoder;
5928                         break;
5929                 }
5930                 num_connectors++;
5931         }
5932
5933         if (is_lvds && intel_panel_use_ssc(dev_priv) && num_connectors < 2) {
5934                 DRM_DEBUG_KMS("using SSC reference clock of %d MHz\n",
5935                               dev_priv->lvds_ssc_freq);
5936                 return dev_priv->lvds_ssc_freq * 1000;
5937         }
5938
5939         return 120000;
5940 }
5941
5942 static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
5943                                   struct drm_display_mode *mode,
5944                                   struct drm_display_mode *adjusted_mode,
5945                                   int x, int y,
5946                                   struct drm_framebuffer *old_fb)
5947 {
5948         struct drm_device *dev = crtc->dev;
5949         struct drm_i915_private *dev_priv = dev->dev_private;
5950         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5951         int pipe = intel_crtc->pipe;
5952         int plane = intel_crtc->plane;
5953         int refclk, num_connectors = 0;
5954         intel_clock_t clock, reduced_clock;
5955         u32 dpll, fp = 0, fp2 = 0, dspcntr, pipeconf;
5956         bool ok, has_reduced_clock = false, is_sdvo = false;
5957         bool is_crt = false, is_lvds = false, is_tv = false, is_dp = false;
5958         struct intel_encoder *has_edp_encoder = NULL;
5959         struct drm_mode_config *mode_config = &dev->mode_config;
5960         struct intel_encoder *encoder;
5961         const intel_limit_t *limit;
5962         int ret;
5963         struct fdi_m_n m_n = {0};
5964         u32 temp;
5965         u32 lvds_sync = 0;
5966         int target_clock, pixel_multiplier, lane, link_bw, factor;
5967         unsigned int pipe_bpp;
5968         bool dither;
5969
5970         list_for_each_entry(encoder, &mode_config->encoder_list, base.head) {
5971                 if (encoder->base.crtc != crtc)
5972                         continue;
5973
5974                 switch (encoder->type) {
5975                 case INTEL_OUTPUT_LVDS:
5976                         is_lvds = true;
5977                         break;
5978                 case INTEL_OUTPUT_SDVO:
5979                 case INTEL_OUTPUT_HDMI:
5980                         is_sdvo = true;
5981                         if (encoder->needs_tv_clock)
5982                                 is_tv = true;
5983                         break;
5984                 case INTEL_OUTPUT_TVOUT:
5985                         is_tv = true;
5986                         break;
5987                 case INTEL_OUTPUT_ANALOG:
5988                         is_crt = true;
5989                         break;
5990                 case INTEL_OUTPUT_DISPLAYPORT:
5991                         is_dp = true;
5992                         break;
5993                 case INTEL_OUTPUT_EDP:
5994                         has_edp_encoder = encoder;
5995                         break;
5996                 }
5997
5998                 num_connectors++;
5999         }
6000
6001         refclk = ironlake_get_refclk(crtc);
6002
6003         /*
6004          * Returns a set of divisors for the desired target clock with the given
6005          * refclk, or FALSE.  The returned values represent the clock equation:
6006          * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
6007          */
6008         limit = intel_limit(crtc, refclk);
6009         ok = limit->find_pll(limit, crtc, adjusted_mode->clock, refclk, NULL,
6010                              &clock);
6011         if (!ok) {
6012                 DRM_ERROR("Couldn't find PLL settings for mode!\n");
6013                 return -EINVAL;
6014         }
6015
6016         /* Ensure that the cursor is valid for the new mode before changing... */
6017         intel_crtc_update_cursor(crtc, true);
6018
6019         if (is_lvds && dev_priv->lvds_downclock_avail) {
6020                 /*
6021                  * Ensure we match the reduced clock's P to the target clock.
6022                  * If the clocks don't match, we can't switch the display clock
6023                  * by using the FP0/FP1. In such case we will disable the LVDS
6024                  * downclock feature.
6025                 */
6026                 has_reduced_clock = limit->find_pll(limit, crtc,
6027                                                     dev_priv->lvds_downclock,
6028                                                     refclk,
6029                                                     &clock,
6030                                                     &reduced_clock);
6031         }
6032         /* SDVO TV has fixed PLL values depend on its clock range,
6033            this mirrors vbios setting. */
6034         if (is_sdvo && is_tv) {
6035                 if (adjusted_mode->clock >= 100000
6036                     && adjusted_mode->clock < 140500) {
6037                         clock.p1 = 2;
6038                         clock.p2 = 10;
6039                         clock.n = 3;
6040                         clock.m1 = 16;
6041                         clock.m2 = 8;
6042                 } else if (adjusted_mode->clock >= 140500
6043                            && adjusted_mode->clock <= 200000) {
6044                         clock.p1 = 1;
6045                         clock.p2 = 10;
6046                         clock.n = 6;
6047                         clock.m1 = 12;
6048                         clock.m2 = 8;
6049                 }
6050         }
6051
6052         /* FDI link */
6053         pixel_multiplier = intel_mode_get_pixel_multiplier(adjusted_mode);
6054         lane = 0;
6055         /* CPU eDP doesn't require FDI link, so just set DP M/N
6056            according to current link config */
6057         if (has_edp_encoder &&
6058             !intel_encoder_is_pch_edp(&has_edp_encoder->base)) {
6059                 target_clock = mode->clock;
6060                 intel_edp_link_config(has_edp_encoder,
6061                                       &lane, &link_bw);
6062         } else {
6063                 /* [e]DP over FDI requires target mode clock
6064                    instead of link clock */
6065                 if (is_dp || intel_encoder_is_pch_edp(&has_edp_encoder->base))
6066                         target_clock = mode->clock;
6067                 else
6068                         target_clock = adjusted_mode->clock;
6069
6070                 /* FDI is a binary signal running at ~2.7GHz, encoding
6071                  * each output octet as 10 bits. The actual frequency
6072                  * is stored as a divider into a 100MHz clock, and the
6073                  * mode pixel clock is stored in units of 1KHz.
6074                  * Hence the bw of each lane in terms of the mode signal
6075                  * is:
6076                  */
6077                 link_bw = intel_fdi_link_freq(dev) * MHz(100)/KHz(1)/10;
6078         }
6079
6080         /* determine panel color depth */
6081         temp = I915_READ(PIPECONF(pipe));
6082         temp &= ~PIPE_BPC_MASK;
6083         dither = intel_choose_pipe_bpp_dither(crtc, &pipe_bpp, mode);
6084         switch (pipe_bpp) {
6085         case 18:
6086                 temp |= PIPE_6BPC;
6087                 break;
6088         case 24:
6089                 temp |= PIPE_8BPC;
6090                 break;
6091         case 30:
6092                 temp |= PIPE_10BPC;
6093                 break;
6094         case 36:
6095                 temp |= PIPE_12BPC;
6096                 break;
6097         default:
6098                 WARN(1, "intel_choose_pipe_bpp returned invalid value %d\n",
6099                         pipe_bpp);
6100                 temp |= PIPE_8BPC;
6101                 pipe_bpp = 24;
6102                 break;
6103         }
6104
6105         intel_crtc->bpp = pipe_bpp;
6106         I915_WRITE(PIPECONF(pipe), temp);
6107
6108         if (!lane) {
6109                 /*
6110                  * Account for spread spectrum to avoid
6111                  * oversubscribing the link. Max center spread
6112                  * is 2.5%; use 5% for safety's sake.
6113                  */
6114                 u32 bps = target_clock * intel_crtc->bpp * 21 / 20;
6115                 lane = bps / (link_bw * 8) + 1;
6116         }
6117
6118         intel_crtc->fdi_lanes = lane;
6119
6120         if (pixel_multiplier > 1)
6121                 link_bw *= pixel_multiplier;
6122         ironlake_compute_m_n(intel_crtc->bpp, lane, target_clock, link_bw,
6123                              &m_n);
6124
6125         fp = clock.n << 16 | clock.m1 << 8 | clock.m2;
6126         if (has_reduced_clock)
6127                 fp2 = reduced_clock.n << 16 | reduced_clock.m1 << 8 |
6128                         reduced_clock.m2;
6129
6130         /* Enable autotuning of the PLL clock (if permissible) */
6131         factor = 21;
6132         if (is_lvds) {
6133                 if ((intel_panel_use_ssc(dev_priv) &&
6134                      dev_priv->lvds_ssc_freq == 100) ||
6135                     (I915_READ(PCH_LVDS) & LVDS_CLKB_POWER_MASK) == LVDS_CLKB_POWER_UP)
6136                         factor = 25;
6137         } else if (is_sdvo && is_tv)
6138                 factor = 20;
6139
6140         if (clock.m < factor * clock.n)
6141                 fp |= FP_CB_TUNE;
6142
6143         dpll = 0;
6144
6145         if (is_lvds)
6146                 dpll |= DPLLB_MODE_LVDS;
6147         else
6148                 dpll |= DPLLB_MODE_DAC_SERIAL;
6149         if (is_sdvo) {
6150                 int pixel_multiplier = intel_mode_get_pixel_multiplier(adjusted_mode);
6151                 if (pixel_multiplier > 1) {
6152                         dpll |= (pixel_multiplier - 1) << PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT;
6153                 }
6154                 dpll |= DPLL_DVO_HIGH_SPEED;
6155         }
6156         if (is_dp || intel_encoder_is_pch_edp(&has_edp_encoder->base))
6157                 dpll |= DPLL_DVO_HIGH_SPEED;
6158
6159         /* compute bitmask from p1 value */
6160         dpll |= (1 << (clock.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
6161         /* also FPA1 */
6162         dpll |= (1 << (clock.p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;
6163
6164         switch (clock.p2) {
6165         case 5:
6166                 dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5;
6167                 break;
6168         case 7:
6169                 dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7;
6170                 break;
6171         case 10:
6172                 dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10;
6173                 break;
6174         case 14:
6175                 dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14;
6176                 break;
6177         }
6178
6179         if (is_sdvo && is_tv)
6180                 dpll |= PLL_REF_INPUT_TVCLKINBC;
6181         else if (is_tv)
6182                 /* XXX: just matching BIOS for now */
6183                 /*      dpll |= PLL_REF_INPUT_TVCLKINBC; */
6184                 dpll |= 3;
6185         else if (is_lvds && intel_panel_use_ssc(dev_priv) && num_connectors < 2)
6186                 dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
6187         else
6188                 dpll |= PLL_REF_INPUT_DREFCLK;
6189
6190         /* setup pipeconf */
6191         pipeconf = I915_READ(PIPECONF(pipe));
6192
6193         /* Set up the display plane register */
6194         dspcntr = DISPPLANE_GAMMA_ENABLE;
6195
6196         DRM_DEBUG_KMS("Mode for pipe %d:\n", pipe);
6197         drm_mode_debug_printmodeline(mode);
6198
6199         /* PCH eDP needs FDI, but CPU eDP does not */
6200         if (!intel_crtc->no_pll) {
6201                 if (!has_edp_encoder ||
6202                     intel_encoder_is_pch_edp(&has_edp_encoder->base)) {
6203                         I915_WRITE(PCH_FP0(pipe), fp);
6204                         I915_WRITE(PCH_DPLL(pipe), dpll & ~DPLL_VCO_ENABLE);
6205
6206                         POSTING_READ(PCH_DPLL(pipe));
6207                         udelay(150);
6208                 }
6209         } else {
6210                 if (dpll == (I915_READ(PCH_DPLL(0)) & 0x7fffffff) &&
6211                     fp == I915_READ(PCH_FP0(0))) {
6212                         intel_crtc->use_pll_a = true;
6213                         DRM_DEBUG_KMS("using pipe a dpll\n");
6214                 } else if (dpll == (I915_READ(PCH_DPLL(1)) & 0x7fffffff) &&
6215                            fp == I915_READ(PCH_FP0(1))) {
6216                         intel_crtc->use_pll_a = false;
6217                         DRM_DEBUG_KMS("using pipe b dpll\n");
6218                 } else {
6219                         DRM_DEBUG_KMS("no matching PLL configuration for pipe 2\n");
6220                         return -EINVAL;
6221                 }
6222         }
6223
6224         /* The LVDS pin pair needs to be on before the DPLLs are enabled.
6225          * This is an exception to the general rule that mode_set doesn't turn
6226          * things on.
6227          */
6228         if (is_lvds) {
6229                 temp = I915_READ(PCH_LVDS);
6230                 temp |= LVDS_PORT_EN | LVDS_A0A2_CLKA_POWER_UP;
6231                 if (HAS_PCH_CPT(dev)) {
6232                         temp &= ~PORT_TRANS_SEL_MASK;
6233                         temp |= PORT_TRANS_SEL_CPT(pipe);
6234                 } else {
6235                         if (pipe == 1)
6236                                 temp |= LVDS_PIPEB_SELECT;
6237                         else
6238                                 temp &= ~LVDS_PIPEB_SELECT;
6239                 }
6240
6241                 /* set the corresponsding LVDS_BORDER bit */
6242                 temp |= dev_priv->lvds_border_bits;
6243                 /* Set the B0-B3 data pairs corresponding to whether we're going to
6244                  * set the DPLLs for dual-channel mode or not.
6245                  */
6246                 if (clock.p2 == 7)
6247                         temp |= LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP;
6248                 else
6249                         temp &= ~(LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP);
6250
6251                 /* It would be nice to set 24 vs 18-bit mode (LVDS_A3_POWER_UP)
6252                  * appropriately here, but we need to look more thoroughly into how
6253                  * panels behave in the two modes.
6254                  */
6255                 if (adjusted_mode->flags & DRM_MODE_FLAG_NHSYNC)
6256                         lvds_sync |= LVDS_HSYNC_POLARITY;
6257                 if (adjusted_mode->flags & DRM_MODE_FLAG_NVSYNC)
6258                         lvds_sync |= LVDS_VSYNC_POLARITY;
6259                 if ((temp & (LVDS_HSYNC_POLARITY | LVDS_VSYNC_POLARITY))
6260                     != lvds_sync) {
6261                         char flags[2] = "-+";
6262                         DRM_INFO("Changing LVDS panel from "
6263                                  "(%chsync, %cvsync) to (%chsync, %cvsync)\n",
6264                                  flags[!(temp & LVDS_HSYNC_POLARITY)],
6265                                  flags[!(temp & LVDS_VSYNC_POLARITY)],
6266                                  flags[!(lvds_sync & LVDS_HSYNC_POLARITY)],
6267                                  flags[!(lvds_sync & LVDS_VSYNC_POLARITY)]);
6268                         temp &= ~(LVDS_HSYNC_POLARITY | LVDS_VSYNC_POLARITY);
6269                         temp |= lvds_sync;
6270                 }
6271                 I915_WRITE(PCH_LVDS, temp);
6272         }
6273
6274         pipeconf &= ~PIPECONF_DITHER_EN;
6275         pipeconf &= ~PIPECONF_DITHER_TYPE_MASK;
6276         if ((is_lvds && dev_priv->lvds_dither) || dither) {
6277                 pipeconf |= PIPECONF_DITHER_EN;
6278                 pipeconf |= PIPECONF_DITHER_TYPE_SP;
6279         }
6280         if (is_dp || intel_encoder_is_pch_edp(&has_edp_encoder->base)) {
6281                 intel_dp_set_m_n(crtc, mode, adjusted_mode);
6282         } else {
6283                 /* For non-DP output, clear any trans DP clock recovery setting.*/
6284                 I915_WRITE(TRANSDATA_M1(pipe), 0);
6285                 I915_WRITE(TRANSDATA_N1(pipe), 0);
6286                 I915_WRITE(TRANSDPLINK_M1(pipe), 0);
6287                 I915_WRITE(TRANSDPLINK_N1(pipe), 0);
6288         }
6289
6290         if (!intel_crtc->no_pll &&
6291             (!has_edp_encoder ||
6292              intel_encoder_is_pch_edp(&has_edp_encoder->base))) {
6293                 I915_WRITE(PCH_DPLL(pipe), dpll);
6294
6295                 /* Wait for the clocks to stabilize. */
6296                 POSTING_READ(PCH_DPLL(pipe));
6297                 udelay(150);
6298
6299                 /* The pixel multiplier can only be updated once the
6300                  * DPLL is enabled and the clocks are stable.
6301                  *
6302                  * So write it again.
6303                  */
6304                 I915_WRITE(PCH_DPLL(pipe), dpll);
6305         }
6306
6307         intel_crtc->lowfreq_avail = false;
6308         if (!intel_crtc->no_pll) {
6309                 if (is_lvds && has_reduced_clock && i915_powersave) {
6310                         I915_WRITE(PCH_FP1(pipe), fp2);
6311                         intel_crtc->lowfreq_avail = true;
6312                         if (HAS_PIPE_CXSR(dev)) {
6313                                 DRM_DEBUG_KMS("enabling CxSR downclocking\n");
6314                                 pipeconf |= PIPECONF_CXSR_DOWNCLOCK;
6315                         }
6316                 } else {
6317                         I915_WRITE(PCH_FP1(pipe), fp);
6318                         if (HAS_PIPE_CXSR(dev)) {
6319                                 DRM_DEBUG_KMS("disabling CxSR downclocking\n");
6320                                 pipeconf &= ~PIPECONF_CXSR_DOWNCLOCK;
6321                         }
6322                 }
6323         }
6324
6325         pipeconf &= ~PIPECONF_INTERLACE_MASK;
6326         if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
6327                 pipeconf |= PIPECONF_INTERLACED_ILK;
6328                 /* the chip adds 2 halflines automatically */
6329                 adjusted_mode->crtc_vtotal -= 1;
6330                 adjusted_mode->crtc_vblank_end -= 1;
6331                 I915_WRITE(VSYNCSHIFT(pipe),
6332                            adjusted_mode->crtc_hsync_start
6333                            - adjusted_mode->crtc_htotal/2);
6334         } else {
6335                 pipeconf |= PIPECONF_PROGRESSIVE;
6336                 I915_WRITE(VSYNCSHIFT(pipe), 0);
6337         }
6338
6339         I915_WRITE(HTOTAL(pipe),
6340                    (adjusted_mode->crtc_hdisplay - 1) |
6341                    ((adjusted_mode->crtc_htotal - 1) << 16));
6342         I915_WRITE(HBLANK(pipe),
6343                    (adjusted_mode->crtc_hblank_start - 1) |
6344                    ((adjusted_mode->crtc_hblank_end - 1) << 16));
6345         I915_WRITE(HSYNC(pipe),
6346                    (adjusted_mode->crtc_hsync_start - 1) |
6347                    ((adjusted_mode->crtc_hsync_end - 1) << 16));
6348
6349         I915_WRITE(VTOTAL(pipe),
6350                    (adjusted_mode->crtc_vdisplay - 1) |
6351                    ((adjusted_mode->crtc_vtotal - 1) << 16));
6352         I915_WRITE(VBLANK(pipe),
6353                    (adjusted_mode->crtc_vblank_start - 1) |
6354                    ((adjusted_mode->crtc_vblank_end - 1) << 16));
6355         I915_WRITE(VSYNC(pipe),
6356                    (adjusted_mode->crtc_vsync_start - 1) |
6357                    ((adjusted_mode->crtc_vsync_end - 1) << 16));
6358
6359         /* pipesrc controls the size that is scaled from, which should
6360          * always be the user's requested size.
6361          */
6362         I915_WRITE(PIPESRC(pipe),
6363                    ((mode->hdisplay - 1) << 16) | (mode->vdisplay - 1));
6364
6365         I915_WRITE(PIPE_DATA_M1(pipe), TU_SIZE(m_n.tu) | m_n.gmch_m);
6366         I915_WRITE(PIPE_DATA_N1(pipe), m_n.gmch_n);
6367         I915_WRITE(PIPE_LINK_M1(pipe), m_n.link_m);
6368         I915_WRITE(PIPE_LINK_N1(pipe), m_n.link_n);
6369
6370         if (has_edp_encoder &&
6371             !intel_encoder_is_pch_edp(&has_edp_encoder->base)) {
6372                 ironlake_set_pll_edp(crtc, adjusted_mode->clock);
6373         }
6374
6375         I915_WRITE(PIPECONF(pipe), pipeconf);
6376         POSTING_READ(PIPECONF(pipe));
6377
6378         intel_wait_for_vblank(dev, pipe);
6379
6380         I915_WRITE(DSPCNTR(plane), dspcntr);
6381         POSTING_READ(DSPCNTR(plane));
6382
6383         ret = intel_pipe_set_base(crtc, x, y, old_fb);
6384
6385         intel_update_watermarks(dev);
6386
6387         return ret;
6388 }
6389
6390 static int intel_crtc_mode_set(struct drm_crtc *crtc,
6391                                struct drm_display_mode *mode,
6392                                struct drm_display_mode *adjusted_mode,
6393                                int x, int y,
6394                                struct drm_framebuffer *old_fb)
6395 {
6396         struct drm_device *dev = crtc->dev;
6397         struct drm_i915_private *dev_priv = dev->dev_private;
6398         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
6399         int pipe = intel_crtc->pipe;
6400         int ret;
6401
6402         drm_vblank_pre_modeset(dev, pipe);
6403
6404         ret = dev_priv->display.crtc_mode_set(crtc, mode, adjusted_mode,
6405                                               x, y, old_fb);
6406         drm_vblank_post_modeset(dev, pipe);
6407
6408         if (ret)
6409                 intel_crtc->dpms_mode = DRM_MODE_DPMS_OFF;
6410         else
6411                 intel_crtc->dpms_mode = DRM_MODE_DPMS_ON;
6412
6413         return ret;
6414 }
6415
/*
 * Check whether the ELD (EDID-Like Data) already programmed into the audio
 * hardware matches the connector's current ELD, to avoid a needless rewrite.
 *
 * reg_eldv/bits_eldv: register and mask of the ELD-valid flag.
 * reg_elda/bits_elda: register and mask of the ELD address field; clearing
 *                     it resets the auto-incrementing read index.
 * reg_edid: data window register through which ELD dwords are read back.
 *
 * Returns true when the hardware buffer is already up to date (including the
 * case of "no ELD and none programmed"), false when it must be (re)written.
 */
static bool intel_eld_uptodate(struct drm_connector *connector,
			       int reg_eldv, uint32_t bits_eldv,
			       int reg_elda, uint32_t bits_elda,
			       int reg_edid)
{
	struct drm_i915_private *dev_priv = connector->dev->dev_private;
	uint8_t *eld = connector->eld;
	uint32_t i;

	i = I915_READ(reg_eldv);
	i &= bits_eldv;

	/* No ELD on the connector: up to date iff hardware has none either. */
	if (!eld[0])
		return !i;

	/* Connector has an ELD but hardware flags none programmed. */
	if (!i)
		return false;

	/* Reset the ELD address so reads below start from dword 0. */
	i = I915_READ(reg_elda);
	i &= ~bits_elda;
	I915_WRITE(reg_elda, i);

	/* eld[2] is the ELD length in 32-bit words (see the 84-byte/21-dword
	 * cap in ironlake_write_eld); each read auto-advances the index. */
	for (i = 0; i < eld[2]; i++)
		if (I915_READ(reg_edid) != *((uint32_t *)eld + i))
			return false;

	return true;
}
6444
/*
 * Program the connector's ELD into the G4X audio hardware so the HDMI audio
 * codec can advertise the monitor's audio capabilities.
 */
static void g4x_write_eld(struct drm_connector *connector,
			  struct drm_crtc *crtc)
{
	struct drm_i915_private *dev_priv = connector->dev->dev_private;
	uint8_t *eld = connector->eld;
	uint32_t eldv;
	uint32_t len;
	uint32_t i;

	/* Pick the ELD-valid bit matching the detected audio device ID. */
	i = I915_READ(G4X_AUD_VID_DID);

	if (i == INTEL_AUDIO_DEVBLC || i == INTEL_AUDIO_DEVCL)
		eldv = G4X_ELDV_DEVCL_DEVBLC;
	else
		eldv = G4X_ELDV_DEVCTG;

	/* Skip the rewrite if the hardware already holds this ELD. */
	if (intel_eld_uptodate(connector,
			       G4X_AUD_CNTL_ST, eldv,
			       G4X_AUD_CNTL_ST, G4X_ELD_ADDR,
			       G4X_HDMIW_HDMIEDID))
		return;

	/* Invalidate the current ELD and reset the write address to 0. */
	i = I915_READ(G4X_AUD_CNTL_ST);
	i &= ~(eldv | G4X_ELD_ADDR);
	len = (i >> 9) & 0x1f;		/* ELD buffer size */
	I915_WRITE(G4X_AUD_CNTL_ST, i);

	/* Nothing to program (monitor has no audio / no ELD available). */
	if (!eld[0])
		return;

	/* eld[2]: ELD length in dwords; clamp to the hardware buffer size. */
	len = min_t(uint8_t, eld[2], len);
	DRM_DEBUG_DRIVER("ELD size %d\n", len);
	for (i = 0; i < len; i++)
		I915_WRITE(G4X_HDMIW_HDMIEDID, *((uint32_t *)eld + i));

	/* Finally mark the freshly written ELD as valid. */
	i = I915_READ(G4X_AUD_CNTL_ST);
	i |= eldv;
	I915_WRITE(G4X_AUD_CNTL_ST, i);
}
6484
/*
 * Program the connector's ELD into the PCH (IBX/CPT) audio hardware for the
 * pipe driving @crtc, so the HD-audio codec can see the monitor's audio
 * capabilities.
 */
static void ironlake_write_eld(struct drm_connector *connector,
				     struct drm_crtc *crtc)
{
	struct drm_i915_private *dev_priv = connector->dev->dev_private;
	uint8_t *eld = connector->eld;
	uint32_t eldv;
	uint32_t i;
	int len;
	int hdmiw_hdmiedid;
	int aud_config;
	int aud_cntl_st;
	int aud_cntrl_st2;

	/* IBX and CPT PCHs place the audio registers at different offsets;
	 * start from the pipe-A instances and offset per pipe below. */
	if (HAS_PCH_IBX(connector->dev)) {
		hdmiw_hdmiedid = IBX_HDMIW_HDMIEDID_A;
		aud_config = IBX_AUD_CONFIG_A;
		aud_cntl_st = IBX_AUD_CNTL_ST_A;
		aud_cntrl_st2 = IBX_AUD_CNTL_ST2;
	} else {
		hdmiw_hdmiedid = CPT_HDMIW_HDMIEDID_A;
		aud_config = CPT_AUD_CONFIG_A;
		aud_cntl_st = CPT_AUD_CNTL_ST_A;
		aud_cntrl_st2 = CPT_AUD_CNTRL_ST2;
	}

	/* Per-pipe register stride is 0x100. aud_cntrl_st2 is a single
	 * register with per-port ELD-valid bits, so it is not offset. */
	i = to_intel_crtc(crtc)->pipe;
	hdmiw_hdmiedid += i * 0x100;
	aud_cntl_st += i * 0x100;
	aud_config += i * 0x100;

	DRM_DEBUG_DRIVER("ELD on pipe %c\n", pipe_name(i));

	/* Which digital port is audio routed to? 0 means "unknown". */
	i = I915_READ(aud_cntl_st);
	i = (i >> 29) & 0x3;		/* DIP_Port_Select, 0x1 = PortB */
	if (!i) {
		DRM_DEBUG_DRIVER("Audio directed to unknown port\n");
		/* operate blindly on all ports */
		eldv = IBX_ELD_VALIDB;
		eldv |= IBX_ELD_VALIDB << 4;
		eldv |= IBX_ELD_VALIDB << 8;
	} else {
		DRM_DEBUG_DRIVER("ELD on port %c\n", 'A' + i);
		/* Valid bits are packed 4 bits per port, port B first. */
		eldv = IBX_ELD_VALIDB << ((i - 1) * 4);
	}

	if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT)) {
		DRM_DEBUG_DRIVER("ELD: DisplayPort detected\n");
		eld[5] |= (1 << 2);	/* Conn_Type, 0x1 = DisplayPort */
		I915_WRITE(aud_config, AUD_CONFIG_N_VALUE_INDEX); /* 0x1 = DP */
	} else
		I915_WRITE(aud_config, 0);

	/* Skip the rewrite if the hardware already holds this ELD. */
	if (intel_eld_uptodate(connector,
			       aud_cntrl_st2, eldv,
			       aud_cntl_st, IBX_ELD_ADDRESS,
			       hdmiw_hdmiedid))
		return;

	/* Invalidate the stale ELD before rewriting the buffer. */
	i = I915_READ(aud_cntrl_st2);
	i &= ~eldv;
	I915_WRITE(aud_cntrl_st2, i);

	/* No ELD to program (monitor has no audio). */
	if (!eld[0])
		return;

	/* Reset the write address so the dword writes start at offset 0. */
	i = I915_READ(aud_cntl_st);
	i &= ~IBX_ELD_ADDRESS;
	I915_WRITE(aud_cntl_st, i);

	len = min_t(uint8_t, eld[2], 21);	/* 84 bytes of hw ELD buffer */
	DRM_DEBUG_DRIVER("ELD size %d\n", len);
	for (i = 0; i < len; i++)
		I915_WRITE(hdmiw_hdmiedid, *((uint32_t *)eld + i));

	/* Mark the freshly written ELD valid on the selected port(s). */
	i = I915_READ(aud_cntrl_st2);
	i |= eldv;
	I915_WRITE(aud_cntrl_st2, i);
}
6563
6564 void intel_write_eld(struct drm_encoder *encoder,
6565                      struct drm_display_mode *mode)
6566 {
6567         struct drm_crtc *crtc = encoder->crtc;
6568         struct drm_connector *connector;
6569         struct drm_device *dev = encoder->dev;
6570         struct drm_i915_private *dev_priv = dev->dev_private;
6571
6572         connector = drm_select_eld(encoder, mode);
6573         if (!connector)
6574                 return;
6575
6576         DRM_DEBUG_DRIVER("ELD on [CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
6577                          connector->base.id,
6578                          drm_get_connector_name(connector),
6579                          connector->encoder->base.id,
6580                          drm_get_encoder_name(connector->encoder));
6581
6582         connector->eld[6] = drm_av_sync_delay(connector, mode) / 2;
6583
6584         if (dev_priv->display.write_eld)
6585                 dev_priv->display.write_eld(connector, crtc);
6586 }
6587
6588 /** Loads the palette/gamma unit for the CRTC with the prepared values */
6589 void intel_crtc_load_lut(struct drm_crtc *crtc)
6590 {
6591         struct drm_device *dev = crtc->dev;
6592         struct drm_i915_private *dev_priv = dev->dev_private;
6593         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
6594         int palreg = PALETTE(intel_crtc->pipe);
6595         int i;
6596
6597         /* The clocks have to be on to load the palette. */
6598         if (!crtc->enabled)
6599                 return;
6600
6601         /* use legacy palette for Ironlake */
6602         if (HAS_PCH_SPLIT(dev))
6603                 palreg = LGC_PALETTE(intel_crtc->pipe);
6604
6605         for (i = 0; i < 256; i++) {
6606                 I915_WRITE(palreg + 4 * i,
6607                            (intel_crtc->lut_r[i] << 16) |
6608                            (intel_crtc->lut_g[i] << 8) |
6609                            intel_crtc->lut_b[i]);
6610         }
6611 }
6612
/*
 * Show or hide the hardware cursor on 845G/865G-class hardware.
 * @base: GTT/phys address of the cursor image; 0 means "hide".
 */
static void i845_update_cursor(struct drm_crtc *crtc, u32 base)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	bool visible = base != 0;
	u32 cntl;

	/* Only visibility transitions need programming here; the base
	 * address cannot be changed while the cursor is enabled anyway. */
	if (intel_crtc->cursor_visible == visible)
		return;

	cntl = I915_READ(_CURACNTR);
	if (visible) {
		/* On these chipsets we can only modify the base whilst
		 * the cursor is disabled.
		 */
		I915_WRITE(_CURABASE, base);

		cntl &= ~(CURSOR_FORMAT_MASK);
		/* XXX width must be 64, stride 256 => 0x00 << 28 */
		cntl |= CURSOR_ENABLE |
			CURSOR_GAMMA_ENABLE |
			CURSOR_FORMAT_ARGB;
	} else
		cntl &= ~(CURSOR_ENABLE | CURSOR_GAMMA_ENABLE);
	I915_WRITE(_CURACNTR, cntl);

	intel_crtc->cursor_visible = visible;
}
6642
/*
 * Show, hide or move the hardware cursor on i9xx-class hardware.
 * @base: GTT/phys address of the cursor image; 0 means "hide".
 */
static void i9xx_update_cursor(struct drm_crtc *crtc, u32 base)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	bool visible = base != 0;

	/* Reprogram the control register only on visibility transitions. */
	if (intel_crtc->cursor_visible != visible) {
		uint32_t cntl = I915_READ(CURCNTR(pipe));
		if (base) {
			cntl &= ~(CURSOR_MODE | MCURSOR_PIPE_SELECT);
			cntl |= CURSOR_MODE_64_ARGB_AX | MCURSOR_GAMMA_ENABLE;
			cntl |= pipe << 28; /* Connect to correct pipe */
		} else {
			cntl &= ~(CURSOR_MODE | MCURSOR_GAMMA_ENABLE);
			cntl |= CURSOR_MODE_DISABLE;
		}
		I915_WRITE(CURCNTR(pipe), cntl);

		intel_crtc->cursor_visible = visible;
	}
	/* and commit changes on next vblank */
	I915_WRITE(CURBASE(pipe), base);
}
6668
6669 static void ivb_update_cursor(struct drm_crtc *crtc, u32 base)
6670 {
6671         struct drm_device *dev = crtc->dev;
6672         struct drm_i915_private *dev_priv = dev->dev_private;
6673         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
6674         int pipe = intel_crtc->pipe;
6675         bool visible = base != 0;
6676
6677         if (intel_crtc->cursor_visible != visible) {
6678                 uint32_t cntl = I915_READ(CURCNTR_IVB(pipe));
6679                 if (base) {
6680                         cntl &= ~CURSOR_MODE;
6681                         cntl |= CURSOR_MODE_64_ARGB_AX | MCURSOR_GAMMA_ENABLE;
6682                 } else {
6683                         cntl &= ~(CURSOR_MODE | MCURSOR_GAMMA_ENABLE);
6684                         cntl |= CURSOR_MODE_DISABLE;
6685                 }
6686                 I915_WRITE(CURCNTR_IVB(pipe), cntl);
6687
6688                 intel_crtc->cursor_visible = visible;
6689         }
6690         /* and commit changes on next vblank */
6691         I915_WRITE(CURBASE_IVB(pipe), base);
6692 }
6693
/* If no-part of the cursor is visible on the framebuffer, then the GPU may hang... */
/*
 * Recompute cursor visibility and position from the cached cursor_x/y and
 * program the hardware via the chipset-specific helper. @on=false forces
 * the cursor off regardless of position.
 */
static void intel_crtc_update_cursor(struct drm_crtc *crtc,
				     bool on)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	int x = intel_crtc->cursor_x;
	int y = intel_crtc->cursor_y;
	u32 base, pos;
	bool visible;

	pos = 0;

	/* base == 0 means "cursor hidden" throughout this function. */
	if (on && crtc->enabled && crtc->fb) {
		base = intel_crtc->cursor_addr;
		/* NOTE(review): these use > rather than >=, so a cursor
		 * starting exactly at the fb edge is still shown — confirm
		 * intended. */
		if (x > (int) crtc->fb->width)
			base = 0;

		if (y > (int) crtc->fb->height)
			base = 0;
	} else
		base = 0;

	/* The position register uses sign-magnitude encoding: a sign bit
	 * plus the absolute coordinate. Fully off-screen (negative side)
	 * cursors are hidden instead. */
	if (x < 0) {
		if (x + intel_crtc->cursor_width < 0)
			base = 0;

		pos |= CURSOR_POS_SIGN << CURSOR_X_SHIFT;
		x = -x;
	}
	pos |= x << CURSOR_X_SHIFT;

	if (y < 0) {
		if (y + intel_crtc->cursor_height < 0)
			base = 0;

		pos |= CURSOR_POS_SIGN << CURSOR_Y_SHIFT;
		y = -y;
	}
	pos |= y << CURSOR_Y_SHIFT;

	/* Nothing to do when the cursor stays hidden. */
	visible = base != 0;
	if (!visible && !intel_crtc->cursor_visible)
		return;

	/* Write position first; the *_update_cursor helpers commit it
	 * together with the base on the next vblank. */
	if (IS_IVYBRIDGE(dev)) {
		I915_WRITE(CURPOS_IVB(pipe), pos);
		ivb_update_cursor(crtc, base);
	} else {
		I915_WRITE(CURPOS(pipe), pos);
		if (IS_845G(dev) || IS_I865G(dev))
			i845_update_cursor(crtc, base);
		else
			i9xx_update_cursor(crtc, base);
	}

	if (visible)
		intel_mark_busy(dev, to_intel_framebuffer(crtc->fb)->obj);
}
6755
6756 static int intel_crtc_cursor_set(struct drm_crtc *crtc,
6757                                  struct drm_file *file,
6758                                  uint32_t handle,
6759                                  uint32_t width, uint32_t height)
6760 {
6761         struct drm_device *dev = crtc->dev;
6762         struct drm_i915_private *dev_priv = dev->dev_private;
6763         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
6764         struct drm_i915_gem_object *obj;
6765         uint32_t addr;
6766         int ret;
6767
6768         DRM_DEBUG_KMS("\n");
6769
6770         /* if we want to turn off the cursor ignore width and height */
6771         if (!handle) {
6772                 DRM_DEBUG_KMS("cursor off\n");
6773                 addr = 0;
6774                 obj = NULL;
6775                 mutex_lock(&dev->struct_mutex);
6776                 goto finish;
6777         }
6778
6779         /* Currently we only support 64x64 cursors */
6780         if (width != 64 || height != 64) {
6781                 DRM_ERROR("we currently only support 64x64 cursors\n");
6782                 return -EINVAL;
6783         }
6784
6785         obj = to_intel_bo(drm_gem_object_lookup(dev, file, handle));
6786         if (&obj->base == NULL)
6787                 return -ENOENT;
6788
6789         if (obj->base.size < width * height * 4) {
6790                 DRM_ERROR("buffer is to small\n");
6791                 ret = -ENOMEM;
6792                 goto fail;
6793         }
6794
6795         /* we only need to pin inside GTT if cursor is non-phy */
6796         mutex_lock(&dev->struct_mutex);
6797         if (!dev_priv->info->cursor_needs_physical) {
6798                 if (obj->tiling_mode) {
6799                         DRM_ERROR("cursor cannot be tiled\n");
6800                         ret = -EINVAL;
6801                         goto fail_locked;
6802                 }
6803
6804                 ret = i915_gem_object_pin_to_display_plane(obj, 0, NULL);
6805                 if (ret) {
6806                         DRM_ERROR("failed to move cursor bo into the GTT\n");
6807                         goto fail_locked;
6808                 }
6809
6810                 ret = i915_gem_object_put_fence(obj);
6811                 if (ret) {
6812                         DRM_ERROR("failed to release fence for cursor");
6813                         goto fail_unpin;
6814                 }
6815
6816                 addr = obj->gtt_offset;
6817         } else {
6818                 int align = IS_I830(dev) ? 16 * 1024 : 256;
6819                 ret = i915_gem_attach_phys_object(dev, obj,
6820                                                   (intel_crtc->pipe == 0) ? I915_GEM_PHYS_CURSOR_0 : I915_GEM_PHYS_CURSOR_1,
6821                                                   align);
6822                 if (ret) {
6823                         DRM_ERROR("failed to attach phys object\n");
6824                         goto fail_locked;
6825                 }
6826                 addr = obj->phys_obj->handle->busaddr;
6827         }
6828
6829         if (IS_GEN2(dev))
6830                 I915_WRITE(CURSIZE, (height << 12) | width);
6831
6832  finish:
6833         if (intel_crtc->cursor_bo) {
6834                 if (dev_priv->info->cursor_needs_physical) {
6835                         if (intel_crtc->cursor_bo != obj)
6836                                 i915_gem_detach_phys_object(dev, intel_crtc->cursor_bo);
6837                 } else
6838                         i915_gem_object_unpin(intel_crtc->cursor_bo);
6839                 drm_gem_object_unreference(&intel_crtc->cursor_bo->base);
6840         }
6841
6842         mutex_unlock(&dev->struct_mutex);
6843
6844         intel_crtc->cursor_addr = addr;
6845         intel_crtc->cursor_bo = obj;
6846         intel_crtc->cursor_width = width;
6847         intel_crtc->cursor_height = height;
6848
6849         intel_crtc_update_cursor(crtc, true);
6850
6851         return 0;
6852 fail_unpin:
6853         i915_gem_object_unpin(obj);
6854 fail_locked:
6855         mutex_unlock(&dev->struct_mutex);
6856 fail:
6857         drm_gem_object_unreference_unlocked(&obj->base);
6858         return ret;
6859 }
6860
6861 static int intel_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
6862 {
6863         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
6864
6865         intel_crtc->cursor_x = x;
6866         intel_crtc->cursor_y = y;
6867
6868         intel_crtc_update_cursor(crtc, true);
6869
6870         return 0;
6871 }
6872
6873 /** Sets the color ramps on behalf of RandR */
6874 void intel_crtc_fb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green,
6875                                  u16 blue, int regno)
6876 {
6877         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
6878
6879         intel_crtc->lut_r[regno] = red >> 8;
6880         intel_crtc->lut_g[regno] = green >> 8;
6881         intel_crtc->lut_b[regno] = blue >> 8;
6882 }
6883
6884 void intel_crtc_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green,
6885                              u16 *blue, int regno)
6886 {
6887         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
6888
6889         *red = intel_crtc->lut_r[regno] << 8;
6890         *green = intel_crtc->lut_g[regno] << 8;
6891         *blue = intel_crtc->lut_b[regno] << 8;
6892 }
6893
6894 static void intel_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
6895                                  u16 *blue, uint32_t start, uint32_t size)
6896 {
6897         int end = (start + size > 256) ? 256 : start + size, i;
6898         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
6899
6900         for (i = start; i < end; i++) {
6901                 intel_crtc->lut_r[i] = red[i] >> 8;
6902                 intel_crtc->lut_g[i] = green[i] >> 8;
6903                 intel_crtc->lut_b[i] = blue[i] >> 8;
6904         }
6905
6906         intel_crtc_load_lut(crtc);
6907 }
6908
6909 /**
6910  * Get a pipe with a simple mode set on it for doing load-based monitor
6911  * detection.
6912  *
6913  * It will be up to the load-detect code to adjust the pipe as appropriate for
6914  * its requirements.  The pipe will be connected to no other encoders.
6915  *
6916  * Currently this code will only succeed if there is a pipe with no encoders
6917  * configured for it.  In the future, it could choose to temporarily disable
6918  * some outputs to free up a pipe for its use.
6919  *
6920  * \return crtc, or NULL if no pipes are available.
6921  */
6922
/* VESA 640x480x72Hz mode to set on the pipe */
/* Fallback mode used by intel_get_load_detect_pipe() when the caller does
 * not supply one. */
static struct drm_display_mode load_detect_mode = {
	DRM_MODE("640x480", DRM_MODE_TYPE_DEFAULT, 31500, 640, 664,
		 704, 832, 0, 480, 489, 491, 520, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
};
6928
6929 static struct drm_framebuffer *
6930 intel_framebuffer_create(struct drm_device *dev,
6931                          struct drm_mode_fb_cmd2 *mode_cmd,
6932                          struct drm_i915_gem_object *obj)
6933 {
6934         struct intel_framebuffer *intel_fb;
6935         int ret;
6936
6937         intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
6938         if (!intel_fb) {
6939                 drm_gem_object_unreference_unlocked(&obj->base);
6940                 return ERR_PTR(-ENOMEM);
6941         }
6942
6943         ret = intel_framebuffer_init(dev, intel_fb, mode_cmd, obj);
6944         if (ret) {
6945                 drm_gem_object_unreference_unlocked(&obj->base);
6946                 kfree(intel_fb);
6947                 return ERR_PTR(ret);
6948         }
6949
6950         return &intel_fb->base;
6951 }
6952
6953 static u32
6954 intel_framebuffer_pitch_for_width(int width, int bpp)
6955 {
6956         u32 pitch = DIV_ROUND_UP(width * bpp, 8);
6957         return ALIGN(pitch, 64);
6958 }
6959
6960 static u32
6961 intel_framebuffer_size_for_mode(struct drm_display_mode *mode, int bpp)
6962 {
6963         u32 pitch = intel_framebuffer_pitch_for_width(mode->hdisplay, bpp);
6964         return ALIGN(pitch * mode->vdisplay, PAGE_SIZE);
6965 }
6966
6967 static struct drm_framebuffer *
6968 intel_framebuffer_create_for_mode(struct drm_device *dev,
6969                                   struct drm_display_mode *mode,
6970                                   int depth, int bpp)
6971 {
6972         struct drm_i915_gem_object *obj;
6973         struct drm_mode_fb_cmd2 mode_cmd;
6974
6975         obj = i915_gem_alloc_object(dev,
6976                                     intel_framebuffer_size_for_mode(mode, bpp));
6977         if (obj == NULL)
6978                 return ERR_PTR(-ENOMEM);
6979
6980         mode_cmd.width = mode->hdisplay;
6981         mode_cmd.height = mode->vdisplay;
6982         mode_cmd.pitches[0] = intel_framebuffer_pitch_for_width(mode_cmd.width,
6983                                                                 bpp);
6984         mode_cmd.pixel_format = 0;
6985
6986         return intel_framebuffer_create(dev, &mode_cmd, obj);
6987 }
6988
6989 static struct drm_framebuffer *
6990 mode_fits_in_fbdev(struct drm_device *dev,
6991                    struct drm_display_mode *mode)
6992 {
6993         struct drm_i915_private *dev_priv = dev->dev_private;
6994         struct drm_i915_gem_object *obj;
6995         struct drm_framebuffer *fb;
6996
6997         if (dev_priv->fbdev == NULL)
6998                 return NULL;
6999
7000         obj = dev_priv->fbdev->ifb.obj;
7001         if (obj == NULL)
7002                 return NULL;
7003
7004         fb = &dev_priv->fbdev->ifb.base;
7005         if (fb->pitches[0] < intel_framebuffer_pitch_for_width(mode->hdisplay,
7006                                                                fb->bits_per_pixel))
7007                 return NULL;
7008
7009         if (obj->base.size < mode->vdisplay * fb->pitches[0])
7010                 return NULL;
7011
7012         return fb;
7013 }
7014
/*
 * Acquire a CRTC driving @intel_encoder so load-based monitor detection can
 * run. Saves the pre-existing state into @old for
 * intel_release_load_detect_pipe() to restore.
 *
 * Returns true when a lit-up pipe is available, false otherwise.
 */
bool intel_get_load_detect_pipe(struct intel_encoder *intel_encoder,
				struct drm_connector *connector,
				struct drm_display_mode *mode,
				struct intel_load_detect_pipe *old)
{
	struct intel_crtc *intel_crtc;
	struct drm_crtc *possible_crtc;
	struct drm_encoder *encoder = &intel_encoder->base;
	struct drm_crtc *crtc = NULL;
	struct drm_device *dev = encoder->dev;
	struct drm_framebuffer *old_fb;
	int i = -1;

	DRM_DEBUG_KMS("[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
		      connector->base.id, drm_get_connector_name(connector),
		      encoder->base.id, drm_get_encoder_name(encoder));

	/*
	 * Algorithm gets a little messy:
	 *
	 *   - if the connector already has an assigned crtc, use it (but make
	 *     sure it's on first)
	 *
	 *   - try to find the first unused crtc that can drive this connector,
	 *     and use that if we find one
	 */

	/* See if we already have a CRTC for this connector */
	if (encoder->crtc) {
		crtc = encoder->crtc;

		intel_crtc = to_intel_crtc(crtc);
		old->dpms_mode = intel_crtc->dpms_mode;
		old->load_detect_temp = false;

		/* Make sure the crtc and connector are running */
		if (intel_crtc->dpms_mode != DRM_MODE_DPMS_ON) {
			struct drm_encoder_helper_funcs *encoder_funcs;
			struct drm_crtc_helper_funcs *crtc_funcs;

			crtc_funcs = crtc->helper_private;
			crtc_funcs->dpms(crtc, DRM_MODE_DPMS_ON);

			encoder_funcs = encoder->helper_private;
			encoder_funcs->dpms(encoder, DRM_MODE_DPMS_ON);
		}

		return true;
	}

	/* Find an unused one (if possible) */
	list_for_each_entry(possible_crtc, &dev->mode_config.crtc_list, head) {
		i++;
		/* possible_crtcs is a bitmask indexed by crtc list position */
		if (!(encoder->possible_crtcs & (1 << i)))
			continue;
		if (!possible_crtc->enabled) {
			crtc = possible_crtc;
			break;
		}
	}

	/*
	 * If we didn't find an unused CRTC, don't use any.
	 */
	if (!crtc) {
		DRM_DEBUG_KMS("no pipe available for load-detect\n");
		return false;
	}

	/* Temporarily claim the pipe for this encoder/connector pair. */
	encoder->crtc = crtc;
	connector->encoder = encoder;

	intel_crtc = to_intel_crtc(crtc);
	old->dpms_mode = intel_crtc->dpms_mode;
	old->load_detect_temp = true;
	old->release_fb = NULL;

	if (!mode)
		mode = &load_detect_mode;

	old_fb = crtc->fb;

	/* We need a framebuffer large enough to accommodate all accesses
	 * that the plane may generate whilst we perform load detection.
	 * We can not rely on the fbcon either being present (we get called
	 * during its initialisation to detect all boot displays, or it may
	 * not even exist) or that it is large enough to satisfy the
	 * requested mode.
	 */
	crtc->fb = mode_fits_in_fbdev(dev, mode);
	if (crtc->fb == NULL) {
		DRM_DEBUG_KMS("creating tmp fb for load-detection\n");
		crtc->fb = intel_framebuffer_create_for_mode(dev, mode, 24, 32);
		/* Remember it so the release path can destroy the tmp fb. */
		old->release_fb = crtc->fb;
	} else
		DRM_DEBUG_KMS("reusing fbdev for load-detection framebuffer\n");
	if (IS_ERR(crtc->fb)) {
		DRM_DEBUG_KMS("failed to allocate framebuffer for load-detection\n");
		crtc->fb = old_fb;
		return false;
	}

	if (!drm_crtc_helper_set_mode(crtc, mode, 0, 0, old_fb)) {
		DRM_DEBUG_KMS("failed to set mode on load-detect pipe\n");
		if (old->release_fb)
			old->release_fb->funcs->destroy(old->release_fb);
		crtc->fb = old_fb;
		return false;
	}

	/* let the connector get through one full cycle before testing */
	intel_wait_for_vblank(dev, intel_crtc->pipe);

	return true;
}
7130
/* Undo the temporary configuration set up by intel_get_load_detect_pipe():
 * either tear down the borrowed pipe entirely (load_detect_temp), or just
 * restore the crtc/encoder to their previous DPMS state.
 */
void intel_release_load_detect_pipe(struct intel_encoder *intel_encoder,
                                    struct drm_connector *connector,
                                    struct intel_load_detect_pipe *old)
{
        struct drm_encoder *encoder = &intel_encoder->base;
        struct drm_device *dev = encoder->dev;
        struct drm_crtc *crtc = encoder->crtc;
        struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private;
        struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;

        DRM_DEBUG_KMS("[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
                      connector->base.id, drm_get_connector_name(connector),
                      encoder->base.id, drm_get_encoder_name(encoder));

        /* If the pipe was borrowed just for load detection, detach the
         * connector, let the helper switch off the now-unused crtc/encoder,
         * and destroy any framebuffer created purely for the detection
         * modeset.
         */
        if (old->load_detect_temp) {
                connector->encoder = NULL;
                drm_helper_disable_unused_functions(dev);

                if (old->release_fb)
                        old->release_fb->funcs->destroy(old->release_fb);

                return;
        }

        /* Switch crtc and encoder back off if necessary */
        if (old->dpms_mode != DRM_MODE_DPMS_ON) {
                encoder_funcs->dpms(encoder, old->dpms_mode);
                crtc_funcs->dpms(crtc, old->dpms_mode);
        }
}
7161
/* Returns the clock of the currently programmed mode of the given pipe. */
static int intel_crtc_clock_get(struct drm_device *dev, struct drm_crtc *crtc)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        int pipe = intel_crtc->pipe;
        u32 dpll = I915_READ(DPLL(pipe));
        u32 fp;
        intel_clock_t clock;

        /* The DPLL's rate-select bit tells us which of the two FP divisor
         * registers is currently in use.
         */
        if ((dpll & DISPLAY_RATE_SELECT_FPA1) == 0)
                fp = I915_READ(FP0(pipe));
        else
                fp = I915_READ(FP1(pipe));

        /* Decode the M/N divisors; Pineview packs them differently (and its
         * N field is a one-hot encoding, hence the ffs()).
         */
        clock.m1 = (fp & FP_M1_DIV_MASK) >> FP_M1_DIV_SHIFT;
        if (IS_PINEVIEW(dev)) {
                clock.n = ffs((fp & FP_N_PINEVIEW_DIV_MASK) >> FP_N_DIV_SHIFT) - 1;
                clock.m2 = (fp & FP_M2_PINEVIEW_DIV_MASK) >> FP_M2_DIV_SHIFT;
        } else {
                clock.n = (fp & FP_N_DIV_MASK) >> FP_N_DIV_SHIFT;
                clock.m2 = (fp & FP_M2_DIV_MASK) >> FP_M2_DIV_SHIFT;
        }

        if (!IS_GEN2(dev)) {
                /* P1 is stored one-hot in the DPLL register on gen3+. */
                if (IS_PINEVIEW(dev))
                        clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_PINEVIEW) >>
                                DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW);
                else
                        clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK) >>
                               DPLL_FPA01_P1_POST_DIV_SHIFT);

                /* P2 depends on the DPLL operating mode (DAC vs LVDS). */
                switch (dpll & DPLL_MODE_MASK) {
                case DPLLB_MODE_DAC_SERIAL:
                        clock.p2 = dpll & DPLL_DAC_SERIAL_P2_CLOCK_DIV_5 ?
                                5 : 10;
                        break;
                case DPLLB_MODE_LVDS:
                        clock.p2 = dpll & DPLLB_LVDS_P2_CLOCK_DIV_7 ?
                                7 : 14;
                        break;
                default:
                        DRM_DEBUG_KMS("Unknown DPLL mode %08x in programmed "
                                  "mode\n", (int)(dpll & DPLL_MODE_MASK));
                        return 0;
                }

                /* XXX: Handle the 100Mhz refclk */
                intel_clock(dev, 96000, &clock);
        } else {
                /* Gen2: pipe B with the LVDS port enabled implies the LVDS
                 * fixed-panel divider rules apply.
                 */
                bool is_lvds = (pipe == 1) && (I915_READ(LVDS) & LVDS_PORT_EN);

                if (is_lvds) {
                        clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830_LVDS) >>
                                       DPLL_FPA01_P1_POST_DIV_SHIFT);
                        clock.p2 = 14;

                        /* Spread-spectrum reference implies the SSC clock;
                         * otherwise assume the standard 48MHz reference.
                         */
                        if ((dpll & PLL_REF_INPUT_MASK) ==
                            PLLB_REF_INPUT_SPREADSPECTRUMIN) {
                                /* XXX: might not be 66MHz */
                                intel_clock(dev, 66000, &clock);
                        } else
                                intel_clock(dev, 48000, &clock);
                } else {
                        if (dpll & PLL_P1_DIVIDE_BY_TWO)
                                clock.p1 = 2;
                        else {
                                clock.p1 = ((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830) >>
                                            DPLL_FPA01_P1_POST_DIV_SHIFT) + 2;
                        }
                        if (dpll & PLL_P2_DIVIDE_BY_4)
                                clock.p2 = 4;
                        else
                                clock.p2 = 2;

                        intel_clock(dev, 48000, &clock);
                }
        }

        /* XXX: It would be nice to validate the clocks, but we can't reuse
         * i830PllIsValid() because it relies on the xf86_config connector
         * configuration being accurate, which it isn't necessarily.
         */

        return clock.dot;
}
7248
7249 /** Returns the currently programmed mode of the given pipe. */
7250 struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev,
7251                                              struct drm_crtc *crtc)
7252 {
7253         struct drm_i915_private *dev_priv = dev->dev_private;
7254         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
7255         int pipe = intel_crtc->pipe;
7256         struct drm_display_mode *mode;
7257         int htot = I915_READ(HTOTAL(pipe));
7258         int hsync = I915_READ(HSYNC(pipe));
7259         int vtot = I915_READ(VTOTAL(pipe));
7260         int vsync = I915_READ(VSYNC(pipe));
7261
7262         mode = kzalloc(sizeof(*mode), GFP_KERNEL);
7263         if (!mode)
7264                 return NULL;
7265
7266         mode->clock = intel_crtc_clock_get(dev, crtc);
7267         mode->hdisplay = (htot & 0xffff) + 1;
7268         mode->htotal = ((htot & 0xffff0000) >> 16) + 1;
7269         mode->hsync_start = (hsync & 0xffff) + 1;
7270         mode->hsync_end = ((hsync & 0xffff0000) >> 16) + 1;
7271         mode->vdisplay = (vtot & 0xffff) + 1;
7272         mode->vtotal = ((vtot & 0xffff0000) >> 16) + 1;
7273         mode->vsync_start = (vsync & 0xffff) + 1;
7274         mode->vsync_end = ((vsync & 0xffff0000) >> 16) + 1;
7275
7276         drm_mode_set_name(mode);
7277         drm_mode_set_crtcinfo(mode, 0);
7278
7279         return mode;
7280 }
7281
7282 #define GPU_IDLE_TIMEOUT 500 /* ms */
7283
7284 /* When this timer fires, we've been idle for awhile */
7285 static void intel_gpu_idle_timer(unsigned long arg)
7286 {
7287         struct drm_device *dev = (struct drm_device *)arg;
7288         drm_i915_private_t *dev_priv = dev->dev_private;
7289
7290         if (!list_empty(&dev_priv->mm.active_list)) {
7291                 /* Still processing requests, so just re-arm the timer. */
7292                 mod_timer(&dev_priv->idle_timer, jiffies +
7293                           msecs_to_jiffies(GPU_IDLE_TIMEOUT));
7294                 return;
7295         }
7296
7297         dev_priv->busy = false;
7298         queue_work(dev_priv->wq, &dev_priv->idle_work);
7299 }
7300
7301 #define CRTC_IDLE_TIMEOUT 1000 /* ms */
7302
/* Per-CRTC idle timer: fires when the CRTC hasn't been marked busy for
 * CRTC_IDLE_TIMEOUT ms.  Clock adjustment is deferred to the idle_work
 * workqueue, since intel_idle_update() takes struct_mutex and therefore
 * cannot run in timer (softirq) context.
 */
static void intel_crtc_idle_timer(unsigned long arg)
{
        struct intel_crtc *intel_crtc = (struct intel_crtc *)arg;
        struct drm_crtc *crtc = &intel_crtc->base;
        drm_i915_private_t *dev_priv = crtc->dev->dev_private;
        struct intel_framebuffer *intel_fb;

        /* NOTE(review): to_intel_framebuffer() is a container_of(), so the
         * NULL test below only works if the drm_framebuffer is the first
         * member of struct intel_framebuffer — confirm that layout holds.
         */
        intel_fb = to_intel_framebuffer(crtc->fb);
        if (intel_fb && intel_fb->obj->active) {
                /* The framebuffer is still being accessed by the GPU. */
                mod_timer(&intel_crtc->idle_timer, jiffies +
                          msecs_to_jiffies(CRTC_IDLE_TIMEOUT));
                return;
        }

        intel_crtc->busy = false;
        queue_work(dev_priv->wq, &dev_priv->idle_work);
}
7321
/* Restore the full (non-downclocked) LVDS pixel clock on the given CRTC.
 * Only relevant on pre-PCH platforms with an LVDS downclock available;
 * also (re)arms the per-CRTC idle timer so we downclock again when idle.
 */
static void intel_increase_pllclock(struct drm_crtc *crtc)
{
        struct drm_device *dev = crtc->dev;
        drm_i915_private_t *dev_priv = dev->dev_private;
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        int pipe = intel_crtc->pipe;
        int dpll_reg = DPLL(pipe);
        int dpll;

        if (HAS_PCH_SPLIT(dev))
                return;

        if (!dev_priv->lvds_downclock_avail)
                return;

        /* Only upclock if the pipe is currently running off the reduced
         * (FPA1) divider; HAS_PIPE_CXSR platforms are skipped.
         */
        dpll = I915_READ(dpll_reg);
        if (!HAS_PIPE_CXSR(dev) && (dpll & DISPLAY_RATE_SELECT_FPA1)) {
                DRM_DEBUG_DRIVER("upclocking LVDS\n");

                /* The panel registers must be unlocked before the DPLL
                 * write takes effect.
                 */
                assert_panel_unlocked(dev_priv, pipe);

                dpll &= ~DISPLAY_RATE_SELECT_FPA1;
                I915_WRITE(dpll_reg, dpll);
                intel_wait_for_vblank(dev, pipe);

                /* Read back to confirm the rate-select bit really cleared. */
                dpll = I915_READ(dpll_reg);
                if (dpll & DISPLAY_RATE_SELECT_FPA1)
                        DRM_DEBUG_DRIVER("failed to upclock LVDS!\n");
        }

        /* Schedule downclock */
        mod_timer(&intel_crtc->idle_timer, jiffies +
                  msecs_to_jiffies(CRTC_IDLE_TIMEOUT));
}
7356
/* Switch the CRTC's LVDS pixel clock to the reduced (FPA1) divider to save
 * power while idle.  Called from the idle workqueue only, never manually.
 */
static void intel_decrease_pllclock(struct drm_crtc *crtc)
{
        struct drm_device *dev = crtc->dev;
        drm_i915_private_t *dev_priv = dev->dev_private;
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        int pipe = intel_crtc->pipe;
        int dpll_reg = DPLL(pipe);
        int dpll = I915_READ(dpll_reg);

        if (HAS_PCH_SPLIT(dev))
                return;

        if (!dev_priv->lvds_downclock_avail)
                return;

        /*
         * Since this is called by a timer, we should never get here in
         * the manual case.
         */
        if (!HAS_PIPE_CXSR(dev) && intel_crtc->lowfreq_avail) {
                DRM_DEBUG_DRIVER("downclocking LVDS\n");

                /* Panel registers must be unlocked before the DPLL write
                 * takes effect.
                 */
                assert_panel_unlocked(dev_priv, pipe);

                dpll |= DISPLAY_RATE_SELECT_FPA1;
                I915_WRITE(dpll_reg, dpll);
                intel_wait_for_vblank(dev, pipe);
                /* Read back to confirm the rate-select bit actually set. */
                dpll = I915_READ(dpll_reg);
                if (!(dpll & DISPLAY_RATE_SELECT_FPA1))
                        DRM_DEBUG_DRIVER("failed to downclock LVDS!\n");
        }

}
7390
7391 /**
7392  * intel_idle_update - adjust clocks for idleness
7393  * @work: work struct
7394  *
7395  * Either the GPU or display (or both) went idle.  Check the busy status
7396  * here and adjust the CRTC and GPU clocks as necessary.
7397  */
7398 static void intel_idle_update(struct work_struct *work)
7399 {
7400         drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
7401                                                     idle_work);
7402         struct drm_device *dev = dev_priv->dev;
7403         struct drm_crtc *crtc;
7404         struct intel_crtc *intel_crtc;
7405
7406         if (!i915_powersave)
7407                 return;
7408
7409         mutex_lock(&dev->struct_mutex);
7410
7411         i915_update_gfx_val(dev_priv);
7412
7413         list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
7414                 /* Skip inactive CRTCs */
7415                 if (!crtc->fb)
7416                         continue;
7417
7418                 intel_crtc = to_intel_crtc(crtc);
7419                 if (!intel_crtc->busy)
7420                         intel_decrease_pllclock(crtc);
7421         }
7422
7423
7424         mutex_unlock(&dev->struct_mutex);
7425 }
7426
7427 /**
7428  * intel_mark_busy - mark the GPU and possibly the display busy
7429  * @dev: drm device
7430  * @obj: object we're operating on
7431  *
7432  * Callers can use this function to indicate that the GPU is busy processing
7433  * commands.  If @obj matches one of the CRTC objects (i.e. it's a scanout
7434  * buffer), we'll also mark the display as busy, so we know to increase its
7435  * clock frequency.
7436  */
7437 void intel_mark_busy(struct drm_device *dev, struct drm_i915_gem_object *obj)
7438 {
7439         drm_i915_private_t *dev_priv = dev->dev_private;
7440         struct drm_crtc *crtc = NULL;
7441         struct intel_framebuffer *intel_fb;
7442         struct intel_crtc *intel_crtc;
7443
7444         if (!drm_core_check_feature(dev, DRIVER_MODESET))
7445                 return;
7446
7447         if (!dev_priv->busy)
7448                 dev_priv->busy = true;
7449         else
7450                 mod_timer(&dev_priv->idle_timer, jiffies +
7451                           msecs_to_jiffies(GPU_IDLE_TIMEOUT));
7452
7453         list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
7454                 if (!crtc->fb)
7455                         continue;
7456
7457                 intel_crtc = to_intel_crtc(crtc);
7458                 intel_fb = to_intel_framebuffer(crtc->fb);
7459                 if (intel_fb->obj == obj) {
7460                         if (!intel_crtc->busy) {
7461                                 /* Non-busy -> busy, upclock */
7462                                 intel_increase_pllclock(crtc);
7463                                 intel_crtc->busy = true;
7464                         } else {
7465                                 /* Busy -> busy, put off timer */
7466                                 mod_timer(&intel_crtc->idle_timer, jiffies +
7467                                           msecs_to_jiffies(CRTC_IDLE_TIMEOUT));
7468                         }
7469                 }
7470         }
7471 }
7472
7473 static void intel_crtc_destroy(struct drm_crtc *crtc)
7474 {
7475         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
7476         struct drm_device *dev = crtc->dev;
7477         struct intel_unpin_work *work;
7478         unsigned long flags;
7479
7480         spin_lock_irqsave(&dev->event_lock, flags);
7481         work = intel_crtc->unpin_work;
7482         intel_crtc->unpin_work = NULL;
7483         spin_unlock_irqrestore(&dev->event_lock, flags);
7484
7485         if (work) {
7486                 cancel_work_sync(&work->work);
7487                 kfree(work);
7488         }
7489
7490         drm_crtc_cleanup(crtc);
7491
7492         kfree(intel_crtc);
7493 }
7494
7495 static void intel_unpin_work_fn(struct work_struct *__work)
7496 {
7497         struct intel_unpin_work *work =
7498                 container_of(__work, struct intel_unpin_work, work);
7499
7500         mutex_lock(&work->dev->struct_mutex);
7501         intel_unpin_fb_obj(work->old_fb_obj);
7502         drm_gem_object_unreference(&work->pending_flip_obj->base);
7503         drm_gem_object_unreference(&work->old_fb_obj->base);
7504
7505         intel_update_fbc(work->dev);
7506         mutex_unlock(&work->dev->struct_mutex);
7507         kfree(work);
7508 }
7509
/* Complete a pending page flip on @crtc from the vblank interrupt path:
 * deliver the flip-completion event (with a corrected timestamp if the
 * irq arrived late), release the vblank reference, unblock renderers
 * waiting on pending_flip, and schedule the deferred unpin work.
 */
static void do_intel_finish_page_flip(struct drm_device *dev,
                                      struct drm_crtc *crtc)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        struct intel_unpin_work *work;
        struct drm_i915_gem_object *obj;
        struct drm_pending_vblank_event *e;
        struct timeval tnow, tvbl;
        unsigned long flags;

        /* Ignore early vblank irqs */
        if (intel_crtc == NULL)
                return;

        do_gettimeofday(&tnow);

        /* unpin_work is protected by the event spinlock; bail if there is
         * no flip queued or it hasn't been prepared by the flip irq yet.
         */
        spin_lock_irqsave(&dev->event_lock, flags);
        work = intel_crtc->unpin_work;
        if (work == NULL || !work->pending) {
                spin_unlock_irqrestore(&dev->event_lock, flags);
                return;
        }

        intel_crtc->unpin_work = NULL;

        if (work->event) {
                e = work->event;
                e->event.sequence = drm_vblank_count_and_time(dev, intel_crtc->pipe, &tvbl);

                /* Called before vblank count and timestamps have
                 * been updated for the vblank interval of flip
                 * completion? Need to increment vblank count and
                 * add one videorefresh duration to returned timestamp
                 * to account for this. We assume this happened if we
                 * get called over 0.9 frame durations after the last
                 * timestamped vblank.
                 *
                 * This calculation can not be used with vrefresh rates
                 * below 5Hz (10Hz to be on the safe side) without
                 * promoting to 64 integers.
                 */
                if (10 * (timeval_to_ns(&tnow) - timeval_to_ns(&tvbl)) >
                    9 * crtc->framedur_ns) {
                        e->event.sequence++;
                        tvbl = ns_to_timeval(timeval_to_ns(&tvbl) +
                                             crtc->framedur_ns);
                }

                e->event.tv_sec = tvbl.tv_sec;
                e->event.tv_usec = tvbl.tv_usec;

                /* Hand the completion event to the file that queued it. */
                list_add_tail(&e->base.link,
                              &e->base.file_priv->event_list);
                wake_up_interruptible(&e->base.file_priv->event_wait);
        }

        drm_vblank_put(dev, intel_crtc->pipe);

        spin_unlock_irqrestore(&dev->event_lock, flags);

        obj = work->old_fb_obj;

        /* Clear this plane's bit in the old buffer's pending-flip mask;
         * once no plane is flipping from it, wake anyone blocked on it.
         */
        atomic_clear_mask(1 << intel_crtc->plane,
                          &obj->pending_flip.counter);
        if (atomic_read(&obj->pending_flip) == 0)
                wake_up(&dev_priv->pending_flip_queue);

        /* Unpinning needs struct_mutex, so defer it to process context. */
        schedule_work(&work->work);

        trace_i915_flip_complete(intel_crtc->plane, work->pending_flip_obj);
}
7582
7583 void intel_finish_page_flip(struct drm_device *dev, int pipe)
7584 {
7585         drm_i915_private_t *dev_priv = dev->dev_private;
7586         struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
7587
7588         do_intel_finish_page_flip(dev, crtc);
7589 }
7590
7591 void intel_finish_page_flip_plane(struct drm_device *dev, int plane)
7592 {
7593         drm_i915_private_t *dev_priv = dev->dev_private;
7594         struct drm_crtc *crtc = dev_priv->plane_to_crtc_mapping[plane];
7595
7596         do_intel_finish_page_flip(dev, crtc);
7597 }
7598
7599 void intel_prepare_page_flip(struct drm_device *dev, int plane)
7600 {
7601         drm_i915_private_t *dev_priv = dev->dev_private;
7602         struct intel_crtc *intel_crtc =
7603                 to_intel_crtc(dev_priv->plane_to_crtc_mapping[plane]);
7604         unsigned long flags;
7605
7606         spin_lock_irqsave(&dev->event_lock, flags);
7607         if (intel_crtc->unpin_work) {
7608                 if ((++intel_crtc->unpin_work->pending) > 1)
7609                         DRM_ERROR("Prepared flip multiple times\n");
7610         } else {
7611                 DRM_DEBUG_DRIVER("preparing flip with no unpin work?\n");
7612         }
7613         spin_unlock_irqrestore(&dev->event_lock, flags);
7614 }
7615
7616 static int intel_gen2_queue_flip(struct drm_device *dev,
7617                                  struct drm_crtc *crtc,
7618                                  struct drm_framebuffer *fb,
7619                                  struct drm_i915_gem_object *obj)
7620 {
7621         struct drm_i915_private *dev_priv = dev->dev_private;
7622         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
7623         unsigned long offset;
7624         u32 flip_mask;
7625         int ret;
7626
7627         ret = intel_pin_and_fence_fb_obj(dev, obj, LP_RING(dev_priv));
7628         if (ret)
7629                 goto out;
7630
7631         /* Offset into the new buffer for cases of shared fbs between CRTCs */
7632         offset = crtc->y * fb->pitches[0] + crtc->x * fb->bits_per_pixel/8;
7633
7634         ret = BEGIN_LP_RING(6);
7635         if (ret)
7636                 goto out;
7637
7638         /* Can't queue multiple flips, so wait for the previous
7639          * one to finish before executing the next.
7640          */
7641         if (intel_crtc->plane)
7642                 flip_mask = MI_WAIT_FOR_PLANE_B_FLIP;
7643         else
7644                 flip_mask = MI_WAIT_FOR_PLANE_A_FLIP;
7645         OUT_RING(MI_WAIT_FOR_EVENT | flip_mask);
7646         OUT_RING(MI_NOOP);
7647         OUT_RING(MI_DISPLAY_FLIP |
7648                  MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
7649         OUT_RING(fb->pitches[0]);
7650         OUT_RING(obj->gtt_offset + offset);
7651         OUT_RING(0); /* aux display base address, unused */
7652         ADVANCE_LP_RING();
7653 out:
7654         return ret;
7655 }
7656
7657 static int intel_gen3_queue_flip(struct drm_device *dev,
7658                                  struct drm_crtc *crtc,
7659                                  struct drm_framebuffer *fb,
7660                                  struct drm_i915_gem_object *obj)
7661 {
7662         struct drm_i915_private *dev_priv = dev->dev_private;
7663         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
7664         unsigned long offset;
7665         u32 flip_mask;
7666         int ret;
7667
7668         ret = intel_pin_and_fence_fb_obj(dev, obj, LP_RING(dev_priv));
7669         if (ret)
7670                 goto out;
7671
7672         /* Offset into the new buffer for cases of shared fbs between CRTCs */
7673         offset = crtc->y * fb->pitches[0] + crtc->x * fb->bits_per_pixel/8;
7674
7675         ret = BEGIN_LP_RING(6);
7676         if (ret)
7677                 goto out;
7678
7679         if (intel_crtc->plane)
7680                 flip_mask = MI_WAIT_FOR_PLANE_B_FLIP;
7681         else
7682                 flip_mask = MI_WAIT_FOR_PLANE_A_FLIP;
7683         OUT_RING(MI_WAIT_FOR_EVENT | flip_mask);
7684         OUT_RING(MI_NOOP);
7685         OUT_RING(MI_DISPLAY_FLIP_I915 |
7686                  MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
7687         OUT_RING(fb->pitches[0]);
7688         OUT_RING(obj->gtt_offset + offset);
7689         OUT_RING(MI_NOOP);
7690
7691         ADVANCE_LP_RING();
7692 out:
7693         return ret;
7694 }
7695
7696 static int intel_gen4_queue_flip(struct drm_device *dev,
7697                                  struct drm_crtc *crtc,
7698                                  struct drm_framebuffer *fb,
7699                                  struct drm_i915_gem_object *obj)
7700 {
7701         struct drm_i915_private *dev_priv = dev->dev_private;
7702         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
7703         uint32_t pf, pipesrc;
7704         int ret;
7705
7706         ret = intel_pin_and_fence_fb_obj(dev, obj, LP_RING(dev_priv));
7707         if (ret)
7708                 goto out;
7709
7710         ret = BEGIN_LP_RING(4);
7711         if (ret)
7712                 goto out;
7713
7714         /* i965+ uses the linear or tiled offsets from the
7715          * Display Registers (which do not change across a page-flip)
7716          * so we need only reprogram the base address.
7717          */
7718         OUT_RING(MI_DISPLAY_FLIP |
7719                  MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
7720         OUT_RING(fb->pitches[0]);
7721         OUT_RING(obj->gtt_offset | obj->tiling_mode);
7722
7723         /* XXX Enabling the panel-fitter across page-flip is so far
7724          * untested on non-native modes, so ignore it for now.
7725          * pf = I915_READ(pipe == 0 ? PFA_CTL_1 : PFB_CTL_1) & PF_ENABLE;
7726          */
7727         pf = 0;
7728         pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff;
7729         OUT_RING(pf | pipesrc);
7730         ADVANCE_LP_RING();
7731 out:
7732         return ret;
7733 }
7734
7735 static int intel_gen6_queue_flip(struct drm_device *dev,
7736                                  struct drm_crtc *crtc,
7737                                  struct drm_framebuffer *fb,
7738                                  struct drm_i915_gem_object *obj)
7739 {
7740         struct drm_i915_private *dev_priv = dev->dev_private;
7741         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
7742         uint32_t pf, pipesrc;
7743         int ret;
7744
7745         ret = intel_pin_and_fence_fb_obj(dev, obj, LP_RING(dev_priv));
7746         if (ret)
7747                 goto out;
7748
7749         ret = BEGIN_LP_RING(4);
7750         if (ret)
7751                 goto out;
7752
7753         OUT_RING(MI_DISPLAY_FLIP |
7754                  MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
7755         OUT_RING(fb->pitches[0] | obj->tiling_mode);
7756         OUT_RING(obj->gtt_offset);
7757
7758         pf = I915_READ(PF_CTL(intel_crtc->pipe)) & PF_ENABLE;
7759         pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff;
7760         OUT_RING(pf | pipesrc);
7761         ADVANCE_LP_RING();
7762 out:
7763         return ret;
7764 }
7765
7766 /*
7767  * On gen7 we currently use the blit ring because (in early silicon at least)
7768  * the render ring doesn't give us interrpts for page flip completion, which
7769  * means clients will hang after the first flip is queued.  Fortunately the
7770  * blit ring generates interrupts properly, so use it instead.
7771  */
7772 static int intel_gen7_queue_flip(struct drm_device *dev,
7773                                  struct drm_crtc *crtc,
7774                                  struct drm_framebuffer *fb,
7775                                  struct drm_i915_gem_object *obj)
7776 {
7777         struct drm_i915_private *dev_priv = dev->dev_private;
7778         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
7779         struct intel_ring_buffer *ring = &dev_priv->ring[BCS];
7780         int ret;
7781
7782         ret = intel_pin_and_fence_fb_obj(dev, obj, ring);
7783         if (ret)
7784                 goto out;
7785
7786         ret = intel_ring_begin(ring, 4);
7787         if (ret)
7788                 goto out;
7789
7790         intel_ring_emit(ring, MI_DISPLAY_FLIP_I915 | (intel_crtc->plane << 19));
7791         intel_ring_emit(ring, (fb->pitches[0] | obj->tiling_mode));
7792         intel_ring_emit(ring, (obj->gtt_offset));
7793         intel_ring_emit(ring, (MI_NOOP));
7794         intel_ring_advance(ring);
7795 out:
7796         return ret;
7797 }
7798
/* Fallback queue_flip implementation for platforms without a ring-based
 * flip path: reject the page-flip request outright.
 */
static int intel_default_queue_flip(struct drm_device *dev,
                                    struct drm_crtc *crtc,
                                    struct drm_framebuffer *fb,
                                    struct drm_i915_gem_object *obj)
{
        return -ENODEV;
}
7806
/* Userspace-facing page-flip entry point (drm_crtc_funcs.page_flip):
 * allocate the unpin work, take references on the old and new buffers,
 * block renderers on the old buffer via pending_flip, and hand off to the
 * platform queue_flip hook.  On success the flip completes asynchronously
 * via intel_prepare/finish_page_flip().
 */
static int intel_crtc_page_flip(struct drm_crtc *crtc,
                                struct drm_framebuffer *fb,
                                struct drm_pending_vblank_event *event)
{
        struct drm_device *dev = crtc->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_framebuffer *intel_fb;
        struct drm_i915_gem_object *obj;
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        struct intel_unpin_work *work;
        unsigned long flags;
        int ret;

        work = kzalloc(sizeof *work, GFP_KERNEL);
        if (work == NULL)
                return -ENOMEM;

        work->event = event;
        work->dev = crtc->dev;
        intel_fb = to_intel_framebuffer(crtc->fb);
        work->old_fb_obj = intel_fb->obj;
        INIT_WORK(&work->work, intel_unpin_work_fn);

        /* Hold a vblank reference for the lifetime of the queued flip. */
        ret = drm_vblank_get(dev, intel_crtc->pipe);
        if (ret)
                goto free_work;

        /* We borrow the event spin lock for protecting unpin_work */
        spin_lock_irqsave(&dev->event_lock, flags);
        if (intel_crtc->unpin_work) {
                /* Only one flip may be outstanding per CRTC. */
                spin_unlock_irqrestore(&dev->event_lock, flags);
                kfree(work);
                drm_vblank_put(dev, intel_crtc->pipe);

                DRM_DEBUG_DRIVER("flip queue: crtc already busy\n");
                return -EBUSY;
        }
        intel_crtc->unpin_work = work;
        spin_unlock_irqrestore(&dev->event_lock, flags);

        intel_fb = to_intel_framebuffer(fb);
        obj = intel_fb->obj;

        mutex_lock(&dev->struct_mutex);

        /* Reference the objects for the scheduled work. */
        drm_gem_object_reference(&work->old_fb_obj->base);
        drm_gem_object_reference(&obj->base);

        /* NOTE(review): crtc->fb is updated before queue_flip() succeeds and
         * is not restored on the cleanup_pending path — looks like a failed
         * flip leaves a stale fb pointer behind; confirm against later
         * upstream fixes before relying on this.
         */
        crtc->fb = fb;

        work->pending_flip_obj = obj;

        work->enable_stall_check = true;

        /* Block clients from rendering to the new back buffer until
         * the flip occurs and the object is no longer visible.
         */
        atomic_add(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);

        ret = dev_priv->display.queue_flip(dev, crtc, fb, obj);
        if (ret)
                goto cleanup_pending;

        intel_disable_fbc(dev);
        mutex_unlock(&dev->struct_mutex);

        trace_i915_flip_request(intel_crtc->plane, obj);

        return 0;

cleanup_pending:
        /* Undo the pending-flip marker and the references taken above. */
        atomic_sub(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
        drm_gem_object_unreference(&work->old_fb_obj->base);
        drm_gem_object_unreference(&obj->base);
        mutex_unlock(&dev->struct_mutex);

        spin_lock_irqsave(&dev->event_lock, flags);
        intel_crtc->unpin_work = NULL;
        spin_unlock_irqrestore(&dev->event_lock, flags);

        drm_vblank_put(dev, intel_crtc->pipe);
free_work:
        kfree(work);

        return ret;
}
7894
/*
 * Fix up any plane/pipe cross-wiring left behind by the BIOS/bootloader
 * on pre-PCH (non-Ironlake) hardware.
 *
 * @dev:   drm device
 * @pipe:  the pipe we intend to drive with @plane
 * @plane: the display plane to sanitize
 *
 * If the plane is enabled but scanning out from the other pipe, disable
 * both the plane and that pipe so a later modeset starts from a sane,
 * fully-off configuration.
 */
static void intel_sanitize_modesetting(struct drm_device *dev,
                                       int pipe, int plane)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        u32 reg, val;

        /* PCH platforms route planes/pipes differently; nothing to fix. */
        if (HAS_PCH_SPLIT(dev))
                return;

        /* Who knows what state these registers were left in by the BIOS or
         * grub?
         *
         * If we leave the registers in a conflicting state (e.g. with the
         * display plane reading from the other pipe than the one we intend
         * to use) then when we attempt to teardown the active mode, we will
         * not disable the pipes and planes in the correct order -- leaving
         * a plane reading from a disabled pipe and possibly leading to
         * undefined behaviour.
         */

        reg = DSPCNTR(plane);
        val = I915_READ(reg);

        /* Plane is off: no conflict possible. */
        if ((val & DISPLAY_PLANE_ENABLE) == 0)
                return;
        /* !! collapses the pipe-select mask to 0/1; comparing against the
         * pipe index works because these platforms only have two pipes. */
        if (!!(val & DISPPLANE_SEL_PIPE_MASK) == pipe)
                return;

        /* This display plane is active and attached to the other CPU pipe. */
        pipe = !pipe;

        /* Disable the plane and wait for it to stop reading from the pipe. */
        intel_disable_plane(dev_priv, plane, pipe);
        intel_disable_pipe(dev_priv, pipe);
}
7930
/*
 * drm_crtc_funcs.reset hook: forget cached DPMS state and undo any
 * conflicting plane/pipe routing inherited from the BIOS.
 */
static void intel_crtc_reset(struct drm_crtc *crtc)
{
        struct drm_device *dev = crtc->dev;
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);

        /* Reset flags back to the 'unknown' status so that they
         * will be correctly set on the initial modeset.
         */
        intel_crtc->dpms_mode = -1;

        /* We need to fix up any BIOS configuration that conflicts with
         * our expectations.
         */
        intel_sanitize_modesetting(dev, intel_crtc->pipe, intel_crtc->plane);
}
7946
/* CRTC helper vtable shared by all CRTCs.  Deliberately NOT const:
 * .prepare and .commit are filled in per-platform by intel_crtc_init(). */
static struct drm_crtc_helper_funcs intel_helper_funcs = {
        .dpms = intel_crtc_dpms,
        .mode_fixup = intel_crtc_mode_fixup,
        .mode_set = intel_crtc_mode_set,
        .mode_set_base = intel_pipe_set_base,
        .mode_set_base_atomic = intel_pipe_set_base_atomic,
        .load_lut = intel_crtc_load_lut,
        .disable = intel_crtc_disable,
};
7956
/* Core drm CRTC vtable (cursor, gamma, modeset entry points, page flip). */
static const struct drm_crtc_funcs intel_crtc_funcs = {
        .reset = intel_crtc_reset,
        .cursor_set = intel_crtc_cursor_set,
        .cursor_move = intel_crtc_cursor_move,
        .gamma_set = intel_crtc_gamma_set,
        .set_config = drm_crtc_helper_set_config,
        .destroy = intel_crtc_destroy,
        .page_flip = intel_crtc_page_flip,
};
7966
/*
 * Allocate and register the CRTC for @pipe: identity gamma LUT,
 * pipe/plane mapping (with the pre-965 FBC swap quirk) and the
 * platform-specific prepare/commit helper hooks.
 */
static void intel_crtc_init(struct drm_device *dev, int pipe)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        struct intel_crtc *intel_crtc;
        int i;

        /* Trailing space after the struct holds the connector array
         * (INTELFB_CONN_LIMIT entries). */
        intel_crtc = kzalloc(sizeof(struct intel_crtc) + (INTELFB_CONN_LIMIT * sizeof(struct drm_connector *)), GFP_KERNEL);
        if (intel_crtc == NULL)
                return;

        drm_crtc_init(dev, &intel_crtc->base, &intel_crtc_funcs);

        /* Start with an identity (linear) gamma ramp. */
        drm_mode_crtc_set_gamma_size(&intel_crtc->base, 256);
        for (i = 0; i < 256; i++) {
                intel_crtc->lut_r[i] = i;
                intel_crtc->lut_g[i] = i;
                intel_crtc->lut_b[i] = i;
        }

        /* Swap pipes & planes for FBC on pre-965 */
        intel_crtc->pipe = pipe;
        intel_crtc->plane = pipe;
        if (IS_MOBILE(dev) && IS_GEN3(dev)) {
                DRM_DEBUG_KMS("swapping pipes & planes for FBC\n");
                intel_crtc->plane = !pipe;
        }

        BUG_ON(pipe >= ARRAY_SIZE(dev_priv->plane_to_crtc_mapping) ||
               dev_priv->plane_to_crtc_mapping[intel_crtc->plane] != NULL);
        dev_priv->plane_to_crtc_mapping[intel_crtc->plane] = &intel_crtc->base;
        dev_priv->pipe_to_crtc_mapping[intel_crtc->pipe] = &intel_crtc->base;

        intel_crtc_reset(&intel_crtc->base);
        intel_crtc->active = true; /* force the pipe off on setup_init_config */
        intel_crtc->bpp = 24; /* default for pre-Ironlake */

        /* NOTE(review): intel_helper_funcs is a single file-scope table
         * shared by every CRTC; reassigning prepare/commit here only works
         * because all CRTCs on one device select the same pair. */
        if (HAS_PCH_SPLIT(dev)) {
                if (pipe == 2 && IS_IVYBRIDGE(dev))
                        intel_crtc->no_pll = true;
                intel_helper_funcs.prepare = ironlake_crtc_prepare;
                intel_helper_funcs.commit = ironlake_crtc_commit;
        } else {
                intel_helper_funcs.prepare = i9xx_crtc_prepare;
                intel_helper_funcs.commit = i9xx_crtc_commit;
        }

        drm_crtc_helper_add(&intel_crtc->base, &intel_helper_funcs);

        intel_crtc->busy = false;

        /* Timer fires intel_crtc_idle_timer; presumably drives idle-based
         * downclocking -- see the handler for the actual policy. */
        setup_timer(&intel_crtc->idle_timer, intel_crtc_idle_timer,
                    (unsigned long)intel_crtc);
}
8020
8021 int intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data,
8022                                 struct drm_file *file)
8023 {
8024         drm_i915_private_t *dev_priv = dev->dev_private;
8025         struct drm_i915_get_pipe_from_crtc_id *pipe_from_crtc_id = data;
8026         struct drm_mode_object *drmmode_obj;
8027         struct intel_crtc *crtc;
8028
8029         if (!dev_priv) {
8030                 DRM_ERROR("called with no initialization\n");
8031                 return -EINVAL;
8032         }
8033
8034         drmmode_obj = drm_mode_object_find(dev, pipe_from_crtc_id->crtc_id,
8035                         DRM_MODE_OBJECT_CRTC);
8036
8037         if (!drmmode_obj) {
8038                 DRM_ERROR("no such CRTC id\n");
8039                 return -EINVAL;
8040         }
8041
8042         crtc = to_intel_crtc(obj_to_crtc(drmmode_obj));
8043         pipe_from_crtc_id->pipe = crtc->pipe;
8044
8045         return 0;
8046 }
8047
8048 static int intel_encoder_clones(struct drm_device *dev, int type_mask)
8049 {
8050         struct intel_encoder *encoder;
8051         int index_mask = 0;
8052         int entry = 0;
8053
8054         list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.head) {
8055                 if (type_mask & encoder->clone_mask)
8056                         index_mask |= (1 << entry);
8057                 entry++;
8058         }
8059
8060         return index_mask;
8061 }
8062
8063 static bool has_edp_a(struct drm_device *dev)
8064 {
8065         struct drm_i915_private *dev_priv = dev->dev_private;
8066
8067         if (!IS_MOBILE(dev))
8068                 return false;
8069
8070         if ((I915_READ(DP_A) & DP_DETECTED) == 0)
8071                 return false;
8072
8073         if (IS_GEN5(dev) &&
8074             (I915_READ(ILK_DISPLAY_CHICKEN_FUSES) & ILK_eDP_A_DISABLE))
8075                 return false;
8076
8077         return true;
8078 }
8079
/*
 * Probe and register every display output (LVDS/eDP/CRT/SDVO/HDMI/DP/
 * DVO/TV) present on this platform, then compute clone masks and turn
 * everything off ahead of the first modeset.
 *
 * The probe ORDER is significant: eDP must register before CRT/DP on
 * PCH platforms, and SDVO is tried before HDMI/DP on the multiplexed
 * ports so the shared pins are claimed correctly.
 */
static void intel_setup_outputs(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_encoder *encoder;
        bool dpd_is_edp = false;
        bool has_lvds;

        has_lvds = intel_lvds_init(dev);
        if (!has_lvds && !HAS_PCH_SPLIT(dev)) {
                /* disable the panel fitter on everything but LVDS */
                I915_WRITE(PFIT_CONTROL, 0);
        }

        if (HAS_PCH_SPLIT(dev)) {
                /* Port D may carry the eDP panel instead of external DP. */
                dpd_is_edp = intel_dpd_is_edp(dev);

                if (has_edp_a(dev))
                        intel_dp_init(dev, DP_A);

                if (dpd_is_edp && (I915_READ(PCH_DP_D) & DP_DETECTED))
                        intel_dp_init(dev, PCH_DP_D);
        }

        intel_crt_init(dev);

        if (HAS_PCH_SPLIT(dev)) {
                int found;

                if (I915_READ(HDMIB) & PORT_DETECTED) {
                        /* PCH SDVOB multiplex with HDMIB */
                        found = intel_sdvo_init(dev, PCH_SDVOB, true);
                        if (!found)
                                intel_hdmi_init(dev, HDMIB);
                        if (!found && (I915_READ(PCH_DP_B) & DP_DETECTED))
                                intel_dp_init(dev, PCH_DP_B);
                }

                if (I915_READ(HDMIC) & PORT_DETECTED)
                        intel_hdmi_init(dev, HDMIC);

                if (I915_READ(HDMID) & PORT_DETECTED)
                        intel_hdmi_init(dev, HDMID);

                if (I915_READ(PCH_DP_C) & DP_DETECTED)
                        intel_dp_init(dev, PCH_DP_C);

                /* Skip port D if it was already registered as eDP above. */
                if (!dpd_is_edp && (I915_READ(PCH_DP_D) & DP_DETECTED))
                        intel_dp_init(dev, PCH_DP_D);

        } else if (SUPPORTS_DIGITAL_OUTPUTS(dev)) {
                bool found = false;

                if (I915_READ(SDVOB) & SDVO_DETECTED) {
                        DRM_DEBUG_KMS("probing SDVOB\n");
                        found = intel_sdvo_init(dev, SDVOB, true);
                        if (!found && SUPPORTS_INTEGRATED_HDMI(dev)) {
                                DRM_DEBUG_KMS("probing HDMI on SDVOB\n");
                                intel_hdmi_init(dev, SDVOB);
                        }

                        if (!found && SUPPORTS_INTEGRATED_DP(dev)) {
                                DRM_DEBUG_KMS("probing DP_B\n");
                                intel_dp_init(dev, DP_B);
                        }
                }

                /* Before G4X SDVOC doesn't have its own detect register */

                if (I915_READ(SDVOB) & SDVO_DETECTED) {
                        DRM_DEBUG_KMS("probing SDVOC\n");
                        found = intel_sdvo_init(dev, SDVOC, false);
                }

                if (!found && (I915_READ(SDVOC) & SDVO_DETECTED)) {

                        if (SUPPORTS_INTEGRATED_HDMI(dev)) {
                                DRM_DEBUG_KMS("probing HDMI on SDVOC\n");
                                intel_hdmi_init(dev, SDVOC);
                        }
                        if (SUPPORTS_INTEGRATED_DP(dev)) {
                                DRM_DEBUG_KMS("probing DP_C\n");
                                intel_dp_init(dev, DP_C);
                        }
                }

                if (SUPPORTS_INTEGRATED_DP(dev) &&
                    (I915_READ(DP_D) & DP_DETECTED)) {
                        DRM_DEBUG_KMS("probing DP_D\n");
                        intel_dp_init(dev, DP_D);
                }
        } else if (IS_GEN2(dev))
                intel_dvo_init(dev);

        if (SUPPORTS_TV(dev))
                intel_tv_init(dev);

        /* Translate each encoder's clone_mask into drm's index-based
         * possible_clones bitmask now that all encoders are registered. */
        list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.head) {
                encoder->base.possible_crtcs = encoder->crtc_mask;
                encoder->base.possible_clones =
                        intel_encoder_clones(dev, encoder->clone_mask);
        }

        /* disable all the possible outputs/crtcs before entering KMS mode */
        drm_helper_disable_unused_functions(dev);

        if (HAS_PCH_SPLIT(dev))
                ironlake_init_pch_refclk(dev);
}
8188
8189 static void intel_user_framebuffer_destroy(struct drm_framebuffer *fb)
8190 {
8191         struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
8192
8193         drm_framebuffer_cleanup(fb);
8194         drm_gem_object_unreference_unlocked(&intel_fb->obj->base);
8195
8196         kfree(intel_fb);
8197 }
8198
8199 static int intel_user_framebuffer_create_handle(struct drm_framebuffer *fb,
8200                                                 struct drm_file *file,
8201                                                 unsigned int *handle)
8202 {
8203         struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
8204         struct drm_i915_gem_object *obj = intel_fb->obj;
8205
8206         return drm_gem_handle_create(file, &obj->base, handle);
8207 }
8208
/* Framebuffer vtable: teardown + GEM handle export for userspace. */
static const struct drm_framebuffer_funcs intel_fb_funcs = {
        .destroy = intel_user_framebuffer_destroy,
        .create_handle = intel_user_framebuffer_create_handle,
};
8213
/*
 * Validate @mode_cmd against display-engine limits and initialise
 * @intel_fb around @obj.
 *
 * Rejects Y-tiled objects, pitches that are not 64-byte aligned, and
 * pixel formats outside the RGB/packed-YUV sets below.  Returns 0 on
 * success or a negative error code.  Does not itself take a reference
 * on @obj -- the caller manages the object's lifetime.
 */
int intel_framebuffer_init(struct drm_device *dev,
                           struct intel_framebuffer *intel_fb,
                           struct drm_mode_fb_cmd2 *mode_cmd,
                           struct drm_i915_gem_object *obj)
{
        int ret;

        /* Display engine cannot scan out Y-tiled surfaces. */
        if (obj->tiling_mode == I915_TILING_Y)
                return -EINVAL;

        /* Stride must be a multiple of 64 bytes. */
        if (mode_cmd->pitches[0] & 63)
                return -EINVAL;

        switch (mode_cmd->pixel_format) {
        case DRM_FORMAT_RGB332:
        case DRM_FORMAT_RGB565:
        case DRM_FORMAT_XRGB8888:
        case DRM_FORMAT_ARGB8888:
        case DRM_FORMAT_XRGB2101010:
        case DRM_FORMAT_ARGB2101010:
                /* RGB formats are common across chipsets */
                break;
        case DRM_FORMAT_YUYV:
        case DRM_FORMAT_UYVY:
        case DRM_FORMAT_YVYU:
        case DRM_FORMAT_VYUY:
                break;
        default:
                DRM_DEBUG_KMS("unsupported pixel format %u\n",
                                mode_cmd->pixel_format);
                return -EINVAL;
        }

        ret = drm_framebuffer_init(dev, &intel_fb->base, &intel_fb_funcs);
        if (ret) {
                DRM_ERROR("framebuffer init failed %d\n", ret);
                return ret;
        }

        drm_helper_mode_fill_fb_struct(&intel_fb->base, mode_cmd);
        intel_fb->obj = obj;
        return 0;
}
8257
8258 static struct drm_framebuffer *
8259 intel_user_framebuffer_create(struct drm_device *dev,
8260                               struct drm_file *filp,
8261                               struct drm_mode_fb_cmd2 *mode_cmd)
8262 {
8263         struct drm_i915_gem_object *obj;
8264
8265         obj = to_intel_bo(drm_gem_object_lookup(dev, filp,
8266                                                 mode_cmd->handles[0]));
8267         if (&obj->base == NULL)
8268                 return ERR_PTR(-ENOENT);
8269
8270         return intel_framebuffer_create(dev, mode_cmd, obj);
8271 }
8272
/* Mode-config vtable: userspace fb creation + fbdev output-poll hook. */
static const struct drm_mode_config_funcs intel_mode_funcs = {
        .fb_create = intel_user_framebuffer_create,
        .output_poll_changed = intel_fb_output_poll_changed,
};
8277
8278 static struct drm_i915_gem_object *
8279 intel_alloc_context_page(struct drm_device *dev)
8280 {
8281         struct drm_i915_gem_object *ctx;
8282         int ret;
8283
8284         WARN_ON(!mutex_is_locked(&dev->struct_mutex));
8285
8286         ctx = i915_gem_alloc_object(dev, 4096);
8287         if (!ctx) {
8288                 DRM_DEBUG("failed to alloc power context, RC6 disabled\n");
8289                 return NULL;
8290         }
8291
8292         ret = i915_gem_object_pin(ctx, 4096, true);
8293         if (ret) {
8294                 DRM_ERROR("failed to pin power context: %d\n", ret);
8295                 goto err_unref;
8296         }
8297
8298         ret = i915_gem_object_set_to_gtt_domain(ctx, 1);
8299         if (ret) {
8300                 DRM_ERROR("failed to set-domain on power context: %d\n", ret);
8301                 goto err_unpin;
8302         }
8303
8304         return ctx;
8305
8306 err_unpin:
8307         i915_gem_object_unpin(ctx);
8308 err_unref:
8309         drm_gem_object_unreference(&ctx->base);
8310         mutex_unlock(&dev->struct_mutex);
8311         return NULL;
8312 }
8313
/*
 * Submit a frequency-change request @val through the Ironlake MEMSWCTL
 * software mailbox.  Returns false (no hardware touched) if a previous
 * command is still in flight, true once the new command is submitted.
 */
bool ironlake_set_drps(struct drm_device *dev, u8 val)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        u16 rgvswctl;

        rgvswctl = I915_READ16(MEMSWCTL);
        if (rgvswctl & MEMCTL_CMD_STS) {
                DRM_DEBUG("gpu busy, RCS change rejected\n");
                return false; /* still busy with another command */
        }

        /* Encode the change-frequency command; posting read flushes the
         * write before the command is kicked off below. */
        rgvswctl = (MEMCTL_CMD_CHFREQ << MEMCTL_CMD_SHIFT) |
                (val << MEMCTL_FREQ_SHIFT) | MEMCTL_SFCAVM;
        I915_WRITE16(MEMSWCTL, rgvswctl);
        POSTING_READ16(MEMSWCTL);

        /* Setting STS appears to hand the command to the hardware (it is
         * the same bit polled as "busy" above) -- see the Ironlake PRM. */
        rgvswctl |= MEMCTL_CMD_STS;
        I915_WRITE16(MEMSWCTL, rgvswctl);

        return true;
}
8335
/*
 * Enable Ironlake DRPS (dynamic render P-state switching): program the
 * evaluation intervals and thresholds, derive the min/max/start
 * frequency points from MEMMODECTL, switch the hardware into software
 * frequency-control mode and request the starting point.  Also snapshots
 * the counters used later for power/busyness accounting.
 */
void ironlake_enable_drps(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        u32 rgvmodectl = I915_READ(MEMMODECTL);
        u8 fmax, fmin, fstart, vstart;

        /* Enable temp reporting */
        I915_WRITE16(PMMISC, I915_READ(PMMISC) | MCPPCE_EN);
        I915_WRITE16(TSC1, I915_READ(TSC1) | TSE);

        /* 100ms RC evaluation intervals */
        I915_WRITE(RCUPEI, 100000);
        I915_WRITE(RCDNEI, 100000);

        /* Set max/min thresholds to 90ms and 80ms respectively */
        I915_WRITE(RCBMAXAVG, 90000);
        I915_WRITE(RCBMINAVG, 80000);

        I915_WRITE(MEMIHYST, 1);

        /* Set up min, max, and cur for interrupt handling */
        fmax = (rgvmodectl & MEMMODE_FMAX_MASK) >> MEMMODE_FMAX_SHIFT;
        fmin = (rgvmodectl & MEMMODE_FMIN_MASK);
        fstart = (rgvmodectl & MEMMODE_FSTART_MASK) >>
                MEMMODE_FSTART_SHIFT;

        /* Voltage for the starting frequency point, from the PXVFREQ table. */
        vstart = (I915_READ(PXVFREQ_BASE + (fstart * 4)) & PXVFREQ_PX_MASK) >>
                PXVFREQ_PX_SHIFT;

        dev_priv->fmax = fmax; /* IPS callback will increase this */
        dev_priv->fstart = fstart;

        dev_priv->max_delay = fstart;
        dev_priv->min_delay = fmin;
        dev_priv->cur_delay = fstart;

        DRM_DEBUG_DRIVER("fmax: %d, fmin: %d, fstart: %d\n",
                         fmax, fmin, fstart);

        I915_WRITE(MEMINTREN, MEMINT_CX_SUPR_EN | MEMINT_EVAL_CHG_EN);

        /*
         * Interrupts will be enabled in ironlake_irq_postinstall
         */

        I915_WRITE(VIDSTART, vstart);
        POSTING_READ(VIDSTART);

        /* Hand frequency control over to software. */
        rgvmodectl |= MEMMODE_SWMODE_EN;
        I915_WRITE(MEMMODECTL, rgvmodectl);

        if (wait_for((I915_READ(MEMSWCTL) & MEMCTL_CMD_STS) == 0, 10))
                DRM_ERROR("stuck trying to change perf mode\n");
        msleep(1);

        ironlake_set_drps(dev, fstart);

        /* Baseline the accounting counters.  NOTE(review): 0x112e0-0x112f4
         * are undocumented magic offsets here -- presumably the C0/PXR
         * residency counters; confirm against the Ironlake PRM. */
        dev_priv->last_count1 = I915_READ(0x112e4) + I915_READ(0x112e8) +
                I915_READ(0x112e0);
        dev_priv->last_time1 = jiffies_to_msecs(jiffies);
        dev_priv->last_count2 = I915_READ(0x112f4);
        getrawmonotonic(&dev_priv->last_time2);
}
8399
/*
 * Tear down Ironlake DRPS: quiesce the frequency-change interrupts,
 * return the hardware to the starting frequency point recorded by
 * ironlake_enable_drps(), and park the mailbox.
 */
void ironlake_disable_drps(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        u16 rgvswctl = I915_READ16(MEMSWCTL);

        /* Ack interrupts, disable EFC interrupt */
        I915_WRITE(MEMINTREN, I915_READ(MEMINTREN) & ~MEMINT_EVAL_CHG_EN);
        I915_WRITE(MEMINTRSTS, MEMINT_EVAL_CHG);
        I915_WRITE(DEIER, I915_READ(DEIER) & ~DE_PCU_EVENT);
        I915_WRITE(DEIIR, DE_PCU_EVENT);
        I915_WRITE(DEIMR, I915_READ(DEIMR) | DE_PCU_EVENT);

        /* Go back to the starting frequency */
        ironlake_set_drps(dev, dev_priv->fstart);
        msleep(1);
        /* Re-issuing the saved MEMSWCTL value with STS set appears to park
         * the mailbox; the msleep gives the change time to settle. */
        rgvswctl |= MEMCTL_CMD_STS;
        I915_WRITE(MEMSWCTL, rgvswctl);
        msleep(1);

}
8420
/*
 * Request GPU frequency point @val from the gen6 power controller by
 * writing the normal software-request register.  Only the low 10 bits
 * of @val are used; units match gen6_enable_rps()'s delay values
 * (presumably 50MHz steps -- see the *50 MHz conversions there).
 */
void gen6_set_rps(struct drm_device *dev, u8 val)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        u32 swreq;

        swreq = (val & 0x3ff) << 25;
        I915_WRITE(GEN6_RPNSWREQ, swreq);
}
8429
/*
 * Disable gen6 RPS: cancel the software frequency request, mask and
 * disable every PM interrupt, then clear any pending interrupt state so
 * a later gen6_enable_rps() starts clean.
 */
void gen6_disable_rps(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;

        /* NOTE(review): bit 31 of RPNSWREQ presumably aborts/idles the
         * software request -- confirm against the gen6 PRM. */
        I915_WRITE(GEN6_RPNSWREQ, 1 << 31);
        I915_WRITE(GEN6_PMINTRMSK, 0xffffffff);
        I915_WRITE(GEN6_PMIER, 0);
        /* Complete PM interrupt masking here doesn't race with the rps work
         * item again unmasking PM interrupts because that is using a different
         * register (PMIMR) to mask PM interrupts. The only risk is in leaving
         * stale bits in PMIIR and PMIMR which gen6_enable_rps will clean up. */

        spin_lock_irq(&dev_priv->rps_lock);
        dev_priv->pm_iir = 0;
        spin_unlock_irq(&dev_priv->rps_lock);

        /* Writing IIR back to itself acks (clears) the pending bits. */
        I915_WRITE(GEN6_PMIIR, I915_READ(GEN6_PMIIR));
}
8448
8449 static unsigned long intel_pxfreq(u32 vidfreq)
8450 {
8451         unsigned long freq;
8452         int div = (vidfreq & 0x3f0000) >> 16;
8453         int post = (vidfreq & 0x3000) >> 12;
8454         int pre = (vidfreq & 0x7);
8455
8456         if (!pre)
8457                 return 0;
8458
8459         freq = ((div * 133333) / ((1<<post) * pre));
8460
8461         return freq;
8462 }
8463
/*
 * Program the Ironlake energy monitor (EMON/PMON): event energy weights,
 * per-P-state power weights derived from the PXVFREQ voltage/frequency
 * table, and assorted magic calibration registers, then enable the
 * monitor.  The LCFUSE correction factor is cached for later power
 * calculations.
 */
void intel_init_emon(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        u32 lcfuse;
        u8 pxw[16];
        int i;

        /* Disable to program */
        I915_WRITE(ECR, 0);
        POSTING_READ(ECR);

        /* Program energy weights for various events */
        I915_WRITE(SDEW, 0x15040d00);
        I915_WRITE(CSIEW0, 0x007f0000);
        I915_WRITE(CSIEW1, 0x1e220004);
        I915_WRITE(CSIEW2, 0x04000004);

        for (i = 0; i < 5; i++)
                I915_WRITE(PEW + (i * 4), 0);
        for (i = 0; i < 3; i++)
                I915_WRITE(DEW + (i * 4), 0);

        /* Program P-state weights to account for frequency power adjustment */
        for (i = 0; i < 16; i++) {
                u32 pxvidfreq = I915_READ(PXVFREQ_BASE + (i * 4));
                unsigned long freq = intel_pxfreq(pxvidfreq);
                unsigned long vid = (pxvidfreq & PXVFREQ_PX_MASK) >>
                        PXVFREQ_PX_SHIFT;
                unsigned long val;

                /* Weight ~ V^2 * f, scaled to fit a byte (127*127*900 is
                 * the normalisation constant for the max V/f point). */
                val = vid * vid;
                val *= (freq / 1000);
                val *= 255;
                val /= (127*127*900);
                if (val > 0xff)
                        DRM_ERROR("bad pxval: %ld\n", val);
                pxw[i] = val;
        }
        /* Render standby states get 0 weight */
        pxw[14] = 0;
        pxw[15] = 0;

        /* Pack four byte-weights per 32-bit PXW register. */
        for (i = 0; i < 4; i++) {
                u32 val = (pxw[i*4] << 24) | (pxw[(i*4)+1] << 16) |
                        (pxw[(i*4)+2] << 8) | (pxw[(i*4)+3]);
                I915_WRITE(PXW + (i * 4), val);
        }

        /* Adjust magic regs to magic values (more experimental results) */
        I915_WRITE(OGW0, 0);
        I915_WRITE(OGW1, 0);
        I915_WRITE(EG0, 0x00007f00);
        I915_WRITE(EG1, 0x0000000e);
        I915_WRITE(EG2, 0x000e0000);
        I915_WRITE(EG3, 0x68000300);
        I915_WRITE(EG4, 0x42000000);
        I915_WRITE(EG5, 0x00140031);
        I915_WRITE(EG6, 0);
        I915_WRITE(EG7, 0);

        for (i = 0; i < 8; i++)
                I915_WRITE(PXWL + (i * 4), 0);

        /* Enable PMON + select events */
        I915_WRITE(ECR, 0x80000019);

        lcfuse = I915_READ(LCFUSE02);

        dev_priv->corr = (lcfuse & LCFUSE_HIV_MASK);
}
8534
8535 static bool intel_enable_rc6(struct drm_device *dev)
8536 {
8537         /*
8538          * Respect the kernel parameter if it is set
8539          */
8540         if (i915_enable_rc6 >= 0)
8541                 return i915_enable_rc6;
8542
8543         /*
8544          * Disable RC6 on Ironlake
8545          */
8546         if (INTEL_INFO(dev)->gen == 5)
8547                 return 0;
8548
8549         /*
8550          * Disable rc6 on Sandybridge
8551          */
8552         if (INTEL_INFO(dev)->gen == 6) {
8553                 DRM_DEBUG_DRIVER("Sandybridge: RC6 disabled\n");
8554                 return 0;
8555         }
8556         DRM_DEBUG_DRIVER("RC6 enabled\n");
8557         return 1;
8558 }
8559
8560 void gen6_enable_rps(struct drm_i915_private *dev_priv)
8561 {
8562         u32 rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
8563         u32 gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS);
8564         u32 pcu_mbox, rc6_mask = 0;
8565         u32 gtfifodbg;
8566         int cur_freq, min_freq, max_freq;
8567         int i;
8568
8569         /* Here begins a magic sequence of register writes to enable
8570          * auto-downclocking.
8571          *
8572          * Perhaps there might be some value in exposing these to
8573          * userspace...
8574          */
8575         I915_WRITE(GEN6_RC_STATE, 0);
8576         mutex_lock(&dev_priv->dev->struct_mutex);
8577
8578         /* Clear the DBG now so we don't confuse earlier errors */
8579         if ((gtfifodbg = I915_READ(GTFIFODBG))) {
8580                 DRM_ERROR("GT fifo had a previous error %x\n", gtfifodbg);
8581                 I915_WRITE(GTFIFODBG, gtfifodbg);
8582         }
8583
8584         gen6_gt_force_wake_get(dev_priv);
8585
8586         /* disable the counters and set deterministic thresholds */
8587         I915_WRITE(GEN6_RC_CONTROL, 0);
8588
8589         I915_WRITE(GEN6_RC1_WAKE_RATE_LIMIT, 1000 << 16);
8590         I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 40 << 16 | 30);
8591         I915_WRITE(GEN6_RC6pp_WAKE_RATE_LIMIT, 30);
8592         I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000);
8593         I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25);
8594
8595         for (i = 0; i < I915_NUM_RINGS; i++)
8596                 I915_WRITE(RING_MAX_IDLE(dev_priv->ring[i].mmio_base), 10);
8597
8598         I915_WRITE(GEN6_RC_SLEEP, 0);
8599         I915_WRITE(GEN6_RC1e_THRESHOLD, 1000);
8600         I915_WRITE(GEN6_RC6_THRESHOLD, 50000);
8601         I915_WRITE(GEN6_RC6p_THRESHOLD, 100000);
8602         I915_WRITE(GEN6_RC6pp_THRESHOLD, 64000); /* unused */
8603
8604         if (intel_enable_rc6(dev_priv->dev))
8605                 rc6_mask = GEN6_RC_CTL_RC6p_ENABLE |
8606                         GEN6_RC_CTL_RC6_ENABLE;
8607
8608         I915_WRITE(GEN6_RC_CONTROL,
8609                    rc6_mask |
8610                    GEN6_RC_CTL_EI_MODE(1) |
8611                    GEN6_RC_CTL_HW_ENABLE);
8612
8613         I915_WRITE(GEN6_RPNSWREQ,
8614                    GEN6_FREQUENCY(10) |
8615                    GEN6_OFFSET(0) |
8616                    GEN6_AGGRESSIVE_TURBO);
8617         I915_WRITE(GEN6_RC_VIDEO_FREQ,
8618                    GEN6_FREQUENCY(12));
8619
8620         I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 1000000);
8621         I915_WRITE(GEN6_RP_INTERRUPT_LIMITS,
8622                    18 << 24 |
8623                    6 << 16);
8624         I915_WRITE(GEN6_RP_UP_THRESHOLD, 10000);
8625         I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 1000000);
8626         I915_WRITE(GEN6_RP_UP_EI, 100000);
8627         I915_WRITE(GEN6_RP_DOWN_EI, 5000000);
8628         I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);
8629         I915_WRITE(GEN6_RP_CONTROL,
8630                    GEN6_RP_MEDIA_TURBO |
8631                    GEN6_RP_MEDIA_HW_MODE |
8632                    GEN6_RP_MEDIA_IS_GFX |
8633                    GEN6_RP_ENABLE |
8634                    GEN6_RP_UP_BUSY_AVG |
8635                    GEN6_RP_DOWN_IDLE_CONT);
8636
8637         if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0,
8638                      500))
8639                 DRM_ERROR("timeout waiting for pcode mailbox to become idle\n");
8640
8641         I915_WRITE(GEN6_PCODE_DATA, 0);
8642         I915_WRITE(GEN6_PCODE_MAILBOX,
8643                    GEN6_PCODE_READY |
8644                    GEN6_PCODE_WRITE_MIN_FREQ_TABLE);
8645         if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0,
8646                      500))
8647                 DRM_ERROR("timeout waiting for pcode mailbox to finish\n");
8648
8649         min_freq = (rp_state_cap & 0xff0000) >> 16;
8650         max_freq = rp_state_cap & 0xff;
8651         cur_freq = (gt_perf_status & 0xff00) >> 8;
8652
8653         /* Check for overclock support */
8654         if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0,
8655                      500))
8656                 DRM_ERROR("timeout waiting for pcode mailbox to become idle\n");
8657         I915_WRITE(GEN6_PCODE_MAILBOX, GEN6_READ_OC_PARAMS);
8658         pcu_mbox = I915_READ(GEN6_PCODE_DATA);
8659         if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0,
8660                      500))
8661                 DRM_ERROR("timeout waiting for pcode mailbox to finish\n");
8662         if (pcu_mbox & (1<<31)) { /* OC supported */
8663                 max_freq = pcu_mbox & 0xff;
8664                 DRM_DEBUG_DRIVER("overclocking supported, adjusting frequency max to %dMHz\n", pcu_mbox * 50);
8665         }
8666
8667         /* In units of 100MHz */
8668         dev_priv->max_delay = max_freq;
8669         dev_priv->min_delay = min_freq;
8670         dev_priv->cur_delay = cur_freq;
8671
8672         /* requires MSI enabled */
8673         I915_WRITE(GEN6_PMIER,
8674                    GEN6_PM_MBOX_EVENT |
8675                    GEN6_PM_THERMAL_EVENT |
8676                    GEN6_PM_RP_DOWN_TIMEOUT |
8677                    GEN6_PM_RP_UP_THRESHOLD |
8678                    GEN6_PM_RP_DOWN_THRESHOLD |
8679                    GEN6_PM_RP_UP_EI_EXPIRED |
8680                    GEN6_PM_RP_DOWN_EI_EXPIRED);
8681         spin_lock_irq(&dev_priv->rps_lock);
8682         WARN_ON(dev_priv->pm_iir != 0);
8683         I915_WRITE(GEN6_PMIMR, 0);
8684         spin_unlock_irq(&dev_priv->rps_lock);
8685         /* enable all PM interrupts */
8686         I915_WRITE(GEN6_PMINTRMSK, 0);
8687
8688         gen6_gt_force_wake_put(dev_priv);
8689         mutex_unlock(&dev_priv->dev->struct_mutex);
8690 }
8691
/*
 * Load the GPU-to-ring frequency table into the PCU.  For every GPU
 * frequency between max_delay and min_delay we tell the PCU which IA
 * (CPU) frequency to use as the reference for choosing the ring clock:
 * high GPU frequencies map toward the CPU's max frequency, scaled down
 * by ~90MHz per step; everything below min_freq (i.e. < ~750MHz per the
 * comment below) just uses a fixed 800MHz reference.
 */
void gen6_update_ring_freq(struct drm_i915_private *dev_priv)
{
        int min_freq = 15;
        int gpu_freq, ia_freq, max_ia_freq;
        int scaling_factor = 180;

        max_ia_freq = cpufreq_quick_get_max(0);
        /*
         * Default to measured freq if none found, PCU will ensure we don't go
         * over
         */
        if (!max_ia_freq)
                max_ia_freq = tsc_khz;

        /* Convert from kHz to MHz */
        max_ia_freq /= 1000;

        mutex_lock(&dev_priv->dev->struct_mutex);

        /*
         * For each potential GPU frequency, load a ring frequency we'd like
         * to use for memory access.  We do this by specifying the IA frequency
         * the PCU should use as a reference to determine the ring frequency.
         */
        for (gpu_freq = dev_priv->max_delay; gpu_freq >= dev_priv->min_delay;
             gpu_freq--) {
                int diff = dev_priv->max_delay - gpu_freq;

                /*
                 * For GPU frequencies less than 750MHz, just use the lowest
                 * ring freq.
                 */
                if (gpu_freq < min_freq)
                        ia_freq = 800;
                else
                        ia_freq = max_ia_freq - ((diff * scaling_factor) / 2);
                /* PCU expects the IA reference in 100MHz units. */
                ia_freq = DIV_ROUND_CLOSEST(ia_freq, 100);

                I915_WRITE(GEN6_PCODE_DATA,
                           (ia_freq << GEN6_PCODE_FREQ_IA_RATIO_SHIFT) |
                           gpu_freq);
                I915_WRITE(GEN6_PCODE_MAILBOX, GEN6_PCODE_READY |
                           GEN6_PCODE_WRITE_MIN_FREQ_TABLE);
                if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) &
                              GEN6_PCODE_READY) == 0, 10)) {
                        DRM_ERROR("pcode write of freq table timed out\n");
                        continue;
                }
        }

        mutex_unlock(&dev_priv->dev->struct_mutex);
}
8744
/*
 * ironlake_init_clock_gating - per-GPU clock gating setup for Ironlake (gen5)
 *
 * Disables the clock-gating units that must stay un-gated for FBC, CxSR
 * (self-refresh) and correct 3D operation, per the workaround notes below.
 */
static void ironlake_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE;

	/* Required for FBC */
	dspclk_gate |= DPFCUNIT_CLOCK_GATE_DISABLE |
		DPFCRUNIT_CLOCK_GATE_DISABLE |
		DPFDUNIT_CLOCK_GATE_DISABLE;
	/* Required for CxSR */
	dspclk_gate |= DPARBUNIT_CLOCK_GATE_DISABLE;

	I915_WRITE(PCH_3DCGDIS0,
		   MARIUNIT_CLOCK_GATE_DISABLE |
		   SVSMUNIT_CLOCK_GATE_DISABLE);
	I915_WRITE(PCH_3DCGDIS1,
		   VFMUNIT_CLOCK_GATE_DISABLE);

	I915_WRITE(PCH_DSPCLK_GATE_D, dspclk_gate);

	/*
	 * According to the spec the following bits should be set in
	 * order to enable memory self-refresh
	 * The bit 22/21 of 0x42004
	 * The bit 5 of 0x42020
	 * The bit 15 of 0x45000
	 */
	I915_WRITE(ILK_DISPLAY_CHICKEN2,
		   (I915_READ(ILK_DISPLAY_CHICKEN2) |
		    ILK_DPARB_GATE | ILK_VSDPFD_FULL));
	I915_WRITE(ILK_DSPCLK_GATE,
		   (I915_READ(ILK_DSPCLK_GATE) |
		    ILK_DPARB_CLK_GATE));
	I915_WRITE(DISP_ARB_CTL,
		   (I915_READ(DISP_ARB_CTL) |
		    DISP_FBC_WM_DIS));
	/* Zero the LP watermarks; presumably recomputed later by the
	 * update_wm hook — TODO confirm. */
	I915_WRITE(WM3_LP_ILK, 0);
	I915_WRITE(WM2_LP_ILK, 0);
	I915_WRITE(WM1_LP_ILK, 0);

	/*
	 * Based on the document from hardware guys the following bits
	 * should be set unconditionally in order to enable FBC.
	 * The bit 22 of 0x42000
	 * The bit 22 of 0x42004
	 * The bit 7,8,9 of 0x42020.
	 */
	if (IS_IRONLAKE_M(dev)) {
		I915_WRITE(ILK_DISPLAY_CHICKEN1,
			   I915_READ(ILK_DISPLAY_CHICKEN1) |
			   ILK_FBCQ_DIS);
		I915_WRITE(ILK_DISPLAY_CHICKEN2,
			   I915_READ(ILK_DISPLAY_CHICKEN2) |
			   ILK_DPARB_GATE);
		I915_WRITE(ILK_DSPCLK_GATE,
			   I915_READ(ILK_DSPCLK_GATE) |
			   ILK_DPFC_DIS1 |
			   ILK_DPFC_DIS2 |
			   ILK_CLK_FBC);
	}

	I915_WRITE(ILK_DISPLAY_CHICKEN2,
		   I915_READ(ILK_DISPLAY_CHICKEN2) |
		   ILK_ELPIN_409_SELECT);
	I915_WRITE(_3D_CHICKEN2,
		   _3D_CHICKEN2_WM_READ_PIPELINED << 16 |
		   _3D_CHICKEN2_WM_READ_PIPELINED);
}
8813
/*
 * gen6_init_clock_gating - per-GPU clock gating setup for Sandybridge (gen6)
 *
 * Applies the documented clock-gating workarounds needed for stable
 * rendering, memory self-refresh and FBC, then disables display-plane
 * trickle feed on every pipe.
 */
static void gen6_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe;
	uint32_t dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE;

	I915_WRITE(PCH_DSPCLK_GATE_D, dspclk_gate);

	I915_WRITE(ILK_DISPLAY_CHICKEN2,
		   I915_READ(ILK_DISPLAY_CHICKEN2) |
		   ILK_ELPIN_409_SELECT);

	/* Zero the LP watermarks; presumably recomputed later by the
	 * update_wm hook — TODO confirm. */
	I915_WRITE(WM3_LP_ILK, 0);
	I915_WRITE(WM2_LP_ILK, 0);
	I915_WRITE(WM1_LP_ILK, 0);

	/* According to the BSpec vol1g, bit 12 (RCPBUNIT) clock
	 * gating disable must be set.  Failure to set it results in
	 * flickering pixels due to Z write ordering failures after
	 * some amount of runtime in the Mesa "fire" demo, and Unigine
	 * Sanctuary and Tropics, and apparently anything else with
	 * alpha test or pixel discard.
	 *
	 * According to the spec, bit 11 (RCCUNIT) must also be set,
	 * but we didn't debug actual testcases to find it out.
	 */
	I915_WRITE(GEN6_UCGCTL2,
		   GEN6_RCPBUNIT_CLOCK_GATE_DISABLE |
		   GEN6_RCCUNIT_CLOCK_GATE_DISABLE);

	/*
	 * According to the spec the following bits should be
	 * set in order to enable memory self-refresh and fbc:
	 * The bit21 and bit22 of 0x42000
	 * The bit21 and bit22 of 0x42004
	 * The bit5 and bit7 of 0x42020
	 * The bit14 of 0x70180
	 * The bit14 of 0x71180
	 */
	I915_WRITE(ILK_DISPLAY_CHICKEN1,
		   I915_READ(ILK_DISPLAY_CHICKEN1) |
		   ILK_FBCQ_DIS | ILK_PABSTRETCH_DIS);
	I915_WRITE(ILK_DISPLAY_CHICKEN2,
		   I915_READ(ILK_DISPLAY_CHICKEN2) |
		   ILK_DPARB_GATE | ILK_VSDPFD_FULL);
	I915_WRITE(ILK_DSPCLK_GATE,
		   I915_READ(ILK_DSPCLK_GATE) |
		   ILK_DPARB_CLK_GATE  |
		   ILK_DPFD_CLK_GATE);

	/* Disable trickle feed on all planes (the 0x70180/0x71180 bit14
	 * from the note above) and flush each plane to latch the change. */
	for_each_pipe(pipe) {
		I915_WRITE(DSPCNTR(pipe),
			   I915_READ(DSPCNTR(pipe)) |
			   DISPPLANE_TRICKLE_FEED_DISABLE);
		intel_flush_display_plane(dev_priv, pipe);
	}
}
8871
/*
 * ivybridge_init_clock_gating - per-GPU clock gating setup for Ivybridge
 * (gen7)
 *
 * Programs the IVB display clock gating and chicken bits, then disables
 * display-plane trickle feed on every pipe.
 */
static void ivybridge_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe;
	uint32_t dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE;

	I915_WRITE(PCH_DSPCLK_GATE_D, dspclk_gate);

	/* Zero the LP watermarks; presumably recomputed later by the
	 * update_wm hook — TODO confirm. */
	I915_WRITE(WM3_LP_ILK, 0);
	I915_WRITE(WM2_LP_ILK, 0);
	I915_WRITE(WM1_LP_ILK, 0);

	I915_WRITE(ILK_DSPCLK_GATE, IVB_VRHUNIT_CLK_GATE);

	I915_WRITE(IVB_CHICKEN3,
		   CHICKEN3_DGMG_REQ_OUT_FIX_DISABLE |
		   CHICKEN3_DGMG_DONE_FIX_DISABLE);

	/* Disable trickle feed and flush each plane to latch the change. */
	for_each_pipe(pipe) {
		I915_WRITE(DSPCNTR(pipe),
			   I915_READ(DSPCNTR(pipe)) |
			   DISPPLANE_TRICKLE_FEED_DISABLE);
		intel_flush_display_plane(dev_priv, pipe);
	}
}
8897
/*
 * valleyview_init_clock_gating - per-GPU clock gating setup for Valleyview
 *
 * NOTE(review): several of the workarounds below are named/commented for
 * IVB (RCZUNIT, RHWO, L3 control).  VLV shares the gen7 render core, so
 * they are presumably applicable here too — confirm against the VLV
 * workaround list.
 */
static void valleyview_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe;
	uint32_t dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE;

	I915_WRITE(PCH_DSPCLK_GATE_D, dspclk_gate);

	/* Zero the LP watermarks; presumably recomputed later by the
	 * update_wm hook — TODO confirm. */
	I915_WRITE(WM3_LP_ILK, 0);
	I915_WRITE(WM2_LP_ILK, 0);
	I915_WRITE(WM1_LP_ILK, 0);

	/* According to the spec, bit 13 (RCZUNIT) must be set on IVB.
	 * This implements the WaDisableRCZUnitClockGating workaround.
	 */
	I915_WRITE(GEN6_UCGCTL2, GEN6_RCZUNIT_CLOCK_GATE_DISABLE);

	I915_WRITE(ILK_DSPCLK_GATE, IVB_VRHUNIT_CLK_GATE);

	I915_WRITE(IVB_CHICKEN3,
		   CHICKEN3_DGMG_REQ_OUT_FIX_DISABLE |
		   CHICKEN3_DGMG_DONE_FIX_DISABLE);

	/* Apply the WaDisableRHWOOptimizationForRenderHang workaround. */
	I915_WRITE(GEN7_COMMON_SLICE_CHICKEN1,
		   GEN7_CSC1_RHWO_OPT_DISABLE_IN_RCC);

	/* WaApplyL3ControlAndL3ChickenMode requires those two on Ivy Bridge */
	I915_WRITE(GEN7_L3CNTLREG1, GEN7_WA_FOR_GEN7_L3_CONTROL);
	I915_WRITE(GEN7_L3_CHICKEN_MODE_REGISTER, GEN7_WA_L3_CHICKEN_MODE);

	/* This is required by WaCatErrorRejectionIssue */
	I915_WRITE(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG,
		   I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) |
		   GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB);

	/* Disable trickle feed and flush each plane to latch the change. */
	for_each_pipe(pipe) {
		I915_WRITE(DSPCNTR(pipe),
			   I915_READ(DSPCNTR(pipe)) |
			   DISPPLANE_TRICKLE_FEED_DISABLE);
		intel_flush_display_plane(dev_priv, pipe);
	}

	I915_WRITE(CACHE_MODE_1, I915_READ(CACHE_MODE_1) |
		   (PIXEL_SUBSPAN_COLLECT_OPT_DISABLE << 16) |
		   PIXEL_SUBSPAN_COLLECT_OPT_DISABLE);
}
8945
/*
 * g4x_init_clock_gating - per-GPU clock gating setup for G4x
 *
 * Disables render clock gating for the VF/GS/CL units and display clock
 * gating for the VRH/overlay units; GM45 additionally needs the DSS unit
 * un-gated.
 */
static void g4x_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t dspclk_gate;

	I915_WRITE(RENCLK_GATE_D1, 0);
	I915_WRITE(RENCLK_GATE_D2, VF_UNIT_CLOCK_GATE_DISABLE |
		   GS_UNIT_CLOCK_GATE_DISABLE |
		   CL_UNIT_CLOCK_GATE_DISABLE);
	I915_WRITE(RAMCLK_GATE_D, 0);
	dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE |
		OVRUNIT_CLOCK_GATE_DISABLE |
		OVCUNIT_CLOCK_GATE_DISABLE;
	if (IS_GM45(dev))
		dspclk_gate |= DSSUNIT_CLOCK_GATE_DISABLE;
	I915_WRITE(DSPCLK_GATE_D, dspclk_gate);
}
8963
/*
 * crestline_init_clock_gating - per-GPU clock gating setup for Crestline
 * (965GM)
 *
 * Leaves only RCC clock gating disabled and zeroes the remaining gating
 * controls (including the 16-bit DEUC register).
 */
static void crestline_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	I915_WRITE(RENCLK_GATE_D1, I965_RCC_CLOCK_GATE_DISABLE);
	I915_WRITE(RENCLK_GATE_D2, 0);
	I915_WRITE(DSPCLK_GATE_D, 0);
	I915_WRITE(RAMCLK_GATE_D, 0);
	I915_WRITE16(DEUC, 0);
}
8974
/*
 * broadwater_init_clock_gating - per-GPU clock gating setup for Broadwater
 * (965G)
 *
 * Disables clock gating for the RCZ/RCC/RCPB/ISC/FBC render units.
 */
static void broadwater_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	I915_WRITE(RENCLK_GATE_D1, I965_RCZ_CLOCK_GATE_DISABLE |
		   I965_RCC_CLOCK_GATE_DISABLE |
		   I965_RCPB_CLOCK_GATE_DISABLE |
		   I965_ISC_CLOCK_GATE_DISABLE |
		   I965_FBC_CLOCK_GATE_DISABLE);
	I915_WRITE(RENCLK_GATE_D2, 0);
}
8986
/*
 * gen3_init_clock_gating - per-GPU clock gating setup for gen3 chips
 *
 * Enables PLL-off-in-D3, GFX and dot clock gating via the D_STATE
 * register (read-modify-write, preserving the other D_STATE bits).
 */
static void gen3_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 dstate = I915_READ(D_STATE);

	dstate |= DSTATE_PLL_D3_OFF | DSTATE_GFX_CLOCK_GATING |
		DSTATE_DOT_CLOCK_GATING;
	I915_WRITE(D_STATE, dstate);
}
8996
/*
 * i85x_init_clock_gating - per-GPU clock gating setup for 85x
 *
 * Disables clock gating for the SV unit only.
 */
static void i85x_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	I915_WRITE(RENCLK_GATE_D1, SV_CLOCK_GATE_DISABLE);
}
9003
/*
 * i830_init_clock_gating - per-GPU clock gating setup for 830
 *
 * Disables clock gating for the overlay unit only.
 */
static void i830_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	I915_WRITE(DSPCLK_GATE_D, OVRUNIT_CLOCK_GATE_DISABLE);
}
9010
/*
 * ibx_init_clock_gating - PCH clock gating setup for Ibex Peak
 */
static void ibx_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	/*
	 * On Ibex Peak and Cougar Point, we need to disable clock
	 * gating for the panel power sequencer or it will fail to
	 * start up when no ports are active.
	 */
	I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE);
}
9022
/*
 * cpt_init_clock_gating - PCH clock gating setup for Cougar Point
 */
static void cpt_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe;

	/*
	 * On Ibex Peak and Cougar Point, we need to disable clock
	 * gating for the panel power sequencer or it will fail to
	 * start up when no ports are active.
	 */
	I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE);
	I915_WRITE(SOUTH_CHICKEN2, I915_READ(SOUTH_CHICKEN2) |
		   DPLS_EDP_PPS_FIX_DIS);
	/* Without this, mode sets may fail silently on FDI */
	for_each_pipe(pipe)
		I915_WRITE(TRANS_CHICKEN2(pipe), TRANS_AUTOTRAIN_GEN_STALL_DIS);
}
9040
9041 static void ironlake_teardown_rc6(struct drm_device *dev)
9042 {
9043         struct drm_i915_private *dev_priv = dev->dev_private;
9044
9045         if (dev_priv->renderctx) {
9046                 i915_gem_object_unpin(dev_priv->renderctx);
9047                 drm_gem_object_unreference(&dev_priv->renderctx->base);
9048                 dev_priv->renderctx = NULL;
9049         }
9050
9051         if (dev_priv->pwrctx) {
9052                 i915_gem_object_unpin(dev_priv->pwrctx);
9053                 drm_gem_object_unreference(&dev_priv->pwrctx->base);
9054                 dev_priv->pwrctx = NULL;
9055         }
9056 }
9057
/*
 * ironlake_disable_rc6 - turn off RC6 power saving on Ironlake
 *
 * If RC6 was active (PWRCTXA non-zero): wake the GPU out of RC6 via
 * RCX_SW_EXIT, wait for the render standby state machine to report ON,
 * disable the power context, then restore RSTDBYCTL.  Finally frees the
 * RC6 context pages.
 */
static void ironlake_disable_rc6(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (I915_READ(PWRCTXA)) {
		/* Wake the GPU, prevent RC6, then restore RSTDBYCTL */
		I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) | RCX_SW_EXIT);
		/* Poll up to 50ms for the standby state machine to settle. */
		wait_for(((I915_READ(RSTDBYCTL) & RSX_STATUS_MASK) == RSX_STATUS_ON),
			 50);

		I915_WRITE(PWRCTXA, 0);
		POSTING_READ(PWRCTXA);

		I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) & ~RCX_SW_EXIT);
		POSTING_READ(RSTDBYCTL);
	}

	ironlake_teardown_rc6(dev);
}
9077
9078 static int ironlake_setup_rc6(struct drm_device *dev)
9079 {
9080         struct drm_i915_private *dev_priv = dev->dev_private;
9081
9082         if (dev_priv->renderctx == NULL)
9083                 dev_priv->renderctx = intel_alloc_context_page(dev);
9084         if (!dev_priv->renderctx)
9085                 return -ENOMEM;
9086
9087         if (dev_priv->pwrctx == NULL)
9088                 dev_priv->pwrctx = intel_alloc_context_page(dev);
9089         if (!dev_priv->pwrctx) {
9090                 ironlake_teardown_rc6(dev);
9091                 return -ENOMEM;
9092         }
9093
9094         return 0;
9095 }
9096
/*
 * ironlake_enable_rc6 - enable RC6 render power saving on Ironlake
 *
 * Allocates the RC6 context pages, emits a MI_SET_CONTEXT sequence on the
 * render ring so the hardware can save/restore render state, waits for it
 * to complete, and finally enables the power context.  Bails out silently
 * (after cleanup) on any failure; RC6 is best-effort here.
 */
void ironlake_enable_rc6(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	/* rc6 disabled by default due to repeated reports of hanging during
	 * boot and resume.
	 */
	if (!intel_enable_rc6(dev))
		return;

	mutex_lock(&dev->struct_mutex);
	ret = ironlake_setup_rc6(dev);
	if (ret) {
		mutex_unlock(&dev->struct_mutex);
		return;
	}

	/*
	 * GPU can automatically power down the render unit if given a page
	 * to save state.
	 */
	ret = BEGIN_LP_RING(6);
	if (ret) {
		ironlake_teardown_rc6(dev);
		mutex_unlock(&dev->struct_mutex);
		return;
	}

	/* Emit exactly the 6 dwords reserved above. */
	OUT_RING(MI_SUSPEND_FLUSH | MI_SUSPEND_FLUSH_EN);
	OUT_RING(MI_SET_CONTEXT);
	OUT_RING(dev_priv->renderctx->gtt_offset |
		 MI_MM_SPACE_GTT |
		 MI_SAVE_EXT_STATE_EN |
		 MI_RESTORE_EXT_STATE_EN |
		 MI_RESTORE_INHIBIT);
	OUT_RING(MI_SUSPEND_FLUSH);
	OUT_RING(MI_NOOP);
	OUT_RING(MI_FLUSH);
	ADVANCE_LP_RING();

	/*
	 * Wait for the command parser to advance past MI_SET_CONTEXT. The HW
	 * does an implicit flush, combined with MI_FLUSH above, it should be
	 * safe to assume that renderctx is valid
	 */
	ret = intel_wait_ring_idle(LP_RING(dev_priv));
	if (ret) {
		DRM_ERROR("failed to enable ironlake power power savings\n");
		ironlake_teardown_rc6(dev);
		mutex_unlock(&dev->struct_mutex);
		return;
	}

	/* Point the hardware at the power context and arm RC6. */
	I915_WRITE(PWRCTXA, dev_priv->pwrctx->gtt_offset | PWRCTX_EN);
	I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) & ~RCX_SW_EXIT);
	mutex_unlock(&dev->struct_mutex);
}
9155
9156 void intel_init_clock_gating(struct drm_device *dev)
9157 {
9158         struct drm_i915_private *dev_priv = dev->dev_private;
9159
9160         dev_priv->display.init_clock_gating(dev);
9161
9162         if (dev_priv->display.init_pch_clock_gating)
9163                 dev_priv->display.init_pch_clock_gating(dev);
9164 }
9165
/*
 * intel_init_display - populate the per-chip display function table
 *
 * Fills dev_priv->display with the platform-appropriate implementations
 * for modesetting, FBC, clock speed queries, forcewake, clock gating,
 * watermarks, FDI link training, ELD (audio) and page flipping.  Pure
 * function-pointer selection; no hardware is touched except the IVB
 * MT-forcewake probe and the gen5 latency read noted below.
 */
static void intel_init_display(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	/* We always want a DPMS function */
	if (HAS_PCH_SPLIT(dev)) {
		dev_priv->display.dpms = ironlake_crtc_dpms;
		dev_priv->display.crtc_mode_set = ironlake_crtc_mode_set;
		dev_priv->display.update_plane = ironlake_update_plane;
	} else {
		dev_priv->display.dpms = i9xx_crtc_dpms;
		dev_priv->display.crtc_mode_set = i9xx_crtc_mode_set;
		dev_priv->display.update_plane = i9xx_update_plane;
	}

	/* Frame buffer compression hooks, only where hardware supports it. */
	if (I915_HAS_FBC(dev)) {
		if (HAS_PCH_SPLIT(dev)) {
			dev_priv->display.fbc_enabled = ironlake_fbc_enabled;
			dev_priv->display.enable_fbc = ironlake_enable_fbc;
			dev_priv->display.disable_fbc = ironlake_disable_fbc;
		} else if (IS_GM45(dev)) {
			dev_priv->display.fbc_enabled = g4x_fbc_enabled;
			dev_priv->display.enable_fbc = g4x_enable_fbc;
			dev_priv->display.disable_fbc = g4x_disable_fbc;
		} else if (IS_CRESTLINE(dev)) {
			dev_priv->display.fbc_enabled = i8xx_fbc_enabled;
			dev_priv->display.enable_fbc = i8xx_enable_fbc;
			dev_priv->display.disable_fbc = i8xx_disable_fbc;
		}
		/* 855GM needs testing */
	}

	/* Returns the core display clock speed */
	if (IS_VALLEYVIEW(dev))
		dev_priv->display.get_display_clock_speed =
			valleyview_get_display_clock_speed;
	else if (IS_I945G(dev) || (IS_G33(dev) && !IS_PINEVIEW_M(dev)))
		dev_priv->display.get_display_clock_speed =
			i945_get_display_clock_speed;
	else if (IS_I915G(dev))
		dev_priv->display.get_display_clock_speed =
			i915_get_display_clock_speed;
	else if (IS_I945GM(dev) || IS_845G(dev) || IS_PINEVIEW_M(dev))
		dev_priv->display.get_display_clock_speed =
			i9xx_misc_get_display_clock_speed;
	else if (IS_I915GM(dev))
		dev_priv->display.get_display_clock_speed =
			i915gm_get_display_clock_speed;
	else if (IS_I865G(dev))
		dev_priv->display.get_display_clock_speed =
			i865_get_display_clock_speed;
	else if (IS_I85X(dev))
		dev_priv->display.get_display_clock_speed =
			i855_get_display_clock_speed;
	else /* 852, 830 */
		dev_priv->display.get_display_clock_speed =
			i830_get_display_clock_speed;

	/* For FIFO watermark updates */
	if (HAS_PCH_SPLIT(dev)) {
		dev_priv->display.force_wake_get = __gen6_gt_force_wake_get;
		dev_priv->display.force_wake_put = __gen6_gt_force_wake_put;

		/* IVB configs may use multi-threaded forcewake */
		if (IS_IVYBRIDGE(dev)) {
			u32	ecobus;

			/* A small trick here - if the bios hasn't configured MT forcewake,
			 * and if the device is in RC6, then force_wake_mt_get will not wake
			 * the device and the ECOBUS read will return zero. Which will be
			 * (correctly) interpreted by the test below as MT forcewake being
			 * disabled.
			 */
			mutex_lock(&dev->struct_mutex);
			__gen6_gt_force_wake_mt_get(dev_priv);
			ecobus = I915_READ_NOTRACE(ECOBUS);
			__gen6_gt_force_wake_mt_put(dev_priv);
			mutex_unlock(&dev->struct_mutex);

			if (ecobus & FORCEWAKE_MT_ENABLE) {
				DRM_DEBUG_KMS("Using MT version of forcewake\n");
				dev_priv->display.force_wake_get =
					__gen6_gt_force_wake_mt_get;
				dev_priv->display.force_wake_put =
					__gen6_gt_force_wake_mt_put;
			}
		}

		if (HAS_PCH_IBX(dev))
			dev_priv->display.init_pch_clock_gating = ibx_init_clock_gating;
		else if (HAS_PCH_CPT(dev))
			dev_priv->display.init_pch_clock_gating = cpt_init_clock_gating;

		/* Per-generation watermark/FDI/clock-gating/ELD hooks. */
		if (IS_GEN5(dev)) {
			if (I915_READ(MLTR_ILK) & ILK_SRLT_MASK)
				dev_priv->display.update_wm = ironlake_update_wm;
			else {
				DRM_DEBUG_KMS("Failed to get proper latency. "
					      "Disable CxSR\n");
				dev_priv->display.update_wm = NULL;
			}
			dev_priv->display.fdi_link_train = ironlake_fdi_link_train;
			dev_priv->display.init_clock_gating = ironlake_init_clock_gating;
			dev_priv->display.write_eld = ironlake_write_eld;
		} else if (IS_GEN6(dev)) {
			if (SNB_READ_WM0_LATENCY()) {
				dev_priv->display.update_wm = sandybridge_update_wm;
				dev_priv->display.update_sprite_wm = sandybridge_update_sprite_wm;
			} else {
				DRM_DEBUG_KMS("Failed to read display plane latency. "
					      "Disable CxSR\n");
				dev_priv->display.update_wm = NULL;
			}
			dev_priv->display.fdi_link_train = gen6_fdi_link_train;
			dev_priv->display.init_clock_gating = gen6_init_clock_gating;
			dev_priv->display.write_eld = ironlake_write_eld;
		} else if (IS_IVYBRIDGE(dev)) {
			/* FIXME: detect B0+ stepping and use auto training */
			dev_priv->display.fdi_link_train = ivb_manual_fdi_link_train;
			if (SNB_READ_WM0_LATENCY()) {
				dev_priv->display.update_wm = sandybridge_update_wm;
				dev_priv->display.update_sprite_wm = sandybridge_update_sprite_wm;
			} else {
				DRM_DEBUG_KMS("Failed to read display plane latency. "
					      "Disable CxSR\n");
				dev_priv->display.update_wm = NULL;
			}
			dev_priv->display.init_clock_gating = ivybridge_init_clock_gating;
			dev_priv->display.write_eld = ironlake_write_eld;
		} else
			dev_priv->display.update_wm = NULL;
	} else if (IS_VALLEYVIEW(dev)) {
		dev_priv->display.update_wm = valleyview_update_wm;
		dev_priv->display.init_clock_gating =
			valleyview_init_clock_gating;
		dev_priv->display.force_wake_get = vlv_force_wake_get;
		dev_priv->display.force_wake_put = vlv_force_wake_put;
	} else if (IS_PINEVIEW(dev)) {
		/* Pineview CxSR needs a known memory latency; disable it if
		 * the installed memory configuration isn't in the table. */
		if (!intel_get_cxsr_latency(IS_PINEVIEW_G(dev),
					    dev_priv->is_ddr3,
					    dev_priv->fsb_freq,
					    dev_priv->mem_freq)) {
			DRM_INFO("failed to find known CxSR latency "
				 "(found ddr%s fsb freq %d, mem freq %d), "
				 "disabling CxSR\n",
				 (dev_priv->is_ddr3 == 1) ? "3" : "2",
				 dev_priv->fsb_freq, dev_priv->mem_freq);
			/* Disable CxSR and never update its watermark again */
			pineview_disable_cxsr(dev);
			dev_priv->display.update_wm = NULL;
		} else
			dev_priv->display.update_wm = pineview_update_wm;
		dev_priv->display.init_clock_gating = gen3_init_clock_gating;
	} else if (IS_G4X(dev)) {
		dev_priv->display.write_eld = g4x_write_eld;
		dev_priv->display.update_wm = g4x_update_wm;
		dev_priv->display.init_clock_gating = g4x_init_clock_gating;
	} else if (IS_GEN4(dev)) {
		dev_priv->display.update_wm = i965_update_wm;
		if (IS_CRESTLINE(dev))
			dev_priv->display.init_clock_gating = crestline_init_clock_gating;
		else if (IS_BROADWATER(dev))
			dev_priv->display.init_clock_gating = broadwater_init_clock_gating;
	} else if (IS_GEN3(dev)) {
		dev_priv->display.update_wm = i9xx_update_wm;
		dev_priv->display.get_fifo_size = i9xx_get_fifo_size;
		dev_priv->display.init_clock_gating = gen3_init_clock_gating;
	} else if (IS_I865G(dev)) {
		dev_priv->display.update_wm = i830_update_wm;
		dev_priv->display.init_clock_gating = i85x_init_clock_gating;
		dev_priv->display.get_fifo_size = i830_get_fifo_size;
	} else if (IS_I85X(dev)) {
		dev_priv->display.update_wm = i9xx_update_wm;
		dev_priv->display.get_fifo_size = i85x_get_fifo_size;
		dev_priv->display.init_clock_gating = i85x_init_clock_gating;
	} else {
		dev_priv->display.update_wm = i830_update_wm;
		dev_priv->display.init_clock_gating = i830_init_clock_gating;
		if (IS_845G(dev))
			dev_priv->display.get_fifo_size = i845_get_fifo_size;
		else
			dev_priv->display.get_fifo_size = i830_get_fifo_size;
	}

	/* Default just returns -ENODEV to indicate unsupported */
	dev_priv->display.queue_flip = intel_default_queue_flip;

	switch (INTEL_INFO(dev)->gen) {
	case 2:
		dev_priv->display.queue_flip = intel_gen2_queue_flip;
		break;

	case 3:
		dev_priv->display.queue_flip = intel_gen3_queue_flip;
		break;

	case 4:
	case 5:
		dev_priv->display.queue_flip = intel_gen4_queue_flip;
		break;

	case 6:
		dev_priv->display.queue_flip = intel_gen6_queue_flip;
		break;
	case 7:
		dev_priv->display.queue_flip = intel_gen7_queue_flip;
		break;
	}
}
9376
9377 /*
9378  * Some BIOSes insist on assuming the GPU's pipe A is enabled at suspend,
9379  * resume, or other times.  This quirk makes sure that's the case for
9380  * affected systems.
9381  */
9382 static void quirk_pipea_force(struct drm_device *dev)
9383 {
9384         struct drm_i915_private *dev_priv = dev->dev_private;
9385
9386         dev_priv->quirks |= QUIRK_PIPEA_FORCE;
9387         DRM_INFO("applying pipe a force quirk\n");
9388 }
9389
9390 /*
9391  * Some machines (Lenovo U160) do not work with SSC on LVDS for some reason
9392  */
9393 static void quirk_ssc_force_disable(struct drm_device *dev)
9394 {
9395         struct drm_i915_private *dev_priv = dev->dev_private;
9396         dev_priv->quirks |= QUIRK_LVDS_SSC_DISABLE;
9397         DRM_INFO("applying lvds SSC disable quirk\n");
9398 }
9399
9400 /*
9401  * A machine (e.g. Acer Aspire 5734Z) may need to invert the panel backlight
9402  * brightness value
9403  */
9404 static void quirk_invert_brightness(struct drm_device *dev)
9405 {
9406         struct drm_i915_private *dev_priv = dev->dev_private;
9407         dev_priv->quirks |= QUIRK_INVERT_BRIGHTNESS;
9408         DRM_INFO("applying inverted panel brightness quirk\n");
9409 }
9410
/*
 * One entry of the per-machine quirk table: when the probed PCI device
 * matches, @hook is invoked during init to apply the workaround.
 */
struct intel_quirk {
	int device;		/* PCI device ID */
	int subsystem_vendor;	/* subsystem vendor ID, or PCI_ANY_ID */
	int subsystem_device;	/* subsystem device ID, or PCI_ANY_ID */
	void (*hook)(struct drm_device *dev);	/* quirk to apply */
};
9417
/* Machine-specific quirk table, matched against the PCI IDs at init time.
 * NOTE(review): this table has external linkage — if nothing outside this
 * file references it, it should be static; confirm before changing. */
struct intel_quirk intel_quirks[] = {
	/* HP Mini needs pipe A force quirk (LP: #322104) */
	{ 0x27ae, 0x103c, 0x361a, quirk_pipea_force },

	/* Thinkpad R31 needs pipe A force quirk */
	{ 0x3577, 0x1014, 0x0505, quirk_pipea_force },
	/* Toshiba Protege R-205, S-209 needs pipe A force quirk */
	{ 0x2592, 0x1179, 0x0001, quirk_pipea_force },

	/* ThinkPad X30 needs pipe A force quirk (LP: #304614) */
	{ 0x3577,  0x1014, 0x0513, quirk_pipea_force },
	/* ThinkPad X40 needs pipe A force quirk */
	/* NOTE(review): no table entry follows the X40 comment above —
	 * either the entry is missing or the comment is stale; confirm
	 * against the quirk's history before adding an ID. */

	/* ThinkPad T60 needs pipe A force quirk (bug #16494) */
	{ 0x2782, 0x17aa, 0x201a, quirk_pipea_force },

	/* 855 & before need to leave pipe A & dpll A up */
	{ 0x3582, PCI_ANY_ID, PCI_ANY_ID, quirk_pipea_force },
	{ 0x2562, PCI_ANY_ID, PCI_ANY_ID, quirk_pipea_force },

	/* Lenovo U160 cannot use SSC on LVDS */
	{ 0x0046, 0x17aa, 0x3920, quirk_ssc_force_disable },

	/* Sony Vaio Y cannot use SSC on LVDS */
	{ 0x0046, 0x104d, 0x9076, quirk_ssc_force_disable },

	/* Acer Aspire 5734Z must invert backlight brightness */
	{ 0x2a42, 0x1025, 0x0459, quirk_invert_brightness },
};
9447
9448 static void intel_init_quirks(struct drm_device *dev)
9449 {
9450         struct pci_dev *d = dev->pdev;
9451         int i;
9452
9453         for (i = 0; i < ARRAY_SIZE(intel_quirks); i++) {
9454                 struct intel_quirk *q = &intel_quirks[i];
9455
9456                 if (d->device == q->device &&
9457                     (d->subsystem_vendor == q->subsystem_vendor ||
9458                      q->subsystem_vendor == PCI_ANY_ID) &&
9459                     (d->subsystem_device == q->subsystem_device ||
9460                      q->subsystem_device == PCI_ANY_ID))
9461                         q->hook(dev);
9462         }
9463 }
9464
/* Disable the VGA plane that we never use */
static void i915_disable_vga(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u8 sr1;
	u32 vga_reg;

	/* The VGA control register lives at a different offset on PCH-split parts. */
	if (HAS_PCH_SPLIT(dev))
		vga_reg = CPU_VGACNTRL;
	else
		vga_reg = VGACNTRL;

	/* Hold the legacy VGA I/O range while poking the sequencer registers. */
	vga_get_uninterruptible(dev->pdev, VGA_RSRC_LEGACY_IO);
	outb(1, VGA_SR_INDEX);			/* select sequencer register SR01 */
	sr1 = inb(VGA_SR_DATA);
	outb(sr1 | 1<<5, VGA_SR_DATA);		/* SR01 bit 5: screen off */
	vga_put(dev->pdev, VGA_RSRC_LEGACY_IO);
	udelay(300);	/* let the screen-off settle before killing the plane */

	I915_WRITE(vga_reg, VGA_DISP_DISABLE);
	POSTING_READ(vga_reg);	/* flush the disable to the hardware */
}
9487
/*
 * Core KMS initialization: set up the DRM mode config, apply machine
 * quirks, install generation-specific display vfuncs, create the
 * crtcs/planes/outputs and enable power-management features.
 */
void intel_modeset_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int i, ret;

	drm_mode_config_init(dev);

	dev->mode_config.min_width = 0;
	dev->mode_config.min_height = 0;

	dev->mode_config.preferred_depth = 24;
	dev->mode_config.prefer_shadow = 1;

	dev->mode_config.funcs = (void *)&intel_mode_funcs;

	/* Apply per-machine workarounds before touching the hardware. */
	intel_init_quirks(dev);

	intel_init_display(dev);

	/* Maximum framebuffer size supported by the display hardware. */
	if (IS_GEN2(dev)) {
		dev->mode_config.max_width = 2048;
		dev->mode_config.max_height = 2048;
	} else if (IS_GEN3(dev)) {
		dev->mode_config.max_width = 4096;
		dev->mode_config.max_height = 4096;
	} else {
		dev->mode_config.max_width = 8192;
		dev->mode_config.max_height = 8192;
	}
	dev->mode_config.fb_base = dev->agp->base;

	DRM_DEBUG_KMS("%d display pipe%s available.\n",
		      dev_priv->num_pipe, dev_priv->num_pipe > 1 ? "s" : "");

	/* One crtc and one sprite plane per pipe; plane failure is non-fatal. */
	for (i = 0; i < dev_priv->num_pipe; i++) {
		intel_crtc_init(dev, i);
		ret = intel_plane_init(dev, i);
		if (ret)
			DRM_DEBUG_KMS("plane %d init failed: %d\n", i, ret);
	}

	/* Just disable it once at startup */
	i915_disable_vga(dev);
	intel_setup_outputs(dev);

	intel_init_clock_gating(dev);

	/* GPU turbo / power features, per generation. */
	if (IS_IRONLAKE_M(dev)) {
		ironlake_enable_drps(dev);
		intel_init_emon(dev);
	}

	if (IS_GEN6(dev) || IS_GEN7(dev)) {
		gen6_enable_rps(dev_priv);
		gen6_update_ring_freq(dev_priv);
	}

	/* Idle detection used for downclocking; torn down in intel_modeset_cleanup(). */
	INIT_WORK(&dev_priv->idle_work, intel_idle_update);
	setup_timer(&dev_priv->idle_timer, intel_gpu_idle_timer,
		    (unsigned long)dev);
}
9549
/*
 * Modeset setup that depends on GEM (name suggests it runs after GEM
 * init — confirm against the caller): RC6 power saving on Ironlake-M
 * and the video overlay.
 */
void intel_modeset_gem_init(struct drm_device *dev)
{
	if (IS_IRONLAKE_M(dev))
		ironlake_enable_rc6(dev);

	intel_setup_overlay(dev);
}
9557
/*
 * Tear down everything intel_modeset_init() set up. The ordering is
 * deliberate: stop connector polling first, disable PM features under
 * struct_mutex, then kill the irq (which can queue unpin/hotplug work)
 * before flushing work items, and stop the idle timers before the
 * crtcs they reference are freed.
 */
void intel_modeset_cleanup(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc;
	struct intel_crtc *intel_crtc;

	drm_kms_helper_poll_fini(dev);
	mutex_lock(&dev->struct_mutex);

	intel_unregister_dsm_handler();


	/* Restore full pixel clock on every active crtc before teardown
	 * (presumably undoing idle downclocking — confirm). */
	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
		/* Skip inactive CRTCs */
		if (!crtc->fb)
			continue;

		intel_crtc = to_intel_crtc(crtc);
		intel_increase_pllclock(crtc);
	}

	intel_disable_fbc(dev);

	/* Disable the PM features enabled in intel_modeset_init(). */
	if (IS_IRONLAKE_M(dev))
		ironlake_disable_drps(dev);
	if (IS_GEN6(dev) || IS_GEN7(dev))
		gen6_disable_rps(dev);

	if (IS_IRONLAKE_M(dev))
		ironlake_disable_rc6(dev);

	if (IS_VALLEYVIEW(dev))
		vlv_init_dpio(dev);

	mutex_unlock(&dev->struct_mutex);

	/* Disable the irq before mode object teardown, for the irq might
	 * enqueue unpin/hotplug work. */
	drm_irq_uninstall(dev);
	cancel_work_sync(&dev_priv->hotplug_work);
	cancel_work_sync(&dev_priv->rps_work);

	/* flush any delayed tasks or pending work */
	flush_scheduled_work();

	/* Shut off idle work before the crtcs get freed. */
	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
		intel_crtc = to_intel_crtc(crtc);
		del_timer_sync(&intel_crtc->idle_timer);
	}
	del_timer_sync(&dev_priv->idle_timer);
	cancel_work_sync(&dev_priv->idle_work);

	drm_mode_config_cleanup(dev);
}
9613
9614 /*
9615  * Return which encoder is currently attached for connector.
9616  */
9617 struct drm_encoder *intel_best_encoder(struct drm_connector *connector)
9618 {
9619         return &intel_attached_encoder(connector)->base;
9620 }
9621
9622 void intel_connector_attach_encoder(struct intel_connector *connector,
9623                                     struct intel_encoder *encoder)
9624 {
9625         connector->encoder = encoder;
9626         drm_mode_connector_attach_encoder(&connector->base,
9627                                           &encoder->base);
9628 }
9629
9630 /*
9631  * set vga decode state - true == enable VGA decode
9632  */
9633 int intel_modeset_vga_set_state(struct drm_device *dev, bool state)
9634 {
9635         struct drm_i915_private *dev_priv = dev->dev_private;
9636         u16 gmch_ctrl;
9637
9638         pci_read_config_word(dev_priv->bridge_dev, INTEL_GMCH_CTRL, &gmch_ctrl);
9639         if (state)
9640                 gmch_ctrl &= ~INTEL_GMCH_VGA_DISABLE;
9641         else
9642                 gmch_ctrl |= INTEL_GMCH_VGA_DISABLE;
9643         pci_write_config_word(dev_priv->bridge_dev, INTEL_GMCH_CTRL, gmch_ctrl);
9644         return 0;
9645 }
9646
9647 #ifdef CONFIG_DEBUG_FS
9648 #include <linux/seq_file.h>
9649
/*
 * Snapshot of display-engine register state, captured at error time by
 * intel_display_capture_error_state() and dumped via debugfs by
 * intel_display_print_error_state(). Arrays are sized for two pipes.
 */
struct intel_display_error_state {
	struct intel_cursor_error_state {
		u32 control;		/* CURCNTR */
		u32 position;		/* CURPOS */
		u32 base;		/* CURBASE */
		u32 size;		/* NOTE(review): never filled in by the
					 * capture path — confirm or drop */
	} cursor[2];

	struct intel_pipe_error_state {
		u32 conf;		/* PIPECONF */
		u32 source;		/* PIPESRC */

		/* Pipe timing registers. */
		u32 htotal;
		u32 hblank;
		u32 hsync;
		u32 vtotal;
		u32 vblank;
		u32 vsync;
	} pipe[2];

	struct intel_plane_error_state {
		u32 control;		/* DSPCNTR */
		u32 stride;		/* DSPSTRIDE */
		u32 size;		/* DSPSIZE */
		u32 pos;		/* DSPPOS */
		u32 addr;		/* DSPADDR */
		u32 surface;		/* DSPSURF, gen4+ only */
		u32 tile_offset;	/* DSPTILEOFF, gen4+ only */
	} plane[2];
};
9680
/*
 * Capture the current display register state for the first two pipes.
 * Returns a kmalloc'd snapshot (caller frees) or NULL on allocation
 * failure. GFP_ATOMIC because this is presumably reached from error/irq
 * context — confirm against the caller.
 */
struct intel_display_error_state *
intel_display_capture_error_state(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct intel_display_error_state *error;
	int i;

	error = kmalloc(sizeof(*error), GFP_ATOMIC);
	if (error == NULL)
		return NULL;

	/* Hard-coded to two pipes; cursor[i].size is left uninitialized. */
	for (i = 0; i < 2; i++) {
		error->cursor[i].control = I915_READ(CURCNTR(i));
		error->cursor[i].position = I915_READ(CURPOS(i));
		error->cursor[i].base = I915_READ(CURBASE(i));

		error->plane[i].control = I915_READ(DSPCNTR(i));
		error->plane[i].stride = I915_READ(DSPSTRIDE(i));
		error->plane[i].size = I915_READ(DSPSIZE(i));
		error->plane[i].pos = I915_READ(DSPPOS(i));
		error->plane[i].addr = I915_READ(DSPADDR(i));
		/* Surface base and tile offset registers only exist on gen4+. */
		if (INTEL_INFO(dev)->gen >= 4) {
			error->plane[i].surface = I915_READ(DSPSURF(i));
			error->plane[i].tile_offset = I915_READ(DSPTILEOFF(i));
		}

		error->pipe[i].conf = I915_READ(PIPECONF(i));
		error->pipe[i].source = I915_READ(PIPESRC(i));
		error->pipe[i].htotal = I915_READ(HTOTAL(i));
		error->pipe[i].hblank = I915_READ(HBLANK(i));
		error->pipe[i].hsync = I915_READ(HSYNC(i));
		error->pipe[i].vtotal = I915_READ(VTOTAL(i));
		error->pipe[i].vblank = I915_READ(VBLANK(i));
		error->pipe[i].vsync = I915_READ(VSYNC(i));
	}

	return error;
}
9719
9720 void
9721 intel_display_print_error_state(struct seq_file *m,
9722                                 struct drm_device *dev,
9723                                 struct intel_display_error_state *error)
9724 {
9725         int i;
9726
9727         for (i = 0; i < 2; i++) {
9728                 seq_printf(m, "Pipe [%d]:\n", i);
9729                 seq_printf(m, "  CONF: %08x\n", error->pipe[i].conf);
9730                 seq_printf(m, "  SRC: %08x\n", error->pipe[i].source);
9731                 seq_printf(m, "  HTOTAL: %08x\n", error->pipe[i].htotal);
9732                 seq_printf(m, "  HBLANK: %08x\n", error->pipe[i].hblank);
9733                 seq_printf(m, "  HSYNC: %08x\n", error->pipe[i].hsync);
9734                 seq_printf(m, "  VTOTAL: %08x\n", error->pipe[i].vtotal);
9735                 seq_printf(m, "  VBLANK: %08x\n", error->pipe[i].vblank);
9736                 seq_printf(m, "  VSYNC: %08x\n", error->pipe[i].vsync);
9737
9738                 seq_printf(m, "Plane [%d]:\n", i);
9739                 seq_printf(m, "  CNTR: %08x\n", error->plane[i].control);
9740                 seq_printf(m, "  STRIDE: %08x\n", error->plane[i].stride);
9741                 seq_printf(m, "  SIZE: %08x\n", error->plane[i].size);
9742                 seq_printf(m, "  POS: %08x\n", error->plane[i].pos);
9743                 seq_printf(m, "  ADDR: %08x\n", error->plane[i].addr);
9744                 if (INTEL_INFO(dev)->gen >= 4) {
9745                         seq_printf(m, "  SURF: %08x\n", error->plane[i].surface);
9746                         seq_printf(m, "  TILEOFF: %08x\n", error->plane[i].tile_offset);
9747                 }
9748
9749                 seq_printf(m, "Cursor [%d]:\n", i);
9750                 seq_printf(m, "  CNTR: %08x\n", error->cursor[i].control);
9751                 seq_printf(m, "  POS: %08x\n", error->cursor[i].position);
9752                 seq_printf(m, "  BASE: %08x\n", error->cursor[i].base);
9753         }
9754 }
9755 #endif
This page took 0.611006 seconds and 4 git commands to generate.