]> Git Repo - linux.git/blob - drivers/gpu/drm/i915/display/intel_fbc.c
Merge tag 'amd-drm-next-6.5-2023-06-09' of https://gitlab.freedesktop.org/agd5f/linux...
[linux.git] / drivers / gpu / drm / i915 / display / intel_fbc.c
1 /*
2  * Copyright © 2014 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21  * DEALINGS IN THE SOFTWARE.
22  */
23
24 /**
25  * DOC: Frame Buffer Compression (FBC)
26  *
27  * FBC tries to save memory bandwidth (and so power consumption) by
28  * compressing the amount of memory used by the display. It is total
29  * transparent to user space and completely handled in the kernel.
30  *
31  * The benefits of FBC are mostly visible with solid backgrounds and
32  * variation-less patterns. It comes from keeping the memory footprint small
33  * and having fewer memory pages opened and accessed for refreshing the display.
34  *
35  * i915 is responsible to reserve stolen memory for FBC and configure its
36  * offset on proper registers. The hardware takes care of all
37  * compress/decompress. However there are many known cases where we have to
38  * forcibly disable it to allow proper screen updates.
39  */
40
41 #include <linux/string_helpers.h>
42
43 #include <drm/drm_blend.h>
44 #include <drm/drm_fourcc.h>
45
46 #include "i915_drv.h"
47 #include "i915_reg.h"
48 #include "i915_utils.h"
49 #include "i915_vgpu.h"
50 #include "intel_cdclk.h"
51 #include "intel_de.h"
52 #include "intel_display_trace.h"
53 #include "intel_display_types.h"
54 #include "intel_fbc.h"
55 #include "intel_frontbuffer.h"
56
57 #define for_each_fbc_id(__dev_priv, __fbc_id) \
58         for ((__fbc_id) = INTEL_FBC_A; (__fbc_id) < I915_MAX_FBCS; (__fbc_id)++) \
59                 for_each_if(DISPLAY_RUNTIME_INFO(__dev_priv)->fbc_mask & BIT(__fbc_id))
60
61 #define for_each_intel_fbc(__dev_priv, __fbc, __fbc_id) \
62         for_each_fbc_id((__dev_priv), (__fbc_id)) \
63                 for_each_if((__fbc) = (__dev_priv)->display.fbc[(__fbc_id)])
64
65 struct intel_fbc_funcs {
66         void (*activate)(struct intel_fbc *fbc);
67         void (*deactivate)(struct intel_fbc *fbc);
68         bool (*is_active)(struct intel_fbc *fbc);
69         bool (*is_compressing)(struct intel_fbc *fbc);
70         void (*nuke)(struct intel_fbc *fbc);
71         void (*program_cfb)(struct intel_fbc *fbc);
72         void (*set_false_color)(struct intel_fbc *fbc, bool enable);
73 };
74
75 struct intel_fbc_state {
76         struct intel_plane *plane;
77         unsigned int cfb_stride;
78         unsigned int cfb_size;
79         unsigned int fence_y_offset;
80         u16 override_cfb_stride;
81         u16 interval;
82         s8 fence_id;
83 };
84
85 struct intel_fbc {
86         struct drm_i915_private *i915;
87         const struct intel_fbc_funcs *funcs;
88
89         /*
90          * This is always the inner lock when overlapping with
91          * struct_mutex and it's the outer lock when overlapping
92          * with stolen_lock.
93          */
94         struct mutex lock;
95         unsigned int busy_bits;
96
97         struct drm_mm_node compressed_fb;
98         struct drm_mm_node compressed_llb;
99
100         enum intel_fbc_id id;
101
102         u8 limit;
103
104         bool false_color;
105
106         bool active;
107         bool activated;
108         bool flip_pending;
109
110         bool underrun_detected;
111         struct work_struct underrun_work;
112
113         /*
114          * This structure contains everything that's relevant to program the
115          * hardware registers. When we want to figure out if we need to disable
116          * and re-enable FBC for a new configuration we just check if there's
117          * something different in the struct. The genx_fbc_activate functions
118          * are supposed to read from it in order to program the registers.
119          */
120         struct intel_fbc_state state;
121         const char *no_fbc_reason;
122 };
123
124 /* plane stride in pixels */
125 static unsigned int intel_fbc_plane_stride(const struct intel_plane_state *plane_state)
126 {
127         const struct drm_framebuffer *fb = plane_state->hw.fb;
128         unsigned int stride;
129
130         stride = plane_state->view.color_plane[0].mapping_stride;
131         if (!drm_rotation_90_or_270(plane_state->hw.rotation))
132                 stride /= fb->format->cpp[0];
133
134         return stride;
135 }
136
137 /* plane stride based cfb stride in bytes, assuming 1:1 compression limit */
138 static unsigned int _intel_fbc_cfb_stride(const struct intel_plane_state *plane_state)
139 {
140         unsigned int cpp = 4; /* FBC always 4 bytes per pixel */
141
142         return intel_fbc_plane_stride(plane_state) * cpp;
143 }
144
145 /* minimum acceptable cfb stride in bytes, assuming 1:1 compression limit */
146 static unsigned int skl_fbc_min_cfb_stride(const struct intel_plane_state *plane_state)
147 {
148         struct drm_i915_private *i915 = to_i915(plane_state->uapi.plane->dev);
149         unsigned int limit = 4; /* 1:4 compression limit is the worst case */
150         unsigned int cpp = 4; /* FBC always 4 bytes per pixel */
151         unsigned int width = drm_rect_width(&plane_state->uapi.src) >> 16;
152         unsigned int height = 4; /* FBC segment is 4 lines */
153         unsigned int stride;
154
155         /* minimum segment stride we can use */
156         stride = width * cpp * height / limit;
157
158         /*
159          * Wa_16011863758: icl+
160          * Avoid some hardware segment address miscalculation.
161          */
162         if (DISPLAY_VER(i915) >= 11)
163                 stride += 64;
164
165         /*
166          * At least some of the platforms require each 4 line segment to
167          * be 512 byte aligned. Just do it always for simplicity.
168          */
169         stride = ALIGN(stride, 512);
170
171         /* convert back to single line equivalent with 1:1 compression limit */
172         return stride * limit / height;
173 }
174
175 /* properly aligned cfb stride in bytes, assuming 1:1 compression limit */
176 static unsigned int intel_fbc_cfb_stride(const struct intel_plane_state *plane_state)
177 {
178         struct drm_i915_private *i915 = to_i915(plane_state->uapi.plane->dev);
179         unsigned int stride = _intel_fbc_cfb_stride(plane_state);
180
181         /*
182          * At least some of the platforms require each 4 line segment to
183          * be 512 byte aligned. Aligning each line to 512 bytes guarantees
184          * that regardless of the compression limit we choose later.
185          */
186         if (DISPLAY_VER(i915) >= 9)
187                 return max(ALIGN(stride, 512), skl_fbc_min_cfb_stride(plane_state));
188         else
189                 return stride;
190 }
191
192 static unsigned int intel_fbc_cfb_size(const struct intel_plane_state *plane_state)
193 {
194         struct drm_i915_private *i915 = to_i915(plane_state->uapi.plane->dev);
195         int lines = drm_rect_height(&plane_state->uapi.src) >> 16;
196
197         if (DISPLAY_VER(i915) == 7)
198                 lines = min(lines, 2048);
199         else if (DISPLAY_VER(i915) >= 8)
200                 lines = min(lines, 2560);
201
202         return lines * intel_fbc_cfb_stride(plane_state);
203 }
204
205 static u16 intel_fbc_override_cfb_stride(const struct intel_plane_state *plane_state)
206 {
207         struct drm_i915_private *i915 = to_i915(plane_state->uapi.plane->dev);
208         unsigned int stride_aligned = intel_fbc_cfb_stride(plane_state);
209         unsigned int stride = _intel_fbc_cfb_stride(plane_state);
210         const struct drm_framebuffer *fb = plane_state->hw.fb;
211
212         /*
213          * Override stride in 64 byte units per 4 line segment.
214          *
215          * Gen9 hw miscalculates cfb stride for linear as
216          * PLANE_STRIDE*512 instead of PLANE_STRIDE*64, so
217          * we always need to use the override there.
218          */
219         if (stride != stride_aligned ||
220             (DISPLAY_VER(i915) == 9 && fb->modifier == DRM_FORMAT_MOD_LINEAR))
221                 return stride_aligned * 4 / 64;
222
223         return 0;
224 }
225
226 static u32 i8xx_fbc_ctl(struct intel_fbc *fbc)
227 {
228         const struct intel_fbc_state *fbc_state = &fbc->state;
229         struct drm_i915_private *i915 = fbc->i915;
230         unsigned int cfb_stride;
231         u32 fbc_ctl;
232
233         cfb_stride = fbc_state->cfb_stride / fbc->limit;
234
235         /* FBC_CTL wants 32B or 64B units */
236         if (DISPLAY_VER(i915) == 2)
237                 cfb_stride = (cfb_stride / 32) - 1;
238         else
239                 cfb_stride = (cfb_stride / 64) - 1;
240
241         fbc_ctl = FBC_CTL_PERIODIC |
242                 FBC_CTL_INTERVAL(fbc_state->interval) |
243                 FBC_CTL_STRIDE(cfb_stride);
244
245         if (IS_I945GM(i915))
246                 fbc_ctl |= FBC_CTL_C3_IDLE; /* 945 needs special SR handling */
247
248         if (fbc_state->fence_id >= 0)
249                 fbc_ctl |= FBC_CTL_FENCENO(fbc_state->fence_id);
250
251         return fbc_ctl;
252 }
253
254 static u32 i965_fbc_ctl2(struct intel_fbc *fbc)
255 {
256         const struct intel_fbc_state *fbc_state = &fbc->state;
257         u32 fbc_ctl2;
258
259         fbc_ctl2 = FBC_CTL_FENCE_DBL | FBC_CTL_IDLE_IMM |
260                 FBC_CTL_PLANE(fbc_state->plane->i9xx_plane);
261
262         if (fbc_state->fence_id >= 0)
263                 fbc_ctl2 |= FBC_CTL_CPU_FENCE_EN;
264
265         return fbc_ctl2;
266 }
267
268 static void i8xx_fbc_deactivate(struct intel_fbc *fbc)
269 {
270         struct drm_i915_private *i915 = fbc->i915;
271         u32 fbc_ctl;
272
273         /* Disable compression */
274         fbc_ctl = intel_de_read(i915, FBC_CONTROL);
275         if ((fbc_ctl & FBC_CTL_EN) == 0)
276                 return;
277
278         fbc_ctl &= ~FBC_CTL_EN;
279         intel_de_write(i915, FBC_CONTROL, fbc_ctl);
280
281         /* Wait for compressing bit to clear */
282         if (intel_de_wait_for_clear(i915, FBC_STATUS,
283                                     FBC_STAT_COMPRESSING, 10)) {
284                 drm_dbg_kms(&i915->drm, "FBC idle timed out\n");
285                 return;
286         }
287 }
288
289 static void i8xx_fbc_activate(struct intel_fbc *fbc)
290 {
291         const struct intel_fbc_state *fbc_state = &fbc->state;
292         struct drm_i915_private *i915 = fbc->i915;
293         int i;
294
295         /* Clear old tags */
296         for (i = 0; i < (FBC_LL_SIZE / 32) + 1; i++)
297                 intel_de_write(i915, FBC_TAG(i), 0);
298
299         if (DISPLAY_VER(i915) == 4) {
300                 intel_de_write(i915, FBC_CONTROL2,
301                                i965_fbc_ctl2(fbc));
302                 intel_de_write(i915, FBC_FENCE_OFF,
303                                fbc_state->fence_y_offset);
304         }
305
306         intel_de_write(i915, FBC_CONTROL,
307                        FBC_CTL_EN | i8xx_fbc_ctl(fbc));
308 }
309
310 static bool i8xx_fbc_is_active(struct intel_fbc *fbc)
311 {
312         return intel_de_read(fbc->i915, FBC_CONTROL) & FBC_CTL_EN;
313 }
314
315 static bool i8xx_fbc_is_compressing(struct intel_fbc *fbc)
316 {
317         return intel_de_read(fbc->i915, FBC_STATUS) &
318                 (FBC_STAT_COMPRESSING | FBC_STAT_COMPRESSED);
319 }
320
321 static void i8xx_fbc_nuke(struct intel_fbc *fbc)
322 {
323         struct intel_fbc_state *fbc_state = &fbc->state;
324         enum i9xx_plane_id i9xx_plane = fbc_state->plane->i9xx_plane;
325         struct drm_i915_private *dev_priv = fbc->i915;
326
327         intel_de_write_fw(dev_priv, DSPADDR(i9xx_plane),
328                           intel_de_read_fw(dev_priv, DSPADDR(i9xx_plane)));
329 }
330
331 static void i8xx_fbc_program_cfb(struct intel_fbc *fbc)
332 {
333         struct drm_i915_private *i915 = fbc->i915;
334
335         GEM_BUG_ON(range_overflows_end_t(u64, i915->dsm.stolen.start,
336                                          fbc->compressed_fb.start, U32_MAX));
337         GEM_BUG_ON(range_overflows_end_t(u64, i915->dsm.stolen.start,
338                                          fbc->compressed_llb.start, U32_MAX));
339
340         intel_de_write(i915, FBC_CFB_BASE,
341                        i915->dsm.stolen.start + fbc->compressed_fb.start);
342         intel_de_write(i915, FBC_LL_BASE,
343                        i915->dsm.stolen.start + fbc->compressed_llb.start);
344 }
345
346 static const struct intel_fbc_funcs i8xx_fbc_funcs = {
347         .activate = i8xx_fbc_activate,
348         .deactivate = i8xx_fbc_deactivate,
349         .is_active = i8xx_fbc_is_active,
350         .is_compressing = i8xx_fbc_is_compressing,
351         .nuke = i8xx_fbc_nuke,
352         .program_cfb = i8xx_fbc_program_cfb,
353 };
354
355 static void i965_fbc_nuke(struct intel_fbc *fbc)
356 {
357         struct intel_fbc_state *fbc_state = &fbc->state;
358         enum i9xx_plane_id i9xx_plane = fbc_state->plane->i9xx_plane;
359         struct drm_i915_private *dev_priv = fbc->i915;
360
361         intel_de_write_fw(dev_priv, DSPSURF(i9xx_plane),
362                           intel_de_read_fw(dev_priv, DSPSURF(i9xx_plane)));
363 }
364
365 static const struct intel_fbc_funcs i965_fbc_funcs = {
366         .activate = i8xx_fbc_activate,
367         .deactivate = i8xx_fbc_deactivate,
368         .is_active = i8xx_fbc_is_active,
369         .is_compressing = i8xx_fbc_is_compressing,
370         .nuke = i965_fbc_nuke,
371         .program_cfb = i8xx_fbc_program_cfb,
372 };
373
374 static u32 g4x_dpfc_ctl_limit(struct intel_fbc *fbc)
375 {
376         switch (fbc->limit) {
377         default:
378                 MISSING_CASE(fbc->limit);
379                 fallthrough;
380         case 1:
381                 return DPFC_CTL_LIMIT_1X;
382         case 2:
383                 return DPFC_CTL_LIMIT_2X;
384         case 4:
385                 return DPFC_CTL_LIMIT_4X;
386         }
387 }
388
389 static u32 g4x_dpfc_ctl(struct intel_fbc *fbc)
390 {
391         const struct intel_fbc_state *fbc_state = &fbc->state;
392         struct drm_i915_private *i915 = fbc->i915;
393         u32 dpfc_ctl;
394
395         dpfc_ctl = g4x_dpfc_ctl_limit(fbc) |
396                 DPFC_CTL_PLANE_G4X(fbc_state->plane->i9xx_plane);
397
398         if (IS_G4X(i915))
399                 dpfc_ctl |= DPFC_CTL_SR_EN;
400
401         if (fbc_state->fence_id >= 0) {
402                 dpfc_ctl |= DPFC_CTL_FENCE_EN_G4X;
403
404                 if (DISPLAY_VER(i915) < 6)
405                         dpfc_ctl |= DPFC_CTL_FENCENO(fbc_state->fence_id);
406         }
407
408         return dpfc_ctl;
409 }
410
411 static void g4x_fbc_activate(struct intel_fbc *fbc)
412 {
413         const struct intel_fbc_state *fbc_state = &fbc->state;
414         struct drm_i915_private *i915 = fbc->i915;
415
416         intel_de_write(i915, DPFC_FENCE_YOFF,
417                        fbc_state->fence_y_offset);
418
419         intel_de_write(i915, DPFC_CONTROL,
420                        DPFC_CTL_EN | g4x_dpfc_ctl(fbc));
421 }
422
423 static void g4x_fbc_deactivate(struct intel_fbc *fbc)
424 {
425         struct drm_i915_private *i915 = fbc->i915;
426         u32 dpfc_ctl;
427
428         /* Disable compression */
429         dpfc_ctl = intel_de_read(i915, DPFC_CONTROL);
430         if (dpfc_ctl & DPFC_CTL_EN) {
431                 dpfc_ctl &= ~DPFC_CTL_EN;
432                 intel_de_write(i915, DPFC_CONTROL, dpfc_ctl);
433         }
434 }
435
436 static bool g4x_fbc_is_active(struct intel_fbc *fbc)
437 {
438         return intel_de_read(fbc->i915, DPFC_CONTROL) & DPFC_CTL_EN;
439 }
440
441 static bool g4x_fbc_is_compressing(struct intel_fbc *fbc)
442 {
443         return intel_de_read(fbc->i915, DPFC_STATUS) & DPFC_COMP_SEG_MASK;
444 }
445
446 static void g4x_fbc_program_cfb(struct intel_fbc *fbc)
447 {
448         struct drm_i915_private *i915 = fbc->i915;
449
450         intel_de_write(i915, DPFC_CB_BASE, fbc->compressed_fb.start);
451 }
452
453 static const struct intel_fbc_funcs g4x_fbc_funcs = {
454         .activate = g4x_fbc_activate,
455         .deactivate = g4x_fbc_deactivate,
456         .is_active = g4x_fbc_is_active,
457         .is_compressing = g4x_fbc_is_compressing,
458         .nuke = i965_fbc_nuke,
459         .program_cfb = g4x_fbc_program_cfb,
460 };
461
462 static void ilk_fbc_activate(struct intel_fbc *fbc)
463 {
464         struct intel_fbc_state *fbc_state = &fbc->state;
465         struct drm_i915_private *i915 = fbc->i915;
466
467         intel_de_write(i915, ILK_DPFC_FENCE_YOFF(fbc->id),
468                        fbc_state->fence_y_offset);
469
470         intel_de_write(i915, ILK_DPFC_CONTROL(fbc->id),
471                        DPFC_CTL_EN | g4x_dpfc_ctl(fbc));
472 }
473
474 static void ilk_fbc_deactivate(struct intel_fbc *fbc)
475 {
476         struct drm_i915_private *i915 = fbc->i915;
477         u32 dpfc_ctl;
478
479         /* Disable compression */
480         dpfc_ctl = intel_de_read(i915, ILK_DPFC_CONTROL(fbc->id));
481         if (dpfc_ctl & DPFC_CTL_EN) {
482                 dpfc_ctl &= ~DPFC_CTL_EN;
483                 intel_de_write(i915, ILK_DPFC_CONTROL(fbc->id), dpfc_ctl);
484         }
485 }
486
487 static bool ilk_fbc_is_active(struct intel_fbc *fbc)
488 {
489         return intel_de_read(fbc->i915, ILK_DPFC_CONTROL(fbc->id)) & DPFC_CTL_EN;
490 }
491
492 static bool ilk_fbc_is_compressing(struct intel_fbc *fbc)
493 {
494         return intel_de_read(fbc->i915, ILK_DPFC_STATUS(fbc->id)) & DPFC_COMP_SEG_MASK;
495 }
496
497 static void ilk_fbc_program_cfb(struct intel_fbc *fbc)
498 {
499         struct drm_i915_private *i915 = fbc->i915;
500
501         intel_de_write(i915, ILK_DPFC_CB_BASE(fbc->id), fbc->compressed_fb.start);
502 }
503
504 static const struct intel_fbc_funcs ilk_fbc_funcs = {
505         .activate = ilk_fbc_activate,
506         .deactivate = ilk_fbc_deactivate,
507         .is_active = ilk_fbc_is_active,
508         .is_compressing = ilk_fbc_is_compressing,
509         .nuke = i965_fbc_nuke,
510         .program_cfb = ilk_fbc_program_cfb,
511 };
512
513 static void snb_fbc_program_fence(struct intel_fbc *fbc)
514 {
515         const struct intel_fbc_state *fbc_state = &fbc->state;
516         struct drm_i915_private *i915 = fbc->i915;
517         u32 ctl = 0;
518
519         if (fbc_state->fence_id >= 0)
520                 ctl = SNB_DPFC_FENCE_EN | SNB_DPFC_FENCENO(fbc_state->fence_id);
521
522         intel_de_write(i915, SNB_DPFC_CTL_SA, ctl);
523         intel_de_write(i915, SNB_DPFC_CPU_FENCE_OFFSET, fbc_state->fence_y_offset);
524 }
525
526 static void snb_fbc_activate(struct intel_fbc *fbc)
527 {
528         snb_fbc_program_fence(fbc);
529
530         ilk_fbc_activate(fbc);
531 }
532
533 static void snb_fbc_nuke(struct intel_fbc *fbc)
534 {
535         struct drm_i915_private *i915 = fbc->i915;
536
537         intel_de_write(i915, MSG_FBC_REND_STATE(fbc->id), FBC_REND_NUKE);
538         intel_de_posting_read(i915, MSG_FBC_REND_STATE(fbc->id));
539 }
540
541 static const struct intel_fbc_funcs snb_fbc_funcs = {
542         .activate = snb_fbc_activate,
543         .deactivate = ilk_fbc_deactivate,
544         .is_active = ilk_fbc_is_active,
545         .is_compressing = ilk_fbc_is_compressing,
546         .nuke = snb_fbc_nuke,
547         .program_cfb = ilk_fbc_program_cfb,
548 };
549
550 static void glk_fbc_program_cfb_stride(struct intel_fbc *fbc)
551 {
552         const struct intel_fbc_state *fbc_state = &fbc->state;
553         struct drm_i915_private *i915 = fbc->i915;
554         u32 val = 0;
555
556         if (fbc_state->override_cfb_stride)
557                 val |= FBC_STRIDE_OVERRIDE |
558                         FBC_STRIDE(fbc_state->override_cfb_stride / fbc->limit);
559
560         intel_de_write(i915, GLK_FBC_STRIDE(fbc->id), val);
561 }
562
563 static void skl_fbc_program_cfb_stride(struct intel_fbc *fbc)
564 {
565         const struct intel_fbc_state *fbc_state = &fbc->state;
566         struct drm_i915_private *i915 = fbc->i915;
567         u32 val = 0;
568
569         /* Display WA #0529: skl, kbl, bxt. */
570         if (fbc_state->override_cfb_stride)
571                 val |= CHICKEN_FBC_STRIDE_OVERRIDE |
572                         CHICKEN_FBC_STRIDE(fbc_state->override_cfb_stride / fbc->limit);
573
574         intel_de_rmw(i915, CHICKEN_MISC_4,
575                      CHICKEN_FBC_STRIDE_OVERRIDE |
576                      CHICKEN_FBC_STRIDE_MASK, val);
577 }
578
579 static u32 ivb_dpfc_ctl(struct intel_fbc *fbc)
580 {
581         const struct intel_fbc_state *fbc_state = &fbc->state;
582         struct drm_i915_private *i915 = fbc->i915;
583         u32 dpfc_ctl;
584
585         dpfc_ctl = g4x_dpfc_ctl_limit(fbc);
586
587         if (IS_IVYBRIDGE(i915))
588                 dpfc_ctl |= DPFC_CTL_PLANE_IVB(fbc_state->plane->i9xx_plane);
589
590         if (fbc_state->fence_id >= 0)
591                 dpfc_ctl |= DPFC_CTL_FENCE_EN_IVB;
592
593         if (fbc->false_color)
594                 dpfc_ctl |= DPFC_CTL_FALSE_COLOR;
595
596         return dpfc_ctl;
597 }
598
599 static void ivb_fbc_activate(struct intel_fbc *fbc)
600 {
601         struct drm_i915_private *i915 = fbc->i915;
602
603         if (DISPLAY_VER(i915) >= 10)
604                 glk_fbc_program_cfb_stride(fbc);
605         else if (DISPLAY_VER(i915) == 9)
606                 skl_fbc_program_cfb_stride(fbc);
607
608         if (to_gt(i915)->ggtt->num_fences)
609                 snb_fbc_program_fence(fbc);
610
611         intel_de_write(i915, ILK_DPFC_CONTROL(fbc->id),
612                        DPFC_CTL_EN | ivb_dpfc_ctl(fbc));
613 }
614
615 static bool ivb_fbc_is_compressing(struct intel_fbc *fbc)
616 {
617         return intel_de_read(fbc->i915, ILK_DPFC_STATUS2(fbc->id)) & DPFC_COMP_SEG_MASK_IVB;
618 }
619
620 static void ivb_fbc_set_false_color(struct intel_fbc *fbc,
621                                     bool enable)
622 {
623         intel_de_rmw(fbc->i915, ILK_DPFC_CONTROL(fbc->id),
624                      DPFC_CTL_FALSE_COLOR, enable ? DPFC_CTL_FALSE_COLOR : 0);
625 }
626
627 static const struct intel_fbc_funcs ivb_fbc_funcs = {
628         .activate = ivb_fbc_activate,
629         .deactivate = ilk_fbc_deactivate,
630         .is_active = ilk_fbc_is_active,
631         .is_compressing = ivb_fbc_is_compressing,
632         .nuke = snb_fbc_nuke,
633         .program_cfb = ilk_fbc_program_cfb,
634         .set_false_color = ivb_fbc_set_false_color,
635 };
636
637 static bool intel_fbc_hw_is_active(struct intel_fbc *fbc)
638 {
639         return fbc->funcs->is_active(fbc);
640 }
641
642 static void intel_fbc_hw_activate(struct intel_fbc *fbc)
643 {
644         trace_intel_fbc_activate(fbc->state.plane);
645
646         fbc->active = true;
647         fbc->activated = true;
648
649         fbc->funcs->activate(fbc);
650 }
651
652 static void intel_fbc_hw_deactivate(struct intel_fbc *fbc)
653 {
654         trace_intel_fbc_deactivate(fbc->state.plane);
655
656         fbc->active = false;
657
658         fbc->funcs->deactivate(fbc);
659 }
660
661 static bool intel_fbc_is_compressing(struct intel_fbc *fbc)
662 {
663         return fbc->funcs->is_compressing(fbc);
664 }
665
666 static void intel_fbc_nuke(struct intel_fbc *fbc)
667 {
668         struct drm_i915_private *i915 = fbc->i915;
669
670         lockdep_assert_held(&fbc->lock);
671         drm_WARN_ON(&i915->drm, fbc->flip_pending);
672
673         trace_intel_fbc_nuke(fbc->state.plane);
674
675         fbc->funcs->nuke(fbc);
676 }
677
678 static void intel_fbc_activate(struct intel_fbc *fbc)
679 {
680         lockdep_assert_held(&fbc->lock);
681
682         intel_fbc_hw_activate(fbc);
683         intel_fbc_nuke(fbc);
684
685         fbc->no_fbc_reason = NULL;
686 }
687
688 static void intel_fbc_deactivate(struct intel_fbc *fbc, const char *reason)
689 {
690         lockdep_assert_held(&fbc->lock);
691
692         if (fbc->active)
693                 intel_fbc_hw_deactivate(fbc);
694
695         fbc->no_fbc_reason = reason;
696 }
697
698 static u64 intel_fbc_cfb_base_max(struct drm_i915_private *i915)
699 {
700         if (DISPLAY_VER(i915) >= 5 || IS_G4X(i915))
701                 return BIT_ULL(28);
702         else
703                 return BIT_ULL(32);
704 }
705
706 static u64 intel_fbc_stolen_end(struct drm_i915_private *i915)
707 {
708         u64 end;
709
710         /* The FBC hardware for BDW/SKL doesn't have access to the stolen
711          * reserved range size, so it always assumes the maximum (8mb) is used.
712          * If we enable FBC using a CFB on that memory range we'll get FIFO
713          * underruns, even if that range is not reserved by the BIOS. */
714         if (IS_BROADWELL(i915) ||
715             (DISPLAY_VER(i915) == 9 && !IS_BROXTON(i915)))
716                 end = resource_size(&i915->dsm.stolen) - 8 * 1024 * 1024;
717         else
718                 end = U64_MAX;
719
720         return min(end, intel_fbc_cfb_base_max(i915));
721 }
722
723 static int intel_fbc_min_limit(const struct intel_plane_state *plane_state)
724 {
725         return plane_state->hw.fb->format->cpp[0] == 2 ? 2 : 1;
726 }
727
728 static int intel_fbc_max_limit(struct drm_i915_private *i915)
729 {
730         /* WaFbcOnly1to1Ratio:ctg */
731         if (IS_G4X(i915))
732                 return 1;
733
734         /*
735          * FBC2 can only do 1:1, 1:2, 1:4, we limit
736          * FBC1 to the same out of convenience.
737          */
738         return 4;
739 }
740
741 static int find_compression_limit(struct intel_fbc *fbc,
742                                   unsigned int size, int min_limit)
743 {
744         struct drm_i915_private *i915 = fbc->i915;
745         u64 end = intel_fbc_stolen_end(i915);
746         int ret, limit = min_limit;
747
748         size /= limit;
749
750         /* Try to over-allocate to reduce reallocations and fragmentation. */
751         ret = i915_gem_stolen_insert_node_in_range(i915, &fbc->compressed_fb,
752                                                    size <<= 1, 4096, 0, end);
753         if (ret == 0)
754                 return limit;
755
756         for (; limit <= intel_fbc_max_limit(i915); limit <<= 1) {
757                 ret = i915_gem_stolen_insert_node_in_range(i915, &fbc->compressed_fb,
758                                                            size >>= 1, 4096, 0, end);
759                 if (ret == 0)
760                         return limit;
761         }
762
763         return 0;
764 }
765
766 static int intel_fbc_alloc_cfb(struct intel_fbc *fbc,
767                                unsigned int size, int min_limit)
768 {
769         struct drm_i915_private *i915 = fbc->i915;
770         int ret;
771
772         drm_WARN_ON(&i915->drm,
773                     drm_mm_node_allocated(&fbc->compressed_fb));
774         drm_WARN_ON(&i915->drm,
775                     drm_mm_node_allocated(&fbc->compressed_llb));
776
777         if (DISPLAY_VER(i915) < 5 && !IS_G4X(i915)) {
778                 ret = i915_gem_stolen_insert_node(i915, &fbc->compressed_llb,
779                                                   4096, 4096);
780                 if (ret)
781                         goto err;
782         }
783
784         ret = find_compression_limit(fbc, size, min_limit);
785         if (!ret)
786                 goto err_llb;
787         else if (ret > min_limit)
788                 drm_info_once(&i915->drm,
789                               "Reducing the compressed framebuffer size. This may lead to less power savings than a non-reduced-size. Try to increase stolen memory size if available in BIOS.\n");
790
791         fbc->limit = ret;
792
793         drm_dbg_kms(&i915->drm,
794                     "reserved %llu bytes of contiguous stolen space for FBC, limit: %d\n",
795                     fbc->compressed_fb.size, fbc->limit);
796
797         return 0;
798
799 err_llb:
800         if (drm_mm_node_allocated(&fbc->compressed_llb))
801                 i915_gem_stolen_remove_node(i915, &fbc->compressed_llb);
802 err:
803         if (drm_mm_initialized(&i915->mm.stolen))
804                 drm_info_once(&i915->drm, "not enough stolen space for compressed buffer (need %d more bytes), disabling. Hint: you may be able to increase stolen memory size in the BIOS to avoid this.\n", size);
805         return -ENOSPC;
806 }
807
808 static void intel_fbc_program_cfb(struct intel_fbc *fbc)
809 {
810         fbc->funcs->program_cfb(fbc);
811 }
812
813 static void intel_fbc_program_workarounds(struct intel_fbc *fbc)
814 {
815         /* Wa_22014263786:icl,jsl,tgl,dg1,rkl,adls,adlp,mtl */
816         if (DISPLAY_VER(fbc->i915) >= 11 && !IS_DG2(fbc->i915))
817                 intel_de_rmw(fbc->i915, ILK_DPFC_CHICKEN(fbc->id), 0,
818                              DPFC_CHICKEN_FORCE_SLB_INVALIDATION);
819 }
820
821 static void __intel_fbc_cleanup_cfb(struct intel_fbc *fbc)
822 {
823         struct drm_i915_private *i915 = fbc->i915;
824
825         if (WARN_ON(intel_fbc_hw_is_active(fbc)))
826                 return;
827
828         if (drm_mm_node_allocated(&fbc->compressed_llb))
829                 i915_gem_stolen_remove_node(i915, &fbc->compressed_llb);
830         if (drm_mm_node_allocated(&fbc->compressed_fb))
831                 i915_gem_stolen_remove_node(i915, &fbc->compressed_fb);
832 }
833
834 void intel_fbc_cleanup(struct drm_i915_private *i915)
835 {
836         struct intel_fbc *fbc;
837         enum intel_fbc_id fbc_id;
838
839         for_each_intel_fbc(i915, fbc, fbc_id) {
840                 mutex_lock(&fbc->lock);
841                 __intel_fbc_cleanup_cfb(fbc);
842                 mutex_unlock(&fbc->lock);
843
844                 kfree(fbc);
845         }
846 }
847
848 static bool stride_is_valid(const struct intel_plane_state *plane_state)
849 {
850         struct drm_i915_private *i915 = to_i915(plane_state->uapi.plane->dev);
851         const struct drm_framebuffer *fb = plane_state->hw.fb;
852         unsigned int stride = intel_fbc_plane_stride(plane_state) *
853                 fb->format->cpp[0];
854
855         /* This should have been caught earlier. */
856         if (drm_WARN_ON_ONCE(&i915->drm, (stride & (64 - 1)) != 0))
857                 return false;
858
859         /* Below are the additional FBC restrictions. */
860         if (stride < 512)
861                 return false;
862
863         if (DISPLAY_VER(i915) == 2 || DISPLAY_VER(i915) == 3)
864                 return stride == 4096 || stride == 8192;
865
866         if (DISPLAY_VER(i915) == 4 && !IS_G4X(i915) && stride < 2048)
867                 return false;
868
869         /* Display WA #1105: skl,bxt,kbl,cfl,glk */
870         if ((DISPLAY_VER(i915) == 9 || IS_GEMINILAKE(i915)) &&
871             fb->modifier == DRM_FORMAT_MOD_LINEAR && stride & 511)
872                 return false;
873
874         if (stride > 16384)
875                 return false;
876
877         return true;
878 }
879
880 static bool pixel_format_is_valid(const struct intel_plane_state *plane_state)
881 {
882         struct drm_i915_private *i915 = to_i915(plane_state->uapi.plane->dev);
883         const struct drm_framebuffer *fb = plane_state->hw.fb;
884
885         switch (fb->format->format) {
886         case DRM_FORMAT_XRGB8888:
887         case DRM_FORMAT_XBGR8888:
888                 return true;
889         case DRM_FORMAT_XRGB1555:
890         case DRM_FORMAT_RGB565:
891                 /* 16bpp not supported on gen2 */
892                 if (DISPLAY_VER(i915) == 2)
893                         return false;
894                 /* WaFbcOnly1to1Ratio:ctg */
895                 if (IS_G4X(i915))
896                         return false;
897                 return true;
898         default:
899                 return false;
900         }
901 }
902
903 static bool rotation_is_valid(const struct intel_plane_state *plane_state)
904 {
905         struct drm_i915_private *i915 = to_i915(plane_state->uapi.plane->dev);
906         const struct drm_framebuffer *fb = plane_state->hw.fb;
907         unsigned int rotation = plane_state->hw.rotation;
908
909         if (DISPLAY_VER(i915) >= 9 && fb->format->format == DRM_FORMAT_RGB565 &&
910             drm_rotation_90_or_270(rotation))
911                 return false;
912         else if (DISPLAY_VER(i915) <= 4 && !IS_G4X(i915) &&
913                  rotation != DRM_MODE_ROTATE_0)
914                 return false;
915
916         return true;
917 }
918
919 /*
920  * For some reason, the hardware tracking starts looking at whatever we
921  * programmed as the display plane base address register. It does not look at
922  * the X and Y offset registers. That's why we include the src x/y offsets
923  * instead of just looking at the plane size.
924  */
925 static bool intel_fbc_hw_tracking_covers_screen(const struct intel_plane_state *plane_state)
926 {
927         struct drm_i915_private *i915 = to_i915(plane_state->uapi.plane->dev);
928         unsigned int effective_w, effective_h, max_w, max_h;
929
930         if (DISPLAY_VER(i915) >= 10) {
931                 max_w = 5120;
932                 max_h = 4096;
933         } else if (DISPLAY_VER(i915) >= 8 || IS_HASWELL(i915)) {
934                 max_w = 4096;
935                 max_h = 4096;
936         } else if (IS_G4X(i915) || DISPLAY_VER(i915) >= 5) {
937                 max_w = 4096;
938                 max_h = 2048;
939         } else {
940                 max_w = 2048;
941                 max_h = 1536;
942         }
943
944         effective_w = plane_state->view.color_plane[0].x +
945                 (drm_rect_width(&plane_state->uapi.src) >> 16);
946         effective_h = plane_state->view.color_plane[0].y +
947                 (drm_rect_height(&plane_state->uapi.src) >> 16);
948
949         return effective_w <= max_w && effective_h <= max_h;
950 }
951
952 static bool tiling_is_valid(const struct intel_plane_state *plane_state)
953 {
954         struct drm_i915_private *i915 = to_i915(plane_state->uapi.plane->dev);
955         const struct drm_framebuffer *fb = plane_state->hw.fb;
956
957         switch (fb->modifier) {
958         case DRM_FORMAT_MOD_LINEAR:
959         case I915_FORMAT_MOD_Y_TILED:
960         case I915_FORMAT_MOD_Yf_TILED:
961                 return DISPLAY_VER(i915) >= 9;
962         case I915_FORMAT_MOD_4_TILED:
963         case I915_FORMAT_MOD_X_TILED:
964                 return true;
965         default:
966                 return false;
967         }
968 }
969
970 static void intel_fbc_update_state(struct intel_atomic_state *state,
971                                    struct intel_crtc *crtc,
972                                    struct intel_plane *plane)
973 {
974         struct drm_i915_private *i915 = to_i915(state->base.dev);
975         const struct intel_crtc_state *crtc_state =
976                 intel_atomic_get_new_crtc_state(state, crtc);
977         const struct intel_plane_state *plane_state =
978                 intel_atomic_get_new_plane_state(state, plane);
979         struct intel_fbc *fbc = plane->fbc;
980         struct intel_fbc_state *fbc_state = &fbc->state;
981
982         WARN_ON(plane_state->no_fbc_reason);
983         WARN_ON(fbc_state->plane && fbc_state->plane != plane);
984
985         fbc_state->plane = plane;
986
987         /* FBC1 compression interval: arbitrary choice of 1 second */
988         fbc_state->interval = drm_mode_vrefresh(&crtc_state->hw.adjusted_mode);
989
990         fbc_state->fence_y_offset = intel_plane_fence_y_offset(plane_state);
991
992         drm_WARN_ON(&i915->drm, plane_state->flags & PLANE_HAS_FENCE &&
993                     !plane_state->ggtt_vma->fence);
994
995         if (plane_state->flags & PLANE_HAS_FENCE &&
996             plane_state->ggtt_vma->fence)
997                 fbc_state->fence_id = plane_state->ggtt_vma->fence->id;
998         else
999                 fbc_state->fence_id = -1;
1000
1001         fbc_state->cfb_stride = intel_fbc_cfb_stride(plane_state);
1002         fbc_state->cfb_size = intel_fbc_cfb_size(plane_state);
1003         fbc_state->override_cfb_stride = intel_fbc_override_cfb_stride(plane_state);
1004 }
1005
1006 static bool intel_fbc_is_fence_ok(const struct intel_plane_state *plane_state)
1007 {
1008         struct drm_i915_private *i915 = to_i915(plane_state->uapi.plane->dev);
1009
1010         /*
1011          * The use of a CPU fence is one of two ways to detect writes by the
1012          * CPU to the scanout and trigger updates to the FBC.
1013          *
1014          * The other method is by software tracking (see
1015          * intel_fbc_invalidate/flush()), it will manually notify FBC and nuke
1016          * the current compressed buffer and recompress it.
1017          *
1018          * Note that is possible for a tiled surface to be unmappable (and
1019          * so have no fence associated with it) due to aperture constraints
1020          * at the time of pinning.
1021          */
1022         return DISPLAY_VER(i915) >= 9 ||
1023                 (plane_state->flags & PLANE_HAS_FENCE &&
1024                  plane_state->ggtt_vma->fence);
1025 }
1026
1027 static bool intel_fbc_is_cfb_ok(const struct intel_plane_state *plane_state)
1028 {
1029         struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
1030         struct intel_fbc *fbc = plane->fbc;
1031
1032         return intel_fbc_min_limit(plane_state) <= fbc->limit &&
1033                 intel_fbc_cfb_size(plane_state) <= fbc->compressed_fb.size * fbc->limit;
1034 }
1035
1036 static bool intel_fbc_is_ok(const struct intel_plane_state *plane_state)
1037 {
1038         return !plane_state->no_fbc_reason &&
1039                 intel_fbc_is_fence_ok(plane_state) &&
1040                 intel_fbc_is_cfb_ok(plane_state);
1041 }
1042
1043 static int intel_fbc_check_plane(struct intel_atomic_state *state,
1044                                  struct intel_plane *plane)
1045 {
1046         struct drm_i915_private *i915 = to_i915(state->base.dev);
1047         struct intel_plane_state *plane_state =
1048                 intel_atomic_get_new_plane_state(state, plane);
1049         const struct drm_framebuffer *fb = plane_state->hw.fb;
1050         struct intel_crtc *crtc = to_intel_crtc(plane_state->hw.crtc);
1051         const struct intel_crtc_state *crtc_state;
1052         struct intel_fbc *fbc = plane->fbc;
1053
1054         if (!fbc)
1055                 return 0;
1056
1057         if (intel_vgpu_active(i915)) {
1058                 plane_state->no_fbc_reason = "VGPU active";
1059                 return 0;
1060         }
1061
1062         if (!i915->params.enable_fbc) {
1063                 plane_state->no_fbc_reason = "disabled per module param or by default";
1064                 return 0;
1065         }
1066
1067         if (!plane_state->uapi.visible) {
1068                 plane_state->no_fbc_reason = "plane not visible";
1069                 return 0;
1070         }
1071
1072         crtc_state = intel_atomic_get_new_crtc_state(state, crtc);
1073
1074         if (crtc_state->hw.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) {
1075                 plane_state->no_fbc_reason = "interlaced mode not supported";
1076                 return 0;
1077         }
1078
1079         if (crtc_state->double_wide) {
1080                 plane_state->no_fbc_reason = "double wide pipe not supported";
1081                 return 0;
1082         }
1083
1084         /*
1085          * Display 12+ is not supporting FBC with PSR2.
1086          * Recommendation is to keep this combination disabled
1087          * Bspec: 50422 HSD: 14010260002
1088          */
1089         if (DISPLAY_VER(i915) >= 12 && crtc_state->has_psr2) {
1090                 plane_state->no_fbc_reason = "PSR2 enabled";
1091                 return 0;
1092         }
1093
1094         /* Wa_14016291713 */
1095         if ((IS_DISPLAY_VER(i915, 12, 13) ||
1096              IS_MTL_DISPLAY_STEP(i915, STEP_A0, STEP_C0)) &&
1097             crtc_state->has_psr) {
1098                 plane_state->no_fbc_reason = "PSR1 enabled (Wa_14016291713)";
1099                 return 0;
1100         }
1101
1102         if (!pixel_format_is_valid(plane_state)) {
1103                 plane_state->no_fbc_reason = "pixel format not supported";
1104                 return 0;
1105         }
1106
1107         if (!tiling_is_valid(plane_state)) {
1108                 plane_state->no_fbc_reason = "tiling not supported";
1109                 return 0;
1110         }
1111
1112         if (!rotation_is_valid(plane_state)) {
1113                 plane_state->no_fbc_reason = "rotation not supported";
1114                 return 0;
1115         }
1116
1117         if (!stride_is_valid(plane_state)) {
1118                 plane_state->no_fbc_reason = "stride not supported";
1119                 return 0;
1120         }
1121
1122         if (plane_state->hw.pixel_blend_mode != DRM_MODE_BLEND_PIXEL_NONE &&
1123             fb->format->has_alpha) {
1124                 plane_state->no_fbc_reason = "per-pixel alpha not supported";
1125                 return 0;
1126         }
1127
1128         if (!intel_fbc_hw_tracking_covers_screen(plane_state)) {
1129                 plane_state->no_fbc_reason = "plane size too big";
1130                 return 0;
1131         }
1132
1133         /*
1134          * Work around a problem on GEN9+ HW, where enabling FBC on a plane
1135          * having a Y offset that isn't divisible by 4 causes FIFO underrun
1136          * and screen flicker.
1137          */
1138         if (DISPLAY_VER(i915) >= 9 &&
1139             plane_state->view.color_plane[0].y & 3) {
1140                 plane_state->no_fbc_reason = "plane start Y offset misaligned";
1141                 return 0;
1142         }
1143
1144         /* Wa_22010751166: icl, ehl, tgl, dg1, rkl */
1145         if (DISPLAY_VER(i915) >= 11 &&
1146             (plane_state->view.color_plane[0].y +
1147              (drm_rect_height(&plane_state->uapi.src) >> 16)) & 3) {
1148                 plane_state->no_fbc_reason = "plane end Y offset misaligned";
1149                 return 0;
1150         }
1151
1152         /* WaFbcExceedCdClockThreshold:hsw,bdw */
1153         if (IS_HASWELL(i915) || IS_BROADWELL(i915)) {
1154                 const struct intel_cdclk_state *cdclk_state;
1155
1156                 cdclk_state = intel_atomic_get_cdclk_state(state);
1157                 if (IS_ERR(cdclk_state))
1158                         return PTR_ERR(cdclk_state);
1159
1160                 if (crtc_state->pixel_rate >= cdclk_state->logical.cdclk * 95 / 100) {
1161                         plane_state->no_fbc_reason = "pixel rate too high";
1162                         return 0;
1163                 }
1164         }
1165
1166         plane_state->no_fbc_reason = NULL;
1167
1168         return 0;
1169 }
1170
1171
1172 static bool intel_fbc_can_flip_nuke(struct intel_atomic_state *state,
1173                                     struct intel_crtc *crtc,
1174                                     struct intel_plane *plane)
1175 {
1176         const struct intel_crtc_state *new_crtc_state =
1177                 intel_atomic_get_new_crtc_state(state, crtc);
1178         const struct intel_plane_state *old_plane_state =
1179                 intel_atomic_get_old_plane_state(state, plane);
1180         const struct intel_plane_state *new_plane_state =
1181                 intel_atomic_get_new_plane_state(state, plane);
1182         const struct drm_framebuffer *old_fb = old_plane_state->hw.fb;
1183         const struct drm_framebuffer *new_fb = new_plane_state->hw.fb;
1184
1185         if (intel_crtc_needs_modeset(new_crtc_state))
1186                 return false;
1187
1188         if (!intel_fbc_is_ok(old_plane_state) ||
1189             !intel_fbc_is_ok(new_plane_state))
1190                 return false;
1191
1192         if (old_fb->format->format != new_fb->format->format)
1193                 return false;
1194
1195         if (old_fb->modifier != new_fb->modifier)
1196                 return false;
1197
1198         if (intel_fbc_plane_stride(old_plane_state) !=
1199             intel_fbc_plane_stride(new_plane_state))
1200                 return false;
1201
1202         if (intel_fbc_cfb_stride(old_plane_state) !=
1203             intel_fbc_cfb_stride(new_plane_state))
1204                 return false;
1205
1206         if (intel_fbc_cfb_size(old_plane_state) !=
1207             intel_fbc_cfb_size(new_plane_state))
1208                 return false;
1209
1210         if (intel_fbc_override_cfb_stride(old_plane_state) !=
1211             intel_fbc_override_cfb_stride(new_plane_state))
1212                 return false;
1213
1214         return true;
1215 }
1216
1217 static bool __intel_fbc_pre_update(struct intel_atomic_state *state,
1218                                    struct intel_crtc *crtc,
1219                                    struct intel_plane *plane)
1220 {
1221         struct drm_i915_private *i915 = to_i915(state->base.dev);
1222         struct intel_fbc *fbc = plane->fbc;
1223         bool need_vblank_wait = false;
1224
1225         lockdep_assert_held(&fbc->lock);
1226
1227         fbc->flip_pending = true;
1228
1229         if (intel_fbc_can_flip_nuke(state, crtc, plane))
1230                 return need_vblank_wait;
1231
1232         intel_fbc_deactivate(fbc, "update pending");
1233
1234         /*
1235          * Display WA #1198: glk+
1236          * Need an extra vblank wait between FBC disable and most plane
1237          * updates. Bspec says this is only needed for plane disable, but
1238          * that is not true. Touching most plane registers will cause the
1239          * corruption to appear. Also SKL/derivatives do not seem to be
1240          * affected.
1241          *
1242          * TODO: could optimize this a bit by sampling the frame
1243          * counter when we disable FBC (if it was already done earlier)
1244          * and skipping the extra vblank wait before the plane update
1245          * if at least one frame has already passed.
1246          */
1247         if (fbc->activated && DISPLAY_VER(i915) >= 10)
1248                 need_vblank_wait = true;
1249         fbc->activated = false;
1250
1251         return need_vblank_wait;
1252 }
1253
1254 bool intel_fbc_pre_update(struct intel_atomic_state *state,
1255                           struct intel_crtc *crtc)
1256 {
1257         const struct intel_plane_state *plane_state;
1258         bool need_vblank_wait = false;
1259         struct intel_plane *plane;
1260         int i;
1261
1262         for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
1263                 struct intel_fbc *fbc = plane->fbc;
1264
1265                 if (!fbc || plane->pipe != crtc->pipe)
1266                         continue;
1267
1268                 mutex_lock(&fbc->lock);
1269
1270                 if (fbc->state.plane == plane)
1271                         need_vblank_wait |= __intel_fbc_pre_update(state, crtc, plane);
1272
1273                 mutex_unlock(&fbc->lock);
1274         }
1275
1276         return need_vblank_wait;
1277 }
1278
1279 static void __intel_fbc_disable(struct intel_fbc *fbc)
1280 {
1281         struct drm_i915_private *i915 = fbc->i915;
1282         struct intel_plane *plane = fbc->state.plane;
1283
1284         lockdep_assert_held(&fbc->lock);
1285         drm_WARN_ON(&i915->drm, fbc->active);
1286
1287         drm_dbg_kms(&i915->drm, "Disabling FBC on [PLANE:%d:%s]\n",
1288                     plane->base.base.id, plane->base.name);
1289
1290         __intel_fbc_cleanup_cfb(fbc);
1291
1292         fbc->state.plane = NULL;
1293         fbc->flip_pending = false;
1294         fbc->busy_bits = 0;
1295 }
1296
1297 static void __intel_fbc_post_update(struct intel_fbc *fbc)
1298 {
1299         lockdep_assert_held(&fbc->lock);
1300
1301         fbc->flip_pending = false;
1302
1303         if (!fbc->busy_bits)
1304                 intel_fbc_activate(fbc);
1305         else
1306                 intel_fbc_deactivate(fbc, "frontbuffer write");
1307 }
1308
1309 void intel_fbc_post_update(struct intel_atomic_state *state,
1310                            struct intel_crtc *crtc)
1311 {
1312         const struct intel_plane_state *plane_state;
1313         struct intel_plane *plane;
1314         int i;
1315
1316         for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
1317                 struct intel_fbc *fbc = plane->fbc;
1318
1319                 if (!fbc || plane->pipe != crtc->pipe)
1320                         continue;
1321
1322                 mutex_lock(&fbc->lock);
1323
1324                 if (fbc->state.plane == plane)
1325                         __intel_fbc_post_update(fbc);
1326
1327                 mutex_unlock(&fbc->lock);
1328         }
1329 }
1330
1331 static unsigned int intel_fbc_get_frontbuffer_bit(struct intel_fbc *fbc)
1332 {
1333         if (fbc->state.plane)
1334                 return fbc->state.plane->frontbuffer_bit;
1335         else
1336                 return 0;
1337 }
1338
1339 static void __intel_fbc_invalidate(struct intel_fbc *fbc,
1340                                    unsigned int frontbuffer_bits,
1341                                    enum fb_op_origin origin)
1342 {
1343         if (origin == ORIGIN_FLIP || origin == ORIGIN_CURSOR_UPDATE)
1344                 return;
1345
1346         mutex_lock(&fbc->lock);
1347
1348         frontbuffer_bits &= intel_fbc_get_frontbuffer_bit(fbc);
1349         if (!frontbuffer_bits)
1350                 goto out;
1351
1352         fbc->busy_bits |= frontbuffer_bits;
1353         intel_fbc_deactivate(fbc, "frontbuffer write");
1354
1355 out:
1356         mutex_unlock(&fbc->lock);
1357 }
1358
1359 void intel_fbc_invalidate(struct drm_i915_private *i915,
1360                           unsigned int frontbuffer_bits,
1361                           enum fb_op_origin origin)
1362 {
1363         struct intel_fbc *fbc;
1364         enum intel_fbc_id fbc_id;
1365
1366         for_each_intel_fbc(i915, fbc, fbc_id)
1367                 __intel_fbc_invalidate(fbc, frontbuffer_bits, origin);
1368
1369 }
1370
1371 static void __intel_fbc_flush(struct intel_fbc *fbc,
1372                               unsigned int frontbuffer_bits,
1373                               enum fb_op_origin origin)
1374 {
1375         mutex_lock(&fbc->lock);
1376
1377         frontbuffer_bits &= intel_fbc_get_frontbuffer_bit(fbc);
1378         if (!frontbuffer_bits)
1379                 goto out;
1380
1381         fbc->busy_bits &= ~frontbuffer_bits;
1382
1383         if (origin == ORIGIN_FLIP || origin == ORIGIN_CURSOR_UPDATE)
1384                 goto out;
1385
1386         if (fbc->busy_bits || fbc->flip_pending)
1387                 goto out;
1388
1389         if (fbc->active)
1390                 intel_fbc_nuke(fbc);
1391         else
1392                 intel_fbc_activate(fbc);
1393
1394 out:
1395         mutex_unlock(&fbc->lock);
1396 }
1397
1398 void intel_fbc_flush(struct drm_i915_private *i915,
1399                      unsigned int frontbuffer_bits,
1400                      enum fb_op_origin origin)
1401 {
1402         struct intel_fbc *fbc;
1403         enum intel_fbc_id fbc_id;
1404
1405         for_each_intel_fbc(i915, fbc, fbc_id)
1406                 __intel_fbc_flush(fbc, frontbuffer_bits, origin);
1407 }
1408
1409 int intel_fbc_atomic_check(struct intel_atomic_state *state)
1410 {
1411         struct intel_plane_state *plane_state;
1412         struct intel_plane *plane;
1413         int i;
1414
1415         for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
1416                 int ret;
1417
1418                 ret = intel_fbc_check_plane(state, plane);
1419                 if (ret)
1420                         return ret;
1421         }
1422
1423         return 0;
1424 }
1425
1426 static void __intel_fbc_enable(struct intel_atomic_state *state,
1427                                struct intel_crtc *crtc,
1428                                struct intel_plane *plane)
1429 {
1430         struct drm_i915_private *i915 = to_i915(state->base.dev);
1431         const struct intel_plane_state *plane_state =
1432                 intel_atomic_get_new_plane_state(state, plane);
1433         struct intel_fbc *fbc = plane->fbc;
1434
1435         lockdep_assert_held(&fbc->lock);
1436
1437         if (fbc->state.plane) {
1438                 if (fbc->state.plane != plane)
1439                         return;
1440
1441                 if (intel_fbc_is_ok(plane_state)) {
1442                         intel_fbc_update_state(state, crtc, plane);
1443                         return;
1444                 }
1445
1446                 __intel_fbc_disable(fbc);
1447         }
1448
1449         drm_WARN_ON(&i915->drm, fbc->active);
1450
1451         fbc->no_fbc_reason = plane_state->no_fbc_reason;
1452         if (fbc->no_fbc_reason)
1453                 return;
1454
1455         if (!intel_fbc_is_fence_ok(plane_state)) {
1456                 fbc->no_fbc_reason = "framebuffer not fenced";
1457                 return;
1458         }
1459
1460         if (fbc->underrun_detected) {
1461                 fbc->no_fbc_reason = "FIFO underrun";
1462                 return;
1463         }
1464
1465         if (intel_fbc_alloc_cfb(fbc, intel_fbc_cfb_size(plane_state),
1466                                 intel_fbc_min_limit(plane_state))) {
1467                 fbc->no_fbc_reason = "not enough stolen memory";
1468                 return;
1469         }
1470
1471         drm_dbg_kms(&i915->drm, "Enabling FBC on [PLANE:%d:%s]\n",
1472                     plane->base.base.id, plane->base.name);
1473         fbc->no_fbc_reason = "FBC enabled but not active yet\n";
1474
1475         intel_fbc_update_state(state, crtc, plane);
1476
1477         intel_fbc_program_workarounds(fbc);
1478         intel_fbc_program_cfb(fbc);
1479 }
1480
1481 /**
1482  * intel_fbc_disable - disable FBC if it's associated with crtc
1483  * @crtc: the CRTC
1484  *
1485  * This function disables FBC if it's associated with the provided CRTC.
1486  */
1487 void intel_fbc_disable(struct intel_crtc *crtc)
1488 {
1489         struct drm_i915_private *i915 = to_i915(crtc->base.dev);
1490         struct intel_plane *plane;
1491
1492         for_each_intel_plane(&i915->drm, plane) {
1493                 struct intel_fbc *fbc = plane->fbc;
1494
1495                 if (!fbc || plane->pipe != crtc->pipe)
1496                         continue;
1497
1498                 mutex_lock(&fbc->lock);
1499                 if (fbc->state.plane == plane)
1500                         __intel_fbc_disable(fbc);
1501                 mutex_unlock(&fbc->lock);
1502         }
1503 }
1504
1505 void intel_fbc_update(struct intel_atomic_state *state,
1506                       struct intel_crtc *crtc)
1507 {
1508         const struct intel_crtc_state *crtc_state =
1509                 intel_atomic_get_new_crtc_state(state, crtc);
1510         const struct intel_plane_state *plane_state;
1511         struct intel_plane *plane;
1512         int i;
1513
1514         for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
1515                 struct intel_fbc *fbc = plane->fbc;
1516
1517                 if (!fbc || plane->pipe != crtc->pipe)
1518                         continue;
1519
1520                 mutex_lock(&fbc->lock);
1521
1522                 if (intel_crtc_needs_fastset(crtc_state) &&
1523                     plane_state->no_fbc_reason) {
1524                         if (fbc->state.plane == plane)
1525                                 __intel_fbc_disable(fbc);
1526                 } else {
1527                         __intel_fbc_enable(state, crtc, plane);
1528                 }
1529
1530                 mutex_unlock(&fbc->lock);
1531         }
1532 }
1533
1534 static void intel_fbc_underrun_work_fn(struct work_struct *work)
1535 {
1536         struct intel_fbc *fbc = container_of(work, typeof(*fbc), underrun_work);
1537         struct drm_i915_private *i915 = fbc->i915;
1538
1539         mutex_lock(&fbc->lock);
1540
1541         /* Maybe we were scheduled twice. */
1542         if (fbc->underrun_detected || !fbc->state.plane)
1543                 goto out;
1544
1545         drm_dbg_kms(&i915->drm, "Disabling FBC due to FIFO underrun.\n");
1546         fbc->underrun_detected = true;
1547
1548         intel_fbc_deactivate(fbc, "FIFO underrun");
1549         if (!fbc->flip_pending)
1550                 intel_crtc_wait_for_next_vblank(intel_crtc_for_pipe(i915, fbc->state.plane->pipe));
1551         __intel_fbc_disable(fbc);
1552 out:
1553         mutex_unlock(&fbc->lock);
1554 }
1555
1556 static void __intel_fbc_reset_underrun(struct intel_fbc *fbc)
1557 {
1558         struct drm_i915_private *i915 = fbc->i915;
1559
1560         cancel_work_sync(&fbc->underrun_work);
1561
1562         mutex_lock(&fbc->lock);
1563
1564         if (fbc->underrun_detected) {
1565                 drm_dbg_kms(&i915->drm,
1566                             "Re-allowing FBC after fifo underrun\n");
1567                 fbc->no_fbc_reason = "FIFO underrun cleared";
1568         }
1569
1570         fbc->underrun_detected = false;
1571         mutex_unlock(&fbc->lock);
1572 }
1573
1574 /*
1575  * intel_fbc_reset_underrun - reset FBC fifo underrun status.
1576  * @i915: the i915 device
1577  *
1578  * See intel_fbc_handle_fifo_underrun_irq(). For automated testing we
1579  * want to re-enable FBC after an underrun to increase test coverage.
1580  */
1581 void intel_fbc_reset_underrun(struct drm_i915_private *i915)
1582 {
1583         struct intel_fbc *fbc;
1584         enum intel_fbc_id fbc_id;
1585
1586         for_each_intel_fbc(i915, fbc, fbc_id)
1587                 __intel_fbc_reset_underrun(fbc);
1588 }
1589
1590 static void __intel_fbc_handle_fifo_underrun_irq(struct intel_fbc *fbc)
1591 {
1592         /*
1593          * There's no guarantee that underrun_detected won't be set to true
1594          * right after this check and before the work is scheduled, but that's
1595          * not a problem since we'll check it again under the work function
1596          * while FBC is locked. This check here is just to prevent us from
1597          * unnecessarily scheduling the work, and it relies on the fact that we
1598          * never switch underrun_detect back to false after it's true.
1599          */
1600         if (READ_ONCE(fbc->underrun_detected))
1601                 return;
1602
1603         schedule_work(&fbc->underrun_work);
1604 }
1605
1606 /**
1607  * intel_fbc_handle_fifo_underrun_irq - disable FBC when we get a FIFO underrun
1608  * @i915: i915 device
1609  *
1610  * Without FBC, most underruns are harmless and don't really cause too many
1611  * problems, except for an annoying message on dmesg. With FBC, underruns can
1612  * become black screens or even worse, especially when paired with bad
1613  * watermarks. So in order for us to be on the safe side, completely disable FBC
1614  * in case we ever detect a FIFO underrun on any pipe. An underrun on any pipe
1615  * already suggests that watermarks may be bad, so try to be as safe as
1616  * possible.
1617  *
1618  * This function is called from the IRQ handler.
1619  */
1620 void intel_fbc_handle_fifo_underrun_irq(struct drm_i915_private *i915)
1621 {
1622         struct intel_fbc *fbc;
1623         enum intel_fbc_id fbc_id;
1624
1625         for_each_intel_fbc(i915, fbc, fbc_id)
1626                 __intel_fbc_handle_fifo_underrun_irq(fbc);
1627 }
1628
1629 /*
1630  * The DDX driver changes its behavior depending on the value it reads from
1631  * i915.enable_fbc, so sanitize it by translating the default value into either
1632  * 0 or 1 in order to allow it to know what's going on.
1633  *
1634  * Notice that this is done at driver initialization and we still allow user
1635  * space to change the value during runtime without sanitizing it again. IGT
1636  * relies on being able to change i915.enable_fbc at runtime.
1637  */
1638 static int intel_sanitize_fbc_option(struct drm_i915_private *i915)
1639 {
1640         if (i915->params.enable_fbc >= 0)
1641                 return !!i915->params.enable_fbc;
1642
1643         if (!HAS_FBC(i915))
1644                 return 0;
1645
1646         if (IS_BROADWELL(i915) || DISPLAY_VER(i915) >= 9)
1647                 return 1;
1648
1649         return 0;
1650 }
1651
1652 static bool need_fbc_vtd_wa(struct drm_i915_private *i915)
1653 {
1654         /* WaFbcTurnOffFbcWhenHyperVisorIsUsed:skl,bxt */
1655         if (i915_vtd_active(i915) &&
1656             (IS_SKYLAKE(i915) || IS_BROXTON(i915))) {
1657                 drm_info(&i915->drm,
1658                          "Disabling framebuffer compression (FBC) to prevent screen flicker with VT-d enabled\n");
1659                 return true;
1660         }
1661
1662         return false;
1663 }
1664
1665 void intel_fbc_add_plane(struct intel_fbc *fbc, struct intel_plane *plane)
1666 {
1667         plane->fbc = fbc;
1668 }
1669
1670 static struct intel_fbc *intel_fbc_create(struct drm_i915_private *i915,
1671                                           enum intel_fbc_id fbc_id)
1672 {
1673         struct intel_fbc *fbc;
1674
1675         fbc = kzalloc(sizeof(*fbc), GFP_KERNEL);
1676         if (!fbc)
1677                 return NULL;
1678
1679         fbc->id = fbc_id;
1680         fbc->i915 = i915;
1681         INIT_WORK(&fbc->underrun_work, intel_fbc_underrun_work_fn);
1682         mutex_init(&fbc->lock);
1683
1684         if (DISPLAY_VER(i915) >= 7)
1685                 fbc->funcs = &ivb_fbc_funcs;
1686         else if (DISPLAY_VER(i915) == 6)
1687                 fbc->funcs = &snb_fbc_funcs;
1688         else if (DISPLAY_VER(i915) == 5)
1689                 fbc->funcs = &ilk_fbc_funcs;
1690         else if (IS_G4X(i915))
1691                 fbc->funcs = &g4x_fbc_funcs;
1692         else if (DISPLAY_VER(i915) == 4)
1693                 fbc->funcs = &i965_fbc_funcs;
1694         else
1695                 fbc->funcs = &i8xx_fbc_funcs;
1696
1697         return fbc;
1698 }
1699
1700 /**
1701  * intel_fbc_init - Initialize FBC
1702  * @i915: the i915 device
1703  *
1704  * This function might be called during PM init process.
1705  */
1706 void intel_fbc_init(struct drm_i915_private *i915)
1707 {
1708         enum intel_fbc_id fbc_id;
1709
1710         if (!drm_mm_initialized(&i915->mm.stolen))
1711                 DISPLAY_RUNTIME_INFO(i915)->fbc_mask = 0;
1712
1713         if (need_fbc_vtd_wa(i915))
1714                 DISPLAY_RUNTIME_INFO(i915)->fbc_mask = 0;
1715
1716         i915->params.enable_fbc = intel_sanitize_fbc_option(i915);
1717         drm_dbg_kms(&i915->drm, "Sanitized enable_fbc value: %d\n",
1718                     i915->params.enable_fbc);
1719
1720         for_each_fbc_id(i915, fbc_id)
1721                 i915->display.fbc[fbc_id] = intel_fbc_create(i915, fbc_id);
1722 }
1723
1724 /**
1725  * intel_fbc_sanitize - Sanitize FBC
1726  * @i915: the i915 device
1727  *
1728  * Make sure FBC is initially disabled since we have no
1729  * idea eg. into which parts of stolen it might be scribbling
1730  * into.
1731  */
1732 void intel_fbc_sanitize(struct drm_i915_private *i915)
1733 {
1734         struct intel_fbc *fbc;
1735         enum intel_fbc_id fbc_id;
1736
1737         for_each_intel_fbc(i915, fbc, fbc_id) {
1738                 if (intel_fbc_hw_is_active(fbc))
1739                         intel_fbc_hw_deactivate(fbc);
1740         }
1741 }
1742
1743 static int intel_fbc_debugfs_status_show(struct seq_file *m, void *unused)
1744 {
1745         struct intel_fbc *fbc = m->private;
1746         struct drm_i915_private *i915 = fbc->i915;
1747         struct intel_plane *plane;
1748         intel_wakeref_t wakeref;
1749
1750         drm_modeset_lock_all(&i915->drm);
1751
1752         wakeref = intel_runtime_pm_get(&i915->runtime_pm);
1753         mutex_lock(&fbc->lock);
1754
1755         if (fbc->active) {
1756                 seq_puts(m, "FBC enabled\n");
1757                 seq_printf(m, "Compressing: %s\n",
1758                            str_yes_no(intel_fbc_is_compressing(fbc)));
1759         } else {
1760                 seq_printf(m, "FBC disabled: %s\n", fbc->no_fbc_reason);
1761         }
1762
1763         for_each_intel_plane(&i915->drm, plane) {
1764                 const struct intel_plane_state *plane_state =
1765                         to_intel_plane_state(plane->base.state);
1766
1767                 if (plane->fbc != fbc)
1768                         continue;
1769
1770                 seq_printf(m, "%c [PLANE:%d:%s]: %s\n",
1771                            fbc->state.plane == plane ? '*' : ' ',
1772                            plane->base.base.id, plane->base.name,
1773                            plane_state->no_fbc_reason ?: "FBC possible");
1774         }
1775
1776         mutex_unlock(&fbc->lock);
1777         intel_runtime_pm_put(&i915->runtime_pm, wakeref);
1778
1779         drm_modeset_unlock_all(&i915->drm);
1780
1781         return 0;
1782 }
1783
1784 DEFINE_SHOW_ATTRIBUTE(intel_fbc_debugfs_status);
1785
1786 static int intel_fbc_debugfs_false_color_get(void *data, u64 *val)
1787 {
1788         struct intel_fbc *fbc = data;
1789
1790         *val = fbc->false_color;
1791
1792         return 0;
1793 }
1794
1795 static int intel_fbc_debugfs_false_color_set(void *data, u64 val)
1796 {
1797         struct intel_fbc *fbc = data;
1798
1799         mutex_lock(&fbc->lock);
1800
1801         fbc->false_color = val;
1802
1803         if (fbc->active)
1804                 fbc->funcs->set_false_color(fbc, fbc->false_color);
1805
1806         mutex_unlock(&fbc->lock);
1807
1808         return 0;
1809 }
1810
1811 DEFINE_DEBUGFS_ATTRIBUTE(intel_fbc_debugfs_false_color_fops,
1812                          intel_fbc_debugfs_false_color_get,
1813                          intel_fbc_debugfs_false_color_set,
1814                          "%llu\n");
1815
1816 static void intel_fbc_debugfs_add(struct intel_fbc *fbc,
1817                                   struct dentry *parent)
1818 {
1819         debugfs_create_file("i915_fbc_status", 0444, parent,
1820                             fbc, &intel_fbc_debugfs_status_fops);
1821
1822         if (fbc->funcs->set_false_color)
1823                 debugfs_create_file_unsafe("i915_fbc_false_color", 0644, parent,
1824                                            fbc, &intel_fbc_debugfs_false_color_fops);
1825 }
1826
1827 void intel_fbc_crtc_debugfs_add(struct intel_crtc *crtc)
1828 {
1829         struct intel_plane *plane = to_intel_plane(crtc->base.primary);
1830
1831         if (plane->fbc)
1832                 intel_fbc_debugfs_add(plane->fbc, crtc->base.debugfs_entry);
1833 }
1834
1835 /* FIXME: remove this once igt is on board with per-crtc stuff */
1836 void intel_fbc_debugfs_register(struct drm_i915_private *i915)
1837 {
1838         struct drm_minor *minor = i915->drm.primary;
1839         struct intel_fbc *fbc;
1840
1841         fbc = i915->display.fbc[INTEL_FBC_A];
1842         if (fbc)
1843                 intel_fbc_debugfs_add(fbc, minor->debugfs_root);
1844 }
This page took 0.143473 seconds and 4 git commands to generate.