]> Git Repo - linux.git/blob - drivers/gpu/drm/i915/display/intel_fbc.c
net: wan: Add framer framework support
[linux.git] / drivers / gpu / drm / i915 / display / intel_fbc.c
1 /*
2  * Copyright © 2014 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21  * DEALINGS IN THE SOFTWARE.
22  */
23
24 /**
25  * DOC: Frame Buffer Compression (FBC)
26  *
27  * FBC tries to save memory bandwidth (and so power consumption) by
 * compressing the amount of memory used by the display. It is totally
 * transparent to user space and completely handled in the kernel.
30  *
31  * The benefits of FBC are mostly visible with solid backgrounds and
32  * variation-less patterns. It comes from keeping the memory footprint small
33  * and having fewer memory pages opened and accessed for refreshing the display.
34  *
 * i915 is responsible for reserving stolen memory for FBC and configuring
 * its offset in the proper registers. The hardware takes care of all
37  * compress/decompress. However there are many known cases where we have to
38  * forcibly disable it to allow proper screen updates.
39  */
40
41 #include <linux/string_helpers.h>
42
43 #include <drm/drm_blend.h>
44 #include <drm/drm_fourcc.h>
45
46 #include "i915_drv.h"
47 #include "i915_reg.h"
48 #include "i915_utils.h"
49 #include "i915_vgpu.h"
50 #include "i915_vma.h"
51 #include "intel_cdclk.h"
52 #include "intel_de.h"
53 #include "intel_display_device.h"
54 #include "intel_display_trace.h"
55 #include "intel_display_types.h"
56 #include "intel_fbc.h"
57 #include "intel_frontbuffer.h"
58
/* Iterate over the FBC instance IDs advertised in the device's fbc_mask */
#define for_each_fbc_id(__dev_priv, __fbc_id) \
	for ((__fbc_id) = INTEL_FBC_A; (__fbc_id) < I915_MAX_FBCS; (__fbc_id)++) \
		for_each_if(DISPLAY_RUNTIME_INFO(__dev_priv)->fbc_mask & BIT(__fbc_id))

/* Iterate over the allocated struct intel_fbc instances (skips NULL slots) */
#define for_each_intel_fbc(__dev_priv, __fbc, __fbc_id) \
	for_each_fbc_id((__dev_priv), (__fbc_id)) \
		for_each_if((__fbc) = (__dev_priv)->display.fbc[(__fbc_id)])
66
/* Per-platform hardware access vtable for one FBC instance */
struct intel_fbc_funcs {
	void (*activate)(struct intel_fbc *fbc);	/* enable compression in hw */
	void (*deactivate)(struct intel_fbc *fbc);	/* disable compression in hw */
	bool (*is_active)(struct intel_fbc *fbc);	/* hw enable bit set? */
	bool (*is_compressing)(struct intel_fbc *fbc);	/* hw reports compressed segments? */
	void (*nuke)(struct intel_fbc *fbc);		/* force full recompression */
	void (*program_cfb)(struct intel_fbc *fbc);	/* program CFB base address(es) */
	void (*set_false_color)(struct intel_fbc *fbc, bool enable); /* debug aid, optional */
};
76
/*
 * Snapshot of everything needed to program the FBC registers for the
 * current configuration (see the comment on intel_fbc.state below).
 */
struct intel_fbc_state {
	struct intel_plane *plane;	/* plane being compressed */
	unsigned int cfb_stride;	/* compressed framebuffer stride, bytes */
	unsigned int cfb_size;		/* compressed framebuffer size, bytes */
	unsigned int fence_y_offset;
	u16 override_cfb_stride;	/* non-zero: value for the stride override regs */
	u16 interval;			/* value programmed via FBC_CTL_INTERVAL() */
	s8 fence_id;			/* fence number, negative when unfenced */
};
86
/* One FBC hardware instance */
struct intel_fbc {
	struct drm_i915_private *i915;
	const struct intel_fbc_funcs *funcs;	/* platform hw access vtable */

	/*
	 * This is always the inner lock when overlapping with
	 * struct_mutex and it's the outer lock when overlapping
	 * with stolen_lock.
	 */
	struct mutex lock;
	unsigned int busy_bits;

	/* stolen memory for the CFB and (pre-g4x only) the line length buffer */
	struct i915_stolen_fb compressed_fb, compressed_llb;

	enum intel_fbc_id id;

	u8 limit;		/* current compression limit: 1, 2 or 4 */

	bool false_color;	/* debug false color mode requested */

	bool active;		/* compression currently enabled in hw */
	bool activated;		/* hw was activated at least once */
	bool flip_pending;

	bool underrun_detected;
	struct work_struct underrun_work;

	/*
	 * This structure contains everything that's relevant to program the
	 * hardware registers. When we want to figure out if we need to disable
	 * and re-enable FBC for a new configuration we just check if there's
	 * something different in the struct. The genx_fbc_activate functions
	 * are supposed to read from it in order to program the registers.
	 */
	struct intel_fbc_state state;
	const char *no_fbc_reason;	/* human readable reason FBC is off, or NULL */
};
124
125 /* plane stride in pixels */
126 static unsigned int intel_fbc_plane_stride(const struct intel_plane_state *plane_state)
127 {
128         const struct drm_framebuffer *fb = plane_state->hw.fb;
129         unsigned int stride;
130
131         stride = plane_state->view.color_plane[0].mapping_stride;
132         if (!drm_rotation_90_or_270(plane_state->hw.rotation))
133                 stride /= fb->format->cpp[0];
134
135         return stride;
136 }
137
/* plane stride based cfb stride in bytes, assuming 1:1 compression limit */
static unsigned int _intel_fbc_cfb_stride(const struct intel_plane_state *plane_state)
{
	const unsigned int cpp = 4; /* FBC always 4 bytes per pixel */

	return cpp * intel_fbc_plane_stride(plane_state);
}
145
/*
 * Minimum acceptable cfb stride in bytes, assuming 1:1 compression limit.
 *
 * Works out the smallest stride the hardware can be told to use for a
 * 4 line FBC segment at the worst case (1:4) compression limit, then
 * converts it back to a single line 1:1 equivalent for comparison with
 * the plane derived stride.
 */
static unsigned int skl_fbc_min_cfb_stride(const struct intel_plane_state *plane_state)
{
	struct drm_i915_private *i915 = to_i915(plane_state->uapi.plane->dev);
	unsigned int limit = 4; /* 1:4 compression limit is the worst case */
	unsigned int cpp = 4; /* FBC always 4 bytes per pixel */
	unsigned int width = drm_rect_width(&plane_state->uapi.src) >> 16;
	unsigned int height = 4; /* FBC segment is 4 lines */
	unsigned int stride;

	/* minimum segment stride we can use */
	stride = width * cpp * height / limit;

	/*
	 * Wa_16011863758: icl+
	 * Avoid some hardware segment address miscalculation.
	 */
	if (DISPLAY_VER(i915) >= 11)
		stride += 64;

	/*
	 * At least some of the platforms require each 4 line segment to
	 * be 512 byte aligned. Just do it always for simplicity.
	 */
	stride = ALIGN(stride, 512);

	/* convert back to single line equivalent with 1:1 compression limit */
	return stride * limit / height;
}
175
/*
 * Properly aligned cfb stride in bytes, assuming 1:1 compression limit.
 * On skl+ this also enforces the minimum stride from skl_fbc_min_cfb_stride().
 */
static unsigned int intel_fbc_cfb_stride(const struct intel_plane_state *plane_state)
{
	struct drm_i915_private *i915 = to_i915(plane_state->uapi.plane->dev);
	unsigned int stride = _intel_fbc_cfb_stride(plane_state);

	/*
	 * At least some of the platforms require each 4 line segment to
	 * be 512 byte aligned. Aligning each line to 512 bytes guarantees
	 * that regardless of the compression limit we choose later.
	 */
	if (DISPLAY_VER(i915) >= 9)
		return max(ALIGN(stride, 512), skl_fbc_min_cfb_stride(plane_state));
	else
		return stride;
}
192
193 static unsigned int intel_fbc_cfb_size(const struct intel_plane_state *plane_state)
194 {
195         struct drm_i915_private *i915 = to_i915(plane_state->uapi.plane->dev);
196         int lines = drm_rect_height(&plane_state->uapi.src) >> 16;
197
198         if (DISPLAY_VER(i915) == 7)
199                 lines = min(lines, 2048);
200         else if (DISPLAY_VER(i915) >= 8)
201                 lines = min(lines, 2560);
202
203         return lines * intel_fbc_cfb_stride(plane_state);
204 }
205
/*
 * Returns the value for the cfb stride override registers (in 64 byte
 * units per 4 line segment), or 0 when no override is needed.
 */
static u16 intel_fbc_override_cfb_stride(const struct intel_plane_state *plane_state)
{
	struct drm_i915_private *i915 = to_i915(plane_state->uapi.plane->dev);
	unsigned int stride_aligned = intel_fbc_cfb_stride(plane_state);
	unsigned int stride = _intel_fbc_cfb_stride(plane_state);
	const struct drm_framebuffer *fb = plane_state->hw.fb;

	/*
	 * Override stride in 64 byte units per 4 line segment.
	 *
	 * Gen9 hw miscalculates cfb stride for linear as
	 * PLANE_STRIDE*512 instead of PLANE_STRIDE*64, so
	 * we always need to use the override there.
	 */
	if (stride != stride_aligned ||
	    (DISPLAY_VER(i915) == 9 && fb->modifier == DRM_FORMAT_MOD_LINEAR))
		return stride_aligned * 4 / 64;

	return 0;
}
226
/* Compute the FBC_CONTROL value (minus the enable bit) from fbc->state */
static u32 i8xx_fbc_ctl(struct intel_fbc *fbc)
{
	const struct intel_fbc_state *fbc_state = &fbc->state;
	struct drm_i915_private *i915 = fbc->i915;
	unsigned int cfb_stride;
	u32 fbc_ctl;

	/* scale the stride by the chosen compression limit */
	cfb_stride = fbc_state->cfb_stride / fbc->limit;

	/* FBC_CTL wants 32B or 64B units */
	if (DISPLAY_VER(i915) == 2)
		cfb_stride = (cfb_stride / 32) - 1;
	else
		cfb_stride = (cfb_stride / 64) - 1;

	fbc_ctl = FBC_CTL_PERIODIC |
		FBC_CTL_INTERVAL(fbc_state->interval) |
		FBC_CTL_STRIDE(cfb_stride);

	if (IS_I945GM(i915))
		fbc_ctl |= FBC_CTL_C3_IDLE; /* 945 needs special SR handling */

	/* negative fence_id means no fence is used */
	if (fbc_state->fence_id >= 0)
		fbc_ctl |= FBC_CTL_FENCENO(fbc_state->fence_id);

	return fbc_ctl;
}
254
/* Compute the gen4 FBC_CONTROL2 value from fbc->state */
static u32 i965_fbc_ctl2(struct intel_fbc *fbc)
{
	const struct intel_fbc_state *fbc_state = &fbc->state;
	u32 fbc_ctl2;

	fbc_ctl2 = FBC_CTL_FENCE_DBL | FBC_CTL_IDLE_IMM |
		FBC_CTL_PLANE(fbc_state->plane->i9xx_plane);

	/* negative fence_id means no fence is used */
	if (fbc_state->fence_id >= 0)
		fbc_ctl2 |= FBC_CTL_CPU_FENCE_EN;

	return fbc_ctl2;
}
268
/* Disable compression on gen2-4 and wait for the hw to go idle */
static void i8xx_fbc_deactivate(struct intel_fbc *fbc)
{
	struct drm_i915_private *i915 = fbc->i915;
	u32 fbc_ctl;

	/* Disable compression */
	fbc_ctl = intel_de_read(i915, FBC_CONTROL);
	if ((fbc_ctl & FBC_CTL_EN) == 0)
		return;	/* already disabled, nothing to do */

	fbc_ctl &= ~FBC_CTL_EN;
	intel_de_write(i915, FBC_CONTROL, fbc_ctl);

	/* Wait for compressing bit to clear */
	if (intel_de_wait_for_clear(i915, FBC_STATUS,
				    FBC_STAT_COMPRESSING, 10)) {
		drm_dbg_kms(&i915->drm, "FBC idle timed out\n");
		return;
	}
}
289
/* Program and enable compression on gen2-4 */
static void i8xx_fbc_activate(struct intel_fbc *fbc)
{
	const struct intel_fbc_state *fbc_state = &fbc->state;
	struct drm_i915_private *i915 = fbc->i915;
	int i;

	/* Clear old tags */
	for (i = 0; i < (FBC_LL_SIZE / 32) + 1; i++)
		intel_de_write(i915, FBC_TAG(i), 0);

	/* gen4 has extra fence/plane configuration registers */
	if (DISPLAY_VER(i915) == 4) {
		intel_de_write(i915, FBC_CONTROL2,
			       i965_fbc_ctl2(fbc));
		intel_de_write(i915, FBC_FENCE_OFF,
			       fbc_state->fence_y_offset);
	}

	intel_de_write(i915, FBC_CONTROL,
		       FBC_CTL_EN | i8xx_fbc_ctl(fbc));
}
310
311 static bool i8xx_fbc_is_active(struct intel_fbc *fbc)
312 {
313         return intel_de_read(fbc->i915, FBC_CONTROL) & FBC_CTL_EN;
314 }
315
316 static bool i8xx_fbc_is_compressing(struct intel_fbc *fbc)
317 {
318         return intel_de_read(fbc->i915, FBC_STATUS) &
319                 (FBC_STAT_COMPRESSING | FBC_STAT_COMPRESSED);
320 }
321
/* Force recompression by rewriting the plane's DSPADDR with its own value */
static void i8xx_fbc_nuke(struct intel_fbc *fbc)
{
	struct intel_fbc_state *fbc_state = &fbc->state;
	enum i9xx_plane_id i9xx_plane = fbc_state->plane->i9xx_plane;
	struct drm_i915_private *dev_priv = fbc->i915;

	intel_de_write_fw(dev_priv, DSPADDR(i9xx_plane),
			  intel_de_read_fw(dev_priv, DSPADDR(i9xx_plane)));
}
331
/* Program the CFB and line length buffer base addresses (gen2-4) */
static void i8xx_fbc_program_cfb(struct intel_fbc *fbc)
{
	struct drm_i915_private *i915 = fbc->i915;

	/* the registers only hold 32 bit addresses; warn if stolen is above that */
	drm_WARN_ON(&i915->drm,
		    range_overflows_end_t(u64, i915_gem_stolen_area_address(i915),
					  i915_gem_stolen_node_offset(&fbc->compressed_fb),
					  U32_MAX));
	drm_WARN_ON(&i915->drm,
		    range_overflows_end_t(u64, i915_gem_stolen_area_address(i915),
					  i915_gem_stolen_node_offset(&fbc->compressed_llb),
					  U32_MAX));
	intel_de_write(i915, FBC_CFB_BASE,
		       i915_gem_stolen_node_address(i915, &fbc->compressed_fb));
	intel_de_write(i915, FBC_LL_BASE,
		       i915_gem_stolen_node_address(i915, &fbc->compressed_llb));
}
349
/* gen2/3 FBC */
static const struct intel_fbc_funcs i8xx_fbc_funcs = {
	.activate = i8xx_fbc_activate,
	.deactivate = i8xx_fbc_deactivate,
	.is_active = i8xx_fbc_is_active,
	.is_compressing = i8xx_fbc_is_compressing,
	.nuke = i8xx_fbc_nuke,
	.program_cfb = i8xx_fbc_program_cfb,
};
358
/* Force recompression by rewriting the plane's DSPSURF with its own value */
static void i965_fbc_nuke(struct intel_fbc *fbc)
{
	struct intel_fbc_state *fbc_state = &fbc->state;
	enum i9xx_plane_id i9xx_plane = fbc_state->plane->i9xx_plane;
	struct drm_i915_private *dev_priv = fbc->i915;

	intel_de_write_fw(dev_priv, DSPSURF(i9xx_plane),
			  intel_de_read_fw(dev_priv, DSPSURF(i9xx_plane)));
}
368
/* gen4 FBC: same as gen2/3 except the nuke goes through DSPSURF */
static const struct intel_fbc_funcs i965_fbc_funcs = {
	.activate = i8xx_fbc_activate,
	.deactivate = i8xx_fbc_deactivate,
	.is_active = i8xx_fbc_is_active,
	.is_compressing = i8xx_fbc_is_compressing,
	.nuke = i965_fbc_nuke,
	.program_cfb = i8xx_fbc_program_cfb,
};
377
378 static u32 g4x_dpfc_ctl_limit(struct intel_fbc *fbc)
379 {
380         switch (fbc->limit) {
381         default:
382                 MISSING_CASE(fbc->limit);
383                 fallthrough;
384         case 1:
385                 return DPFC_CTL_LIMIT_1X;
386         case 2:
387                 return DPFC_CTL_LIMIT_2X;
388         case 4:
389                 return DPFC_CTL_LIMIT_4X;
390         }
391 }
392
/* Compute the DPFC_CONTROL value (minus the enable bit) from fbc->state */
static u32 g4x_dpfc_ctl(struct intel_fbc *fbc)
{
	const struct intel_fbc_state *fbc_state = &fbc->state;
	struct drm_i915_private *i915 = fbc->i915;
	u32 dpfc_ctl;

	dpfc_ctl = g4x_dpfc_ctl_limit(fbc) |
		DPFC_CTL_PLANE_G4X(fbc_state->plane->i9xx_plane);

	if (IS_G4X(i915))
		dpfc_ctl |= DPFC_CTL_SR_EN;

	/* negative fence_id means no fence is used */
	if (fbc_state->fence_id >= 0) {
		dpfc_ctl |= DPFC_CTL_FENCE_EN_G4X;

		/* snb+ programs the fence number elsewhere (SNB_DPFC_CTL_SA) */
		if (DISPLAY_VER(i915) < 6)
			dpfc_ctl |= DPFC_CTL_FENCENO(fbc_state->fence_id);
	}

	return dpfc_ctl;
}
414
/* Program and enable compression on g4x */
static void g4x_fbc_activate(struct intel_fbc *fbc)
{
	const struct intel_fbc_state *fbc_state = &fbc->state;
	struct drm_i915_private *i915 = fbc->i915;

	intel_de_write(i915, DPFC_FENCE_YOFF,
		       fbc_state->fence_y_offset);

	intel_de_write(i915, DPFC_CONTROL,
		       DPFC_CTL_EN | g4x_dpfc_ctl(fbc));
}
426
427 static void g4x_fbc_deactivate(struct intel_fbc *fbc)
428 {
429         struct drm_i915_private *i915 = fbc->i915;
430         u32 dpfc_ctl;
431
432         /* Disable compression */
433         dpfc_ctl = intel_de_read(i915, DPFC_CONTROL);
434         if (dpfc_ctl & DPFC_CTL_EN) {
435                 dpfc_ctl &= ~DPFC_CTL_EN;
436                 intel_de_write(i915, DPFC_CONTROL, dpfc_ctl);
437         }
438 }
439
440 static bool g4x_fbc_is_active(struct intel_fbc *fbc)
441 {
442         return intel_de_read(fbc->i915, DPFC_CONTROL) & DPFC_CTL_EN;
443 }
444
445 static bool g4x_fbc_is_compressing(struct intel_fbc *fbc)
446 {
447         return intel_de_read(fbc->i915, DPFC_STATUS) & DPFC_COMP_SEG_MASK;
448 }
449
/* Program the CFB base (stolen offset) on g4x */
static void g4x_fbc_program_cfb(struct intel_fbc *fbc)
{
	struct drm_i915_private *i915 = fbc->i915;

	intel_de_write(i915, DPFC_CB_BASE,
		       i915_gem_stolen_node_offset(&fbc->compressed_fb));
}
457
/* g4x FBC */
static const struct intel_fbc_funcs g4x_fbc_funcs = {
	.activate = g4x_fbc_activate,
	.deactivate = g4x_fbc_deactivate,
	.is_active = g4x_fbc_is_active,
	.is_compressing = g4x_fbc_is_compressing,
	.nuke = i965_fbc_nuke,
	.program_cfb = g4x_fbc_program_cfb,
};
466
/* Program and enable compression on ilk+ (per-instance registers) */
static void ilk_fbc_activate(struct intel_fbc *fbc)
{
	struct intel_fbc_state *fbc_state = &fbc->state;
	struct drm_i915_private *i915 = fbc->i915;

	intel_de_write(i915, ILK_DPFC_FENCE_YOFF(fbc->id),
		       fbc_state->fence_y_offset);

	intel_de_write(i915, ILK_DPFC_CONTROL(fbc->id),
		       DPFC_CTL_EN | g4x_dpfc_ctl(fbc));
}
478
479 static void ilk_fbc_deactivate(struct intel_fbc *fbc)
480 {
481         struct drm_i915_private *i915 = fbc->i915;
482         u32 dpfc_ctl;
483
484         /* Disable compression */
485         dpfc_ctl = intel_de_read(i915, ILK_DPFC_CONTROL(fbc->id));
486         if (dpfc_ctl & DPFC_CTL_EN) {
487                 dpfc_ctl &= ~DPFC_CTL_EN;
488                 intel_de_write(i915, ILK_DPFC_CONTROL(fbc->id), dpfc_ctl);
489         }
490 }
491
492 static bool ilk_fbc_is_active(struct intel_fbc *fbc)
493 {
494         return intel_de_read(fbc->i915, ILK_DPFC_CONTROL(fbc->id)) & DPFC_CTL_EN;
495 }
496
497 static bool ilk_fbc_is_compressing(struct intel_fbc *fbc)
498 {
499         return intel_de_read(fbc->i915, ILK_DPFC_STATUS(fbc->id)) & DPFC_COMP_SEG_MASK;
500 }
501
/* Program the CFB base (stolen offset) on ilk+ */
static void ilk_fbc_program_cfb(struct intel_fbc *fbc)
{
	struct drm_i915_private *i915 = fbc->i915;

	intel_de_write(i915, ILK_DPFC_CB_BASE(fbc->id),
		       i915_gem_stolen_node_offset(&fbc->compressed_fb));
}
509
/* ilk FBC */
static const struct intel_fbc_funcs ilk_fbc_funcs = {
	.activate = ilk_fbc_activate,
	.deactivate = ilk_fbc_deactivate,
	.is_active = ilk_fbc_is_active,
	.is_compressing = ilk_fbc_is_compressing,
	.nuke = i965_fbc_nuke,
	.program_cfb = ilk_fbc_program_cfb,
};
518
/* Program the snb+ CPU fence registers (cleared when fence_id is negative) */
static void snb_fbc_program_fence(struct intel_fbc *fbc)
{
	const struct intel_fbc_state *fbc_state = &fbc->state;
	struct drm_i915_private *i915 = fbc->i915;
	u32 ctl = 0;

	if (fbc_state->fence_id >= 0)
		ctl = SNB_DPFC_FENCE_EN | SNB_DPFC_FENCENO(fbc_state->fence_id);

	intel_de_write(i915, SNB_DPFC_CTL_SA, ctl);
	intel_de_write(i915, SNB_DPFC_CPU_FENCE_OFFSET, fbc_state->fence_y_offset);
}
531
/* snb activation: program the fence registers, then enable as on ilk */
static void snb_fbc_activate(struct intel_fbc *fbc)
{
	snb_fbc_program_fence(fbc);

	ilk_fbc_activate(fbc);
}
538
/* Force recompression via the snb+ render nuke message register */
static void snb_fbc_nuke(struct intel_fbc *fbc)
{
	struct drm_i915_private *i915 = fbc->i915;

	intel_de_write(i915, MSG_FBC_REND_STATE(fbc->id), FBC_REND_NUKE);
	/* posting read to make sure the write has landed */
	intel_de_posting_read(i915, MSG_FBC_REND_STATE(fbc->id));
}
546
/* snb FBC */
static const struct intel_fbc_funcs snb_fbc_funcs = {
	.activate = snb_fbc_activate,
	.deactivate = ilk_fbc_deactivate,
	.is_active = ilk_fbc_is_active,
	.is_compressing = ilk_fbc_is_compressing,
	.nuke = snb_fbc_nuke,
	.program_cfb = ilk_fbc_program_cfb,
};
555
/* Program the glk+ cfb stride override register (0 disables the override) */
static void glk_fbc_program_cfb_stride(struct intel_fbc *fbc)
{
	const struct intel_fbc_state *fbc_state = &fbc->state;
	struct drm_i915_private *i915 = fbc->i915;
	u32 val = 0;

	if (fbc_state->override_cfb_stride)
		val |= FBC_STRIDE_OVERRIDE |
			FBC_STRIDE(fbc_state->override_cfb_stride / fbc->limit);

	intel_de_write(i915, GLK_FBC_STRIDE(fbc->id), val);
}
568
/* Program the skl cfb stride override via the CHICKEN_MISC_4 register */
static void skl_fbc_program_cfb_stride(struct intel_fbc *fbc)
{
	const struct intel_fbc_state *fbc_state = &fbc->state;
	struct drm_i915_private *i915 = fbc->i915;
	u32 val = 0;

	/* Display WA #0529: skl, kbl, bxt. */
	if (fbc_state->override_cfb_stride)
		val |= CHICKEN_FBC_STRIDE_OVERRIDE |
			CHICKEN_FBC_STRIDE(fbc_state->override_cfb_stride / fbc->limit);

	/* read-modify-write: only touch the stride override fields */
	intel_de_rmw(i915, CHICKEN_MISC_4,
		     CHICKEN_FBC_STRIDE_OVERRIDE |
		     CHICKEN_FBC_STRIDE_MASK, val);
}
584
/* Compute the ivb+ DPFC_CONTROL value (minus the enable bit) from fbc->state */
static u32 ivb_dpfc_ctl(struct intel_fbc *fbc)
{
	const struct intel_fbc_state *fbc_state = &fbc->state;
	struct drm_i915_private *i915 = fbc->i915;
	u32 dpfc_ctl;

	dpfc_ctl = g4x_dpfc_ctl_limit(fbc);

	/* only ivb still encodes the plane in this register */
	if (IS_IVYBRIDGE(i915))
		dpfc_ctl |= DPFC_CTL_PLANE_IVB(fbc_state->plane->i9xx_plane);

	/* xe2+ uses an explicit plane binding field instead */
	if (DISPLAY_VER(i915) >= 20)
		dpfc_ctl |= DPFC_CTL_PLANE_BINDING(fbc_state->plane->id);

	/* negative fence_id means no fence is used */
	if (fbc_state->fence_id >= 0)
		dpfc_ctl |= DPFC_CTL_FENCE_EN_IVB;

	if (fbc->false_color)
		dpfc_ctl |= DPFC_CTL_FALSE_COLOR;

	return dpfc_ctl;
}
607
/* Program and enable compression on ivb+ */
static void ivb_fbc_activate(struct intel_fbc *fbc)
{
	struct drm_i915_private *i915 = fbc->i915;

	/* program the cfb stride override via the platform specific register */
	if (DISPLAY_VER(i915) >= 10)
		glk_fbc_program_cfb_stride(fbc);
	else if (DISPLAY_VER(i915) == 9)
		skl_fbc_program_cfb_stride(fbc);

	/* fence registers only matter where legacy fencing still exists */
	if (intel_gt_support_legacy_fencing(to_gt(i915)))
		snb_fbc_program_fence(fbc);

	intel_de_write(i915, ILK_DPFC_CONTROL(fbc->id),
		       DPFC_CTL_EN | ivb_dpfc_ctl(fbc));
}
623
624 static bool ivb_fbc_is_compressing(struct intel_fbc *fbc)
625 {
626         return intel_de_read(fbc->i915, ILK_DPFC_STATUS2(fbc->id)) & DPFC_COMP_SEG_MASK_IVB;
627 }
628
/* Toggle the debug false color bit in DPFC_CONTROL */
static void ivb_fbc_set_false_color(struct intel_fbc *fbc,
				    bool enable)
{
	intel_de_rmw(fbc->i915, ILK_DPFC_CONTROL(fbc->id),
		     DPFC_CTL_FALSE_COLOR, enable ? DPFC_CTL_FALSE_COLOR : 0);
}
635
/* ivb+ FBC: the only variant implementing false color */
static const struct intel_fbc_funcs ivb_fbc_funcs = {
	.activate = ivb_fbc_activate,
	.deactivate = ilk_fbc_deactivate,
	.is_active = ilk_fbc_is_active,
	.is_compressing = ivb_fbc_is_compressing,
	.nuke = snb_fbc_nuke,
	.program_cfb = ilk_fbc_program_cfb,
	.set_false_color = ivb_fbc_set_false_color,
};
645
646 static bool intel_fbc_hw_is_active(struct intel_fbc *fbc)
647 {
648         return fbc->funcs->is_active(fbc);
649 }
650
/* Enable compression in hw and update the software bookkeeping */
static void intel_fbc_hw_activate(struct intel_fbc *fbc)
{
	trace_intel_fbc_activate(fbc->state.plane);

	fbc->active = true;
	fbc->activated = true;	/* sticky: records that FBC ran at least once */

	fbc->funcs->activate(fbc);
}
660
/* Disable compression in hw and update the software bookkeeping */
static void intel_fbc_hw_deactivate(struct intel_fbc *fbc)
{
	trace_intel_fbc_deactivate(fbc->state.plane);

	fbc->active = false;

	fbc->funcs->deactivate(fbc);
}
669
670 static bool intel_fbc_is_compressing(struct intel_fbc *fbc)
671 {
672         return fbc->funcs->is_compressing(fbc);
673 }
674
/* Force the hw to recompress the whole framebuffer; caller holds fbc->lock */
static void intel_fbc_nuke(struct intel_fbc *fbc)
{
	struct drm_i915_private *i915 = fbc->i915;

	lockdep_assert_held(&fbc->lock);
	/* nuking with a flip in flight would race with the flip */
	drm_WARN_ON(&i915->drm, fbc->flip_pending);

	trace_intel_fbc_nuke(fbc->state.plane);

	fbc->funcs->nuke(fbc);
}
686
/* Activate FBC and kick off a full recompression; caller holds fbc->lock */
static void intel_fbc_activate(struct intel_fbc *fbc)
{
	lockdep_assert_held(&fbc->lock);

	intel_fbc_hw_activate(fbc);
	intel_fbc_nuke(fbc);

	/* FBC is running, so there is no "no FBC" reason anymore */
	fbc->no_fbc_reason = NULL;
}
696
/*
 * Deactivate FBC (if active) and record the human readable reason;
 * caller holds fbc->lock.
 */
static void intel_fbc_deactivate(struct intel_fbc *fbc, const char *reason)
{
	lockdep_assert_held(&fbc->lock);

	if (fbc->active)
		intel_fbc_hw_deactivate(fbc);

	fbc->no_fbc_reason = reason;
}
706
707 static u64 intel_fbc_cfb_base_max(struct drm_i915_private *i915)
708 {
709         if (DISPLAY_VER(i915) >= 5 || IS_G4X(i915))
710                 return BIT_ULL(28);
711         else
712                 return BIT_ULL(32);
713 }
714
/* Highest stolen memory address usable for the CFB on this platform */
static u64 intel_fbc_stolen_end(struct drm_i915_private *i915)
{
	u64 end;

	/* The FBC hardware for BDW/SKL doesn't have access to the stolen
	 * reserved range size, so it always assumes the maximum (8mb) is used.
	 * If we enable FBC using a CFB on that memory range we'll get FIFO
	 * underruns, even if that range is not reserved by the BIOS. */
	if (IS_BROADWELL(i915) ||
	    (DISPLAY_VER(i915) == 9 && !IS_BROXTON(i915)))
		end = i915_gem_stolen_area_size(i915) - 8 * 1024 * 1024;
	else
		end = U64_MAX;

	/* also clamp to what the CFB base register can address */
	return min(end, intel_fbc_cfb_base_max(i915));
}
731
732 static int intel_fbc_min_limit(const struct intel_plane_state *plane_state)
733 {
734         return plane_state->hw.fb->format->cpp[0] == 2 ? 2 : 1;
735 }
736
/* Maximum compression limit the platform is allowed to use */
static int intel_fbc_max_limit(struct drm_i915_private *i915)
{
	/* WaFbcOnly1to1Ratio:ctg */
	if (IS_G4X(i915))
		return 1;

	/*
	 * FBC2 can only do 1:1, 1:2, 1:4, we limit
	 * FBC1 to the same out of convenience.
	 */
	return 4;
}
749
/*
 * Allocate stolen memory for the CFB, increasing the compression limit
 * (shrinking the buffer) until an allocation succeeds.
 *
 * Returns the compression limit that worked, or 0 if no allocation fit.
 */
static int find_compression_limit(struct intel_fbc *fbc,
				  unsigned int size, int min_limit)
{
	struct drm_i915_private *i915 = fbc->i915;
	u64 end = intel_fbc_stolen_end(i915);
	int ret, limit = min_limit;

	size /= limit;

	/* Try to over-allocate to reduce reallocations and fragmentation. */
	ret = i915_gem_stolen_insert_node_in_range(i915, &fbc->compressed_fb,
						   size <<= 1, 4096, 0, end);
	if (ret == 0)
		return limit;

	/* halve the buffer (doubling the effective limit) until it fits */
	for (; limit <= intel_fbc_max_limit(i915); limit <<= 1) {
		ret = i915_gem_stolen_insert_node_in_range(i915, &fbc->compressed_fb,
							   size >>= 1, 4096, 0, end);
		if (ret == 0)
			return limit;
	}

	return 0;
}
774
/*
 * Allocate the compressed framebuffer (and, pre-g4x, the line length
 * buffer) from stolen memory and record the resulting compression limit.
 *
 * Returns 0 on success, -ENOSPC when stolen memory could not fit the CFB.
 */
static int intel_fbc_alloc_cfb(struct intel_fbc *fbc,
			       unsigned int size, int min_limit)
{
	struct drm_i915_private *i915 = fbc->i915;
	int ret;

	/* must not leak a previous allocation */
	drm_WARN_ON(&i915->drm,
		    i915_gem_stolen_node_allocated(&fbc->compressed_fb));
	drm_WARN_ON(&i915->drm,
		    i915_gem_stolen_node_allocated(&fbc->compressed_llb));

	/* gen2/3 additionally need a line length buffer */
	if (DISPLAY_VER(i915) < 5 && !IS_G4X(i915)) {
		ret = i915_gem_stolen_insert_node(i915, &fbc->compressed_llb,
						  4096, 4096);
		if (ret)
			goto err;
	}

	ret = find_compression_limit(fbc, size, min_limit);
	if (!ret)
		goto err_llb;
	else if (ret > min_limit)
		drm_info_once(&i915->drm,
			      "Reducing the compressed framebuffer size. This may lead to less power savings than a non-reduced-size. Try to increase stolen memory size if available in BIOS.\n");

	fbc->limit = ret;

	drm_dbg_kms(&i915->drm,
		    "reserved %llu bytes of contiguous stolen space for FBC, limit: %d\n",
		    i915_gem_stolen_node_size(&fbc->compressed_fb), fbc->limit);
	return 0;

err_llb:
	if (i915_gem_stolen_node_allocated(&fbc->compressed_llb))
		i915_gem_stolen_remove_node(i915, &fbc->compressed_llb);
err:
	if (i915_gem_stolen_initialized(i915))
		drm_info_once(&i915->drm, "not enough stolen space for compressed buffer (need %d more bytes), disabling. Hint: you may be able to increase stolen memory size in the BIOS to avoid this.\n", size);
	return -ENOSPC;
}
815
816 static void intel_fbc_program_cfb(struct intel_fbc *fbc)
817 {
818         fbc->funcs->program_cfb(fbc);
819 }
820
/* Apply FBC related hardware workarounds */
static void intel_fbc_program_workarounds(struct intel_fbc *fbc)
{
	/* Wa_22014263786:icl,jsl,tgl,dg1,rkl,adls,adlp,mtl */
	if (DISPLAY_VER(fbc->i915) >= 11 && !IS_DG2(fbc->i915))
		intel_de_rmw(fbc->i915, ILK_DPFC_CHICKEN(fbc->id), 0,
			     DPFC_CHICKEN_FORCE_SLB_INVALIDATION);
}
828
/* Release the stolen memory backing the CFB/LLB; hw must be inactive */
static void __intel_fbc_cleanup_cfb(struct intel_fbc *fbc)
{
	struct drm_i915_private *i915 = fbc->i915;

	/* refuse to free the buffers out from under active hw */
	if (WARN_ON(intel_fbc_hw_is_active(fbc)))
		return;

	if (i915_gem_stolen_node_allocated(&fbc->compressed_llb))
		i915_gem_stolen_remove_node(i915, &fbc->compressed_llb);
	if (i915_gem_stolen_node_allocated(&fbc->compressed_fb))
		i915_gem_stolen_remove_node(i915, &fbc->compressed_fb);
}
841
/* Tear down all FBC instances: free their stolen memory and the structs */
void intel_fbc_cleanup(struct drm_i915_private *i915)
{
	struct intel_fbc *fbc;
	enum intel_fbc_id fbc_id;

	for_each_intel_fbc(i915, fbc, fbc_id) {
		mutex_lock(&fbc->lock);
		__intel_fbc_cleanup_cfb(fbc);
		mutex_unlock(&fbc->lock);

		kfree(fbc);
	}
}
855
856 static bool i8xx_fbc_stride_is_valid(const struct intel_plane_state *plane_state)
857 {
858         const struct drm_framebuffer *fb = plane_state->hw.fb;
859         unsigned int stride = intel_fbc_plane_stride(plane_state) *
860                 fb->format->cpp[0];
861
862         return stride == 4096 || stride == 8192;
863 }
864
865 static bool i965_fbc_stride_is_valid(const struct intel_plane_state *plane_state)
866 {
867         const struct drm_framebuffer *fb = plane_state->hw.fb;
868         unsigned int stride = intel_fbc_plane_stride(plane_state) *
869                 fb->format->cpp[0];
870
871         return stride >= 2048 && stride <= 16384;
872 }
873
/* g4x+: no FBC stride restrictions. */
static bool g4x_fbc_stride_is_valid(const struct intel_plane_state *plane_state)
{
	return true;
}
878
879 static bool skl_fbc_stride_is_valid(const struct intel_plane_state *plane_state)
880 {
881         const struct drm_framebuffer *fb = plane_state->hw.fb;
882         unsigned int stride = intel_fbc_plane_stride(plane_state) *
883                 fb->format->cpp[0];
884
885         /* Display WA #1105: skl,bxt,kbl,cfl,glk */
886         if (fb->modifier == DRM_FORMAT_MOD_LINEAR && stride & 511)
887                 return false;
888
889         return true;
890 }
891
/* icl+: no FBC stride restrictions. */
static bool icl_fbc_stride_is_valid(const struct intel_plane_state *plane_state)
{
	return true;
}
896
897 static bool stride_is_valid(const struct intel_plane_state *plane_state)
898 {
899         struct drm_i915_private *i915 = to_i915(plane_state->uapi.plane->dev);
900
901         if (DISPLAY_VER(i915) >= 11)
902                 return icl_fbc_stride_is_valid(plane_state);
903         else if (DISPLAY_VER(i915) >= 9)
904                 return skl_fbc_stride_is_valid(plane_state);
905         else if (DISPLAY_VER(i915) >= 5 || IS_G4X(i915))
906                 return g4x_fbc_stride_is_valid(plane_state);
907         else if (DISPLAY_VER(i915) == 4)
908                 return i965_fbc_stride_is_valid(plane_state);
909         else
910                 return i8xx_fbc_stride_is_valid(plane_state);
911 }
912
913 static bool i8xx_fbc_pixel_format_is_valid(const struct intel_plane_state *plane_state)
914 {
915         struct drm_i915_private *i915 = to_i915(plane_state->uapi.plane->dev);
916         const struct drm_framebuffer *fb = plane_state->hw.fb;
917
918         switch (fb->format->format) {
919         case DRM_FORMAT_XRGB8888:
920         case DRM_FORMAT_XBGR8888:
921                 return true;
922         case DRM_FORMAT_XRGB1555:
923         case DRM_FORMAT_RGB565:
924                 /* 16bpp not supported on gen2 */
925                 if (DISPLAY_VER(i915) == 2)
926                         return false;
927                 return true;
928         default:
929                 return false;
930         }
931 }
932
933 static bool g4x_fbc_pixel_format_is_valid(const struct intel_plane_state *plane_state)
934 {
935         struct drm_i915_private *i915 = to_i915(plane_state->uapi.plane->dev);
936         const struct drm_framebuffer *fb = plane_state->hw.fb;
937
938         switch (fb->format->format) {
939         case DRM_FORMAT_XRGB8888:
940         case DRM_FORMAT_XBGR8888:
941                 return true;
942         case DRM_FORMAT_RGB565:
943                 /* WaFbcOnly1to1Ratio:ctg */
944                 if (IS_G4X(i915))
945                         return false;
946                 return true;
947         default:
948                 return false;
949         }
950 }
951
952 static bool lnl_fbc_pixel_format_is_valid(const struct intel_plane_state *plane_state)
953 {
954         const struct drm_framebuffer *fb = plane_state->hw.fb;
955
956         switch (fb->format->format) {
957         case DRM_FORMAT_XRGB8888:
958         case DRM_FORMAT_XBGR8888:
959         case DRM_FORMAT_ARGB8888:
960         case DRM_FORMAT_ABGR8888:
961         case DRM_FORMAT_RGB565:
962                 return true;
963         default:
964                 return false;
965         }
966 }
967
968 static bool pixel_format_is_valid(const struct intel_plane_state *plane_state)
969 {
970         struct drm_i915_private *i915 = to_i915(plane_state->uapi.plane->dev);
971
972         if (DISPLAY_VER(i915) >= 20)
973                 return lnl_fbc_pixel_format_is_valid(plane_state);
974         else if (DISPLAY_VER(i915) >= 5 || IS_G4X(i915))
975                 return g4x_fbc_pixel_format_is_valid(plane_state);
976         else
977                 return i8xx_fbc_pixel_format_is_valid(plane_state);
978 }
979
/* gen2/3: only an unrotated plane can be compressed. */
static bool i8xx_fbc_rotation_is_valid(const struct intel_plane_state *plane_state)
{
	return plane_state->hw.rotation == DRM_MODE_ROTATE_0;
}
984
/* g4x+: no FBC rotation restrictions. */
static bool g4x_fbc_rotation_is_valid(const struct intel_plane_state *plane_state)
{
	return true;
}
989
990 static bool skl_fbc_rotation_is_valid(const struct intel_plane_state *plane_state)
991 {
992         const struct drm_framebuffer *fb = plane_state->hw.fb;
993         unsigned int rotation = plane_state->hw.rotation;
994
995         if (fb->format->format == DRM_FORMAT_RGB565 &&
996             drm_rotation_90_or_270(rotation))
997                 return false;
998
999         return true;
1000 }
1001
1002 static bool rotation_is_valid(const struct intel_plane_state *plane_state)
1003 {
1004         struct drm_i915_private *i915 = to_i915(plane_state->uapi.plane->dev);
1005
1006         if (DISPLAY_VER(i915) >= 9)
1007                 return skl_fbc_rotation_is_valid(plane_state);
1008         else if (DISPLAY_VER(i915) >= 5 || IS_G4X(i915))
1009                 return g4x_fbc_rotation_is_valid(plane_state);
1010         else
1011                 return i8xx_fbc_rotation_is_valid(plane_state);
1012 }
1013
1014 /*
1015  * For some reason, the hardware tracking starts looking at whatever we
1016  * programmed as the display plane base address register. It does not look at
1017  * the X and Y offset registers. That's why we include the src x/y offsets
1018  * instead of just looking at the plane size.
1019  */
1020 static bool intel_fbc_hw_tracking_covers_screen(const struct intel_plane_state *plane_state)
1021 {
1022         struct drm_i915_private *i915 = to_i915(plane_state->uapi.plane->dev);
1023         unsigned int effective_w, effective_h, max_w, max_h;
1024
1025         if (DISPLAY_VER(i915) >= 10) {
1026                 max_w = 5120;
1027                 max_h = 4096;
1028         } else if (DISPLAY_VER(i915) >= 8 || IS_HASWELL(i915)) {
1029                 max_w = 4096;
1030                 max_h = 4096;
1031         } else if (IS_G4X(i915) || DISPLAY_VER(i915) >= 5) {
1032                 max_w = 4096;
1033                 max_h = 2048;
1034         } else {
1035                 max_w = 2048;
1036                 max_h = 1536;
1037         }
1038
1039         effective_w = plane_state->view.color_plane[0].x +
1040                 (drm_rect_width(&plane_state->uapi.src) >> 16);
1041         effective_h = plane_state->view.color_plane[0].y +
1042                 (drm_rect_height(&plane_state->uapi.src) >> 16);
1043
1044         return effective_w <= max_w && effective_h <= max_h;
1045 }
1046
/* Pre-gen9: only X-tiled framebuffers can be compressed. */
static bool i8xx_fbc_tiling_valid(const struct intel_plane_state *plane_state)
{
	const struct drm_framebuffer *fb = plane_state->hw.fb;

	return fb->modifier == I915_FORMAT_MOD_X_TILED;
}
1053
1054 static bool skl_fbc_tiling_valid(const struct intel_plane_state *plane_state)
1055 {
1056         const struct drm_framebuffer *fb = plane_state->hw.fb;
1057
1058         switch (fb->modifier) {
1059         case DRM_FORMAT_MOD_LINEAR:
1060         case I915_FORMAT_MOD_Y_TILED:
1061         case I915_FORMAT_MOD_Yf_TILED:
1062         case I915_FORMAT_MOD_4_TILED:
1063         case I915_FORMAT_MOD_X_TILED:
1064                 return true;
1065         default:
1066                 return false;
1067         }
1068 }
1069
1070 static bool tiling_is_valid(const struct intel_plane_state *plane_state)
1071 {
1072         struct drm_i915_private *i915 = to_i915(plane_state->uapi.plane->dev);
1073
1074         if (DISPLAY_VER(i915) >= 9)
1075                 return skl_fbc_tiling_valid(plane_state);
1076         else
1077                 return i8xx_fbc_tiling_valid(plane_state);
1078 }
1079
1080 static void intel_fbc_update_state(struct intel_atomic_state *state,
1081                                    struct intel_crtc *crtc,
1082                                    struct intel_plane *plane)
1083 {
1084         struct drm_i915_private *i915 = to_i915(state->base.dev);
1085         const struct intel_crtc_state *crtc_state =
1086                 intel_atomic_get_new_crtc_state(state, crtc);
1087         const struct intel_plane_state *plane_state =
1088                 intel_atomic_get_new_plane_state(state, plane);
1089         struct intel_fbc *fbc = plane->fbc;
1090         struct intel_fbc_state *fbc_state = &fbc->state;
1091
1092         WARN_ON(plane_state->no_fbc_reason);
1093         WARN_ON(fbc_state->plane && fbc_state->plane != plane);
1094
1095         fbc_state->plane = plane;
1096
1097         /* FBC1 compression interval: arbitrary choice of 1 second */
1098         fbc_state->interval = drm_mode_vrefresh(&crtc_state->hw.adjusted_mode);
1099
1100         fbc_state->fence_y_offset = intel_plane_fence_y_offset(plane_state);
1101
1102         drm_WARN_ON(&i915->drm, plane_state->flags & PLANE_HAS_FENCE &&
1103                     !intel_gt_support_legacy_fencing(to_gt(i915)));
1104
1105         if (plane_state->flags & PLANE_HAS_FENCE)
1106                 fbc_state->fence_id =  i915_vma_fence_id(plane_state->ggtt_vma);
1107         else
1108                 fbc_state->fence_id = -1;
1109
1110         fbc_state->cfb_stride = intel_fbc_cfb_stride(plane_state);
1111         fbc_state->cfb_size = intel_fbc_cfb_size(plane_state);
1112         fbc_state->override_cfb_stride = intel_fbc_override_cfb_stride(plane_state);
1113 }
1114
/*
 * Pre-gen9 hardware needs a fenced GTT mapping to track CPU writes to
 * the scanout; gen9+ relies purely on software tracking.
 */
static bool intel_fbc_is_fence_ok(const struct intel_plane_state *plane_state)
{
	struct drm_i915_private *i915 = to_i915(plane_state->uapi.plane->dev);

	/*
	 * The use of a CPU fence is one of two ways to detect writes by the
	 * CPU to the scanout and trigger updates to the FBC.
	 *
	 * The other method is by software tracking (see
	 * intel_fbc_invalidate/flush()), it will manually notify FBC and nuke
	 * the current compressed buffer and recompress it.
	 *
	 * Note that is possible for a tiled surface to be unmappable (and
	 * so have no fence associated with it) due to aperture constraints
	 * at the time of pinning.
	 */
	return DISPLAY_VER(i915) >= 9 ||
		(plane_state->flags & PLANE_HAS_FENCE &&
		 i915_vma_fence_id(plane_state->ggtt_vma) != -1);
}
1135
1136 static bool intel_fbc_is_cfb_ok(const struct intel_plane_state *plane_state)
1137 {
1138         struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
1139         struct intel_fbc *fbc = plane->fbc;
1140
1141         return intel_fbc_min_limit(plane_state) <= fbc->limit &&
1142                 intel_fbc_cfb_size(plane_state) <= fbc->limit *
1143                         i915_gem_stolen_node_size(&fbc->compressed_fb);
1144 }
1145
1146 static bool intel_fbc_is_ok(const struct intel_plane_state *plane_state)
1147 {
1148         return !plane_state->no_fbc_reason &&
1149                 intel_fbc_is_fence_ok(plane_state) &&
1150                 intel_fbc_is_cfb_ok(plane_state);
1151 }
1152
/*
 * Evaluate whether FBC can be used with @plane's new state, recording a
 * human readable rejection reason in plane_state->no_fbc_reason (NULL when
 * FBC is possible). Returns 0 normally; a negative error code only when
 * acquiring the cdclk state fails.
 */
static int intel_fbc_check_plane(struct intel_atomic_state *state,
				 struct intel_plane *plane)
{
	struct drm_i915_private *i915 = to_i915(state->base.dev);
	struct intel_plane_state *plane_state =
		intel_atomic_get_new_plane_state(state, plane);
	const struct drm_framebuffer *fb = plane_state->hw.fb;
	struct intel_crtc *crtc = to_intel_crtc(plane_state->hw.crtc);
	const struct intel_crtc_state *crtc_state;
	struct intel_fbc *fbc = plane->fbc;

	/* Plane has no FBC instance -> nothing to check */
	if (!fbc)
		return 0;

	if (!i915_gem_stolen_initialized(i915)) {
		plane_state->no_fbc_reason = "stolen memory not initialised";
		return 0;
	}

	if (intel_vgpu_active(i915)) {
		plane_state->no_fbc_reason = "VGPU active";
		return 0;
	}

	if (!i915->params.enable_fbc) {
		plane_state->no_fbc_reason = "disabled per module param or by default";
		return 0;
	}

	if (!plane_state->uapi.visible) {
		plane_state->no_fbc_reason = "plane not visible";
		return 0;
	}

	/* Only valid here: an invisible plane may have a NULL crtc */
	crtc_state = intel_atomic_get_new_crtc_state(state, crtc);

	if (crtc_state->hw.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) {
		plane_state->no_fbc_reason = "interlaced mode not supported";
		return 0;
	}

	if (crtc_state->double_wide) {
		plane_state->no_fbc_reason = "double wide pipe not supported";
		return 0;
	}

	/*
	 * Display 12+ is not supporting FBC with PSR2.
	 * Recommendation is to keep this combination disabled
	 * Bspec: 50422 HSD: 14010260002
	 */
	if (DISPLAY_VER(i915) >= 12 && crtc_state->has_psr2) {
		plane_state->no_fbc_reason = "PSR2 enabled";
		return 0;
	}

	/* Wa_14016291713 */
	if ((IS_DISPLAY_VER(i915, 12, 13) ||
	     IS_DISPLAY_IP_STEP(i915, IP_VER(14, 0), STEP_A0, STEP_C0)) &&
	    crtc_state->has_psr) {
		plane_state->no_fbc_reason = "PSR1 enabled (Wa_14016291713)";
		return 0;
	}

	if (!pixel_format_is_valid(plane_state)) {
		plane_state->no_fbc_reason = "pixel format not supported";
		return 0;
	}

	if (!tiling_is_valid(plane_state)) {
		plane_state->no_fbc_reason = "tiling not supported";
		return 0;
	}

	if (!rotation_is_valid(plane_state)) {
		plane_state->no_fbc_reason = "rotation not supported";
		return 0;
	}

	if (!stride_is_valid(plane_state)) {
		plane_state->no_fbc_reason = "stride not supported";
		return 0;
	}

	if (DISPLAY_VER(i915) < 20 &&
	    plane_state->hw.pixel_blend_mode != DRM_MODE_BLEND_PIXEL_NONE &&
	    fb->format->has_alpha) {
		plane_state->no_fbc_reason = "per-pixel alpha not supported";
		return 0;
	}

	if (!intel_fbc_hw_tracking_covers_screen(plane_state)) {
		plane_state->no_fbc_reason = "plane size too big";
		return 0;
	}

	/*
	 * Work around a problem on GEN9+ HW, where enabling FBC on a plane
	 * having a Y offset that isn't divisible by 4 causes FIFO underrun
	 * and screen flicker.
	 */
	if (DISPLAY_VER(i915) >= 9 &&
	    plane_state->view.color_plane[0].y & 3) {
		plane_state->no_fbc_reason = "plane start Y offset misaligned";
		return 0;
	}

	/* Wa_22010751166: icl, ehl, tgl, dg1, rkl */
	if (DISPLAY_VER(i915) >= 11 &&
	    (plane_state->view.color_plane[0].y +
	     (drm_rect_height(&plane_state->uapi.src) >> 16)) & 3) {
		plane_state->no_fbc_reason = "plane end Y offset misaligned";
		return 0;
	}

	/* WaFbcExceedCdClockThreshold:hsw,bdw */
	if (IS_HASWELL(i915) || IS_BROADWELL(i915)) {
		const struct intel_cdclk_state *cdclk_state;

		cdclk_state = intel_atomic_get_cdclk_state(state);
		if (IS_ERR(cdclk_state))
			return PTR_ERR(cdclk_state);

		/* Pixel rate must stay below 95% of cdclk */
		if (crtc_state->pixel_rate >= cdclk_state->logical.cdclk * 95 / 100) {
			plane_state->no_fbc_reason = "pixel rate too high";
			return 0;
		}
	}

	plane_state->no_fbc_reason = NULL;

	return 0;
}
1286
1287
1288 static bool intel_fbc_can_flip_nuke(struct intel_atomic_state *state,
1289                                     struct intel_crtc *crtc,
1290                                     struct intel_plane *plane)
1291 {
1292         const struct intel_crtc_state *new_crtc_state =
1293                 intel_atomic_get_new_crtc_state(state, crtc);
1294         const struct intel_plane_state *old_plane_state =
1295                 intel_atomic_get_old_plane_state(state, plane);
1296         const struct intel_plane_state *new_plane_state =
1297                 intel_atomic_get_new_plane_state(state, plane);
1298         const struct drm_framebuffer *old_fb = old_plane_state->hw.fb;
1299         const struct drm_framebuffer *new_fb = new_plane_state->hw.fb;
1300
1301         if (intel_crtc_needs_modeset(new_crtc_state))
1302                 return false;
1303
1304         if (!intel_fbc_is_ok(old_plane_state) ||
1305             !intel_fbc_is_ok(new_plane_state))
1306                 return false;
1307
1308         if (old_fb->format->format != new_fb->format->format)
1309                 return false;
1310
1311         if (old_fb->modifier != new_fb->modifier)
1312                 return false;
1313
1314         if (intel_fbc_plane_stride(old_plane_state) !=
1315             intel_fbc_plane_stride(new_plane_state))
1316                 return false;
1317
1318         if (intel_fbc_cfb_stride(old_plane_state) !=
1319             intel_fbc_cfb_stride(new_plane_state))
1320                 return false;
1321
1322         if (intel_fbc_cfb_size(old_plane_state) !=
1323             intel_fbc_cfb_size(new_plane_state))
1324                 return false;
1325
1326         if (intel_fbc_override_cfb_stride(old_plane_state) !=
1327             intel_fbc_override_cfb_stride(new_plane_state))
1328                 return false;
1329
1330         return true;
1331 }
1332
/*
 * Prepare this FBC instance for a plane update. Returns true when the
 * caller must wait for an extra vblank before touching the plane
 * registers (Display WA #1198). Caller holds fbc->lock.
 */
static bool __intel_fbc_pre_update(struct intel_atomic_state *state,
				   struct intel_crtc *crtc,
				   struct intel_plane *plane)
{
	struct drm_i915_private *i915 = to_i915(state->base.dev);
	struct intel_fbc *fbc = plane->fbc;
	bool need_vblank_wait = false;

	lockdep_assert_held(&fbc->lock);

	fbc->flip_pending = true;

	/* A flip nuke keeps FBC active; nothing more to do here */
	if (intel_fbc_can_flip_nuke(state, crtc, plane))
		return need_vblank_wait;

	intel_fbc_deactivate(fbc, "update pending");

	/*
	 * Display WA #1198: glk+
	 * Need an extra vblank wait between FBC disable and most plane
	 * updates. Bspec says this is only needed for plane disable, but
	 * that is not true. Touching most plane registers will cause the
	 * corruption to appear. Also SKL/derivatives do not seem to be
	 * affected.
	 *
	 * TODO: could optimize this a bit by sampling the frame
	 * counter when we disable FBC (if it was already done earlier)
	 * and skipping the extra vblank wait before the plane update
	 * if at least one frame has already passed.
	 */
	if (fbc->activated && DISPLAY_VER(i915) >= 10)
		need_vblank_wait = true;
	fbc->activated = false;

	return need_vblank_wait;
}
1369
/*
 * Run the FBC pre-update step for every plane of @crtc in the atomic
 * state. Returns true if an extra vblank wait is required before the
 * plane registers may be written.
 */
bool intel_fbc_pre_update(struct intel_atomic_state *state,
			  struct intel_crtc *crtc)
{
	const struct intel_plane_state __maybe_unused *plane_state;
	bool need_vblank_wait = false;
	struct intel_plane *plane;
	int i;

	for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
		struct intel_fbc *fbc = plane->fbc;

		if (!fbc || plane->pipe != crtc->pipe)
			continue;

		mutex_lock(&fbc->lock);

		/* Only the plane currently owning this FBC instance matters */
		if (fbc->state.plane == plane)
			need_vblank_wait |= __intel_fbc_pre_update(state, crtc, plane);

		mutex_unlock(&fbc->lock);
	}

	return need_vblank_wait;
}
1394
/*
 * Fully disable FBC: free the CFB and clear the software state.
 * Caller holds fbc->lock and has already deactivated the hardware.
 */
static void __intel_fbc_disable(struct intel_fbc *fbc)
{
	struct drm_i915_private *i915 = fbc->i915;
	struct intel_plane *plane = fbc->state.plane;

	lockdep_assert_held(&fbc->lock);
	drm_WARN_ON(&i915->drm, fbc->active);

	drm_dbg_kms(&i915->drm, "Disabling FBC on [PLANE:%d:%s]\n",
		    plane->base.base.id, plane->base.name);

	__intel_fbc_cleanup_cfb(fbc);

	fbc->state.plane = NULL;
	fbc->flip_pending = false;
	fbc->busy_bits = 0;
}
1412
/*
 * Plane update has landed: clear the pending/busy state and try to
 * (re)activate the compressor. Caller holds fbc->lock.
 */
static void __intel_fbc_post_update(struct intel_fbc *fbc)
{
	lockdep_assert_held(&fbc->lock);

	fbc->flip_pending = false;
	fbc->busy_bits = 0;

	intel_fbc_activate(fbc);
}
1422
/* Run the FBC post-update step for every plane of @crtc in the state. */
void intel_fbc_post_update(struct intel_atomic_state *state,
			   struct intel_crtc *crtc)
{
	const struct intel_plane_state __maybe_unused *plane_state;
	struct intel_plane *plane;
	int i;

	for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
		struct intel_fbc *fbc = plane->fbc;

		if (!fbc || plane->pipe != crtc->pipe)
			continue;

		mutex_lock(&fbc->lock);

		/* Only the plane currently owning this FBC instance matters */
		if (fbc->state.plane == plane)
			__intel_fbc_post_update(fbc);

		mutex_unlock(&fbc->lock);
	}
}
1444
1445 static unsigned int intel_fbc_get_frontbuffer_bit(struct intel_fbc *fbc)
1446 {
1447         if (fbc->state.plane)
1448                 return fbc->state.plane->frontbuffer_bit;
1449         else
1450                 return 0;
1451 }
1452
/*
 * CPU is about to write to the frontbuffer: mark the bits busy and
 * deactivate the compressor until the matching flush. Flips and cursor
 * updates are handled via the pre/post update paths instead.
 */
static void __intel_fbc_invalidate(struct intel_fbc *fbc,
				   unsigned int frontbuffer_bits,
				   enum fb_op_origin origin)
{
	if (origin == ORIGIN_FLIP || origin == ORIGIN_CURSOR_UPDATE)
		return;

	mutex_lock(&fbc->lock);

	frontbuffer_bits &= intel_fbc_get_frontbuffer_bit(fbc);
	if (!frontbuffer_bits)
		goto out;

	fbc->busy_bits |= frontbuffer_bits;
	intel_fbc_deactivate(fbc, "frontbuffer write");

out:
	mutex_unlock(&fbc->lock);
}
1472
1473 void intel_fbc_invalidate(struct drm_i915_private *i915,
1474                           unsigned int frontbuffer_bits,
1475                           enum fb_op_origin origin)
1476 {
1477         struct intel_fbc *fbc;
1478         enum intel_fbc_id fbc_id;
1479
1480         for_each_intel_fbc(i915, fbc, fbc_id)
1481                 __intel_fbc_invalidate(fbc, frontbuffer_bits, origin);
1482
1483 }
1484
/*
 * CPU writes to the frontbuffer are done: clear the busy bits and, when
 * nothing else is pending, nuke the stale compressed data (if FBC is
 * active) or re-activate the compressor.
 */
static void __intel_fbc_flush(struct intel_fbc *fbc,
			      unsigned int frontbuffer_bits,
			      enum fb_op_origin origin)
{
	mutex_lock(&fbc->lock);

	frontbuffer_bits &= intel_fbc_get_frontbuffer_bit(fbc);
	if (!frontbuffer_bits)
		goto out;

	fbc->busy_bits &= ~frontbuffer_bits;

	/* Flips and cursor updates are handled by the pre/post update paths */
	if (origin == ORIGIN_FLIP || origin == ORIGIN_CURSOR_UPDATE)
		goto out;

	if (fbc->busy_bits || fbc->flip_pending)
		goto out;

	if (fbc->active)
		intel_fbc_nuke(fbc);
	else
		intel_fbc_activate(fbc);

out:
	mutex_unlock(&fbc->lock);
}
1511
/*
 * Propagate a frontbuffer flush to every FBC instance
 * (see __intel_fbc_flush()).
 */
void intel_fbc_flush(struct drm_i915_private *i915,
		     unsigned int frontbuffer_bits,
		     enum fb_op_origin origin)
{
	struct intel_fbc *fbc;
	enum intel_fbc_id fbc_id;

	for_each_intel_fbc(i915, fbc, fbc_id)
		__intel_fbc_flush(fbc, frontbuffer_bits, origin);
}
1522
1523 int intel_fbc_atomic_check(struct intel_atomic_state *state)
1524 {
1525         struct intel_plane_state __maybe_unused *plane_state;
1526         struct intel_plane *plane;
1527         int i;
1528
1529         for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
1530                 int ret;
1531
1532                 ret = intel_fbc_check_plane(state, plane);
1533                 if (ret)
1534                         return ret;
1535         }
1536
1537         return 0;
1538 }
1539
1540 static void __intel_fbc_enable(struct intel_atomic_state *state,
1541                                struct intel_crtc *crtc,
1542                                struct intel_plane *plane)
1543 {
1544         struct drm_i915_private *i915 = to_i915(state->base.dev);
1545         const struct intel_plane_state *plane_state =
1546                 intel_atomic_get_new_plane_state(state, plane);
1547         struct intel_fbc *fbc = plane->fbc;
1548
1549         lockdep_assert_held(&fbc->lock);
1550
1551         if (fbc->state.plane) {
1552                 if (fbc->state.plane != plane)
1553                         return;
1554
1555                 if (intel_fbc_is_ok(plane_state)) {
1556                         intel_fbc_update_state(state, crtc, plane);
1557                         return;
1558                 }
1559
1560                 __intel_fbc_disable(fbc);
1561         }
1562
1563         drm_WARN_ON(&i915->drm, fbc->active);
1564
1565         fbc->no_fbc_reason = plane_state->no_fbc_reason;
1566         if (fbc->no_fbc_reason)
1567                 return;
1568
1569         if (!intel_fbc_is_fence_ok(plane_state)) {
1570                 fbc->no_fbc_reason = "framebuffer not fenced";
1571                 return;
1572         }
1573
1574         if (fbc->underrun_detected) {
1575                 fbc->no_fbc_reason = "FIFO underrun";
1576                 return;
1577         }
1578
1579         if (intel_fbc_alloc_cfb(fbc, intel_fbc_cfb_size(plane_state),
1580                                 intel_fbc_min_limit(plane_state))) {
1581                 fbc->no_fbc_reason = "not enough stolen memory";
1582                 return;
1583         }
1584
1585         drm_dbg_kms(&i915->drm, "Enabling FBC on [PLANE:%d:%s]\n",
1586                     plane->base.base.id, plane->base.name);
1587         fbc->no_fbc_reason = "FBC enabled but not active yet\n";
1588
1589         intel_fbc_update_state(state, crtc, plane);
1590
1591         intel_fbc_program_workarounds(fbc);
1592         intel_fbc_program_cfb(fbc);
1593 }
1594
/**
 * intel_fbc_disable - disable FBC if it's associated with crtc
 * @crtc: the CRTC
 *
 * This function disables FBC if it's associated with the provided CRTC.
 */
void intel_fbc_disable(struct intel_crtc *crtc)
{
	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
	struct intel_plane *plane;

	/* Disable FBC on every plane of this pipe that currently owns it */
	for_each_intel_plane(&i915->drm, plane) {
		struct intel_fbc *fbc = plane->fbc;

		if (!fbc || plane->pipe != crtc->pipe)
			continue;

		mutex_lock(&fbc->lock);
		if (fbc->state.plane == plane)
			__intel_fbc_disable(fbc);
		mutex_unlock(&fbc->lock);
	}
}
1618
/*
 * Re-evaluate FBC for every plane of @crtc after an atomic update:
 * disable it on a fastset that made the plane ineligible, otherwise
 * attempt to (re)enable it.
 */
void intel_fbc_update(struct intel_atomic_state *state,
		      struct intel_crtc *crtc)
{
	const struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	const struct intel_plane_state *plane_state;
	struct intel_plane *plane;
	int i;

	for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
		struct intel_fbc *fbc = plane->fbc;

		if (!fbc || plane->pipe != crtc->pipe)
			continue;

		mutex_lock(&fbc->lock);

		if (intel_crtc_needs_fastset(crtc_state) &&
		    plane_state->no_fbc_reason) {
			if (fbc->state.plane == plane)
				__intel_fbc_disable(fbc);
		} else {
			__intel_fbc_enable(state, crtc, plane);
		}

		mutex_unlock(&fbc->lock);
	}
}
1647
/*
 * Deferred handler for a FIFO underrun: permanently disable this FBC
 * instance (until __intel_fbc_reset_underrun() clears the flag).
 */
static void intel_fbc_underrun_work_fn(struct work_struct *work)
{
	struct intel_fbc *fbc = container_of(work, typeof(*fbc), underrun_work);
	struct drm_i915_private *i915 = fbc->i915;

	mutex_lock(&fbc->lock);

	/* Maybe we were scheduled twice. */
	if (fbc->underrun_detected || !fbc->state.plane)
		goto out;

	drm_dbg_kms(&i915->drm, "Disabling FBC due to FIFO underrun.\n");
	fbc->underrun_detected = true;

	intel_fbc_deactivate(fbc, "FIFO underrun");
	/* Let the deactivation land before tearing everything down */
	if (!fbc->flip_pending)
		intel_crtc_wait_for_next_vblank(intel_crtc_for_pipe(i915, fbc->state.plane->pipe));
	__intel_fbc_disable(fbc);
out:
	mutex_unlock(&fbc->lock);
}
1669
/* Clear the underrun flag so FBC may be enabled again on this instance. */
static void __intel_fbc_reset_underrun(struct intel_fbc *fbc)
{
	struct drm_i915_private *i915 = fbc->i915;

	/* Make sure no deferred underrun handler is still in flight */
	cancel_work_sync(&fbc->underrun_work);

	mutex_lock(&fbc->lock);

	if (fbc->underrun_detected) {
		drm_dbg_kms(&i915->drm,
			    "Re-allowing FBC after fifo underrun\n");
		fbc->no_fbc_reason = "FIFO underrun cleared";
	}

	fbc->underrun_detected = false;
	mutex_unlock(&fbc->lock);
}
1687
/**
 * intel_fbc_reset_underrun - reset FBC fifo underrun status.
 * @i915: the i915 device
 *
 * See intel_fbc_handle_fifo_underrun_irq(). For automated testing we
 * want to re-enable FBC after an underrun to increase test coverage.
 */
void intel_fbc_reset_underrun(struct drm_i915_private *i915)
{
	struct intel_fbc *fbc;
	enum intel_fbc_id fbc_id;

	for_each_intel_fbc(i915, fbc, fbc_id)
		__intel_fbc_reset_underrun(fbc);
}
1703
/* Schedule the deferred underrun handler for one FBC instance. */
static void __intel_fbc_handle_fifo_underrun_irq(struct intel_fbc *fbc)
{
	/*
	 * There's no guarantee that underrun_detected won't be set to true
	 * right after this check and before the work is scheduled, but that's
	 * not a problem since we'll check it again under the work function
	 * while FBC is locked. This check here is just to prevent us from
	 * unnecessarily scheduling the work, and it relies on the fact that we
	 * never switch underrun_detected back to false after it's true.
	 */
	if (READ_ONCE(fbc->underrun_detected))
		return;

	queue_work(fbc->i915->unordered_wq, &fbc->underrun_work);
}
1719
1720 /**
1721  * intel_fbc_handle_fifo_underrun_irq - disable FBC when we get a FIFO underrun
1722  * @i915: i915 device
1723  *
1724  * Without FBC, most underruns are harmless and don't really cause too many
1725  * problems, except for an annoying message on dmesg. With FBC, underruns can
1726  * become black screens or even worse, especially when paired with bad
1727  * watermarks. So in order for us to be on the safe side, completely disable FBC
1728  * in case we ever detect a FIFO underrun on any pipe. An underrun on any pipe
1729  * already suggests that watermarks may be bad, so try to be as safe as
1730  * possible.
1731  *
1732  * This function is called from the IRQ handler.
1733  */
1734 void intel_fbc_handle_fifo_underrun_irq(struct drm_i915_private *i915)
1735 {
1736         struct intel_fbc *fbc;
1737         enum intel_fbc_id fbc_id;
1738
1739         for_each_intel_fbc(i915, fbc, fbc_id)
1740                 __intel_fbc_handle_fifo_underrun_irq(fbc);
1741 }
1742
1743 /*
1744  * The DDX driver changes its behavior depending on the value it reads from
1745  * i915.enable_fbc, so sanitize it by translating the default value into either
1746  * 0 or 1 in order to allow it to know what's going on.
1747  *
1748  * Notice that this is done at driver initialization and we still allow user
1749  * space to change the value during runtime without sanitizing it again. IGT
1750  * relies on being able to change i915.enable_fbc at runtime.
1751  */
1752 static int intel_sanitize_fbc_option(struct drm_i915_private *i915)
1753 {
1754         if (i915->params.enable_fbc >= 0)
1755                 return !!i915->params.enable_fbc;
1756
1757         if (!HAS_FBC(i915))
1758                 return 0;
1759
1760         if (IS_BROADWELL(i915) || DISPLAY_VER(i915) >= 9)
1761                 return 1;
1762
1763         return 0;
1764 }
1765
1766 static bool need_fbc_vtd_wa(struct drm_i915_private *i915)
1767 {
1768         /* WaFbcTurnOffFbcWhenHyperVisorIsUsed:skl,bxt */
1769         if (i915_vtd_active(i915) &&
1770             (IS_SKYLAKE(i915) || IS_BROXTON(i915))) {
1771                 drm_info(&i915->drm,
1772                          "Disabling framebuffer compression (FBC) to prevent screen flicker with VT-d enabled\n");
1773                 return true;
1774         }
1775
1776         return false;
1777 }
1778
/*
 * intel_fbc_add_plane - associate a plane with an FBC instance
 * @fbc: the FBC instance
 * @plane: the plane to attach
 *
 * The association is read back later, e.g. by the debugfs status code.
 */
void intel_fbc_add_plane(struct intel_fbc *fbc, struct intel_plane *plane)
{
	plane->fbc = fbc;
}
1783
/*
 * intel_fbc_create - allocate and initialize one FBC instance
 * @i915: the i915 device
 * @fbc_id: identity of the instance being created
 *
 * Returns the newly allocated instance, or NULL on allocation failure.
 */
static struct intel_fbc *intel_fbc_create(struct drm_i915_private *i915,
					  enum intel_fbc_id fbc_id)
{
	struct intel_fbc *fbc;

	fbc = kzalloc(sizeof(*fbc), GFP_KERNEL);
	if (!fbc)
		return NULL;

	fbc->id = fbc_id;
	fbc->i915 = i915;
	INIT_WORK(&fbc->underrun_work, intel_fbc_underrun_work_fn);
	mutex_init(&fbc->lock);

	/*
	 * Select the per-platform hardware vfuncs. NOTE(review): the chain
	 * order matters — IS_G4X() is tested before the generic
	 * DISPLAY_VER() == 4 case, presumably because G4X also reports
	 * display version 4; confirm against the platform definitions
	 * before reordering these branches.
	 */
	if (DISPLAY_VER(i915) >= 7)
		fbc->funcs = &ivb_fbc_funcs;
	else if (DISPLAY_VER(i915) == 6)
		fbc->funcs = &snb_fbc_funcs;
	else if (DISPLAY_VER(i915) == 5)
		fbc->funcs = &ilk_fbc_funcs;
	else if (IS_G4X(i915))
		fbc->funcs = &g4x_fbc_funcs;
	else if (DISPLAY_VER(i915) == 4)
		fbc->funcs = &i965_fbc_funcs;
	else
		fbc->funcs = &i8xx_fbc_funcs;

	return fbc;
}
1813
1814 /**
1815  * intel_fbc_init - Initialize FBC
1816  * @i915: the i915 device
1817  *
1818  * This function might be called during PM init process.
1819  */
1820 void intel_fbc_init(struct drm_i915_private *i915)
1821 {
1822         enum intel_fbc_id fbc_id;
1823
1824         if (need_fbc_vtd_wa(i915))
1825                 DISPLAY_RUNTIME_INFO(i915)->fbc_mask = 0;
1826
1827         i915->params.enable_fbc = intel_sanitize_fbc_option(i915);
1828         drm_dbg_kms(&i915->drm, "Sanitized enable_fbc value: %d\n",
1829                     i915->params.enable_fbc);
1830
1831         for_each_fbc_id(i915, fbc_id)
1832                 i915->display.fbc[fbc_id] = intel_fbc_create(i915, fbc_id);
1833 }
1834
1835 /**
1836  * intel_fbc_sanitize - Sanitize FBC
1837  * @i915: the i915 device
1838  *
1839  * Make sure FBC is initially disabled since we have no
1840  * idea eg. into which parts of stolen it might be scribbling
1841  * into.
1842  */
1843 void intel_fbc_sanitize(struct drm_i915_private *i915)
1844 {
1845         struct intel_fbc *fbc;
1846         enum intel_fbc_id fbc_id;
1847
1848         for_each_intel_fbc(i915, fbc, fbc_id) {
1849                 if (intel_fbc_hw_is_active(fbc))
1850                         intel_fbc_hw_deactivate(fbc);
1851         }
1852 }
1853
/*
 * Dump the state of one FBC instance to debugfs: whether FBC is active
 * (and actually compressing), or the reason it is disabled, followed by
 * the per-plane FBC eligibility of every plane attached to this instance.
 */
static int intel_fbc_debugfs_status_show(struct seq_file *m, void *unused)
{
	struct intel_fbc *fbc = m->private;
	struct drm_i915_private *i915 = fbc->i915;
	struct intel_plane *plane;
	intel_wakeref_t wakeref;

	/*
	 * Acquire in this order: modeset locks, runtime PM wakeref,
	 * fbc->lock — and release in the reverse order below.
	 */
	drm_modeset_lock_all(&i915->drm);

	wakeref = intel_runtime_pm_get(&i915->runtime_pm);
	mutex_lock(&fbc->lock);

	if (fbc->active) {
		seq_puts(m, "FBC enabled\n");
		seq_printf(m, "Compressing: %s\n",
			   str_yes_no(intel_fbc_is_compressing(fbc)));
	} else {
		seq_printf(m, "FBC disabled: %s\n", fbc->no_fbc_reason);
	}

	/*
	 * List every plane wired to this FBC instance; '*' marks the plane
	 * FBC is currently configured for.
	 */
	for_each_intel_plane(&i915->drm, plane) {
		const struct intel_plane_state *plane_state =
			to_intel_plane_state(plane->base.state);

		if (plane->fbc != fbc)
			continue;

		seq_printf(m, "%c [PLANE:%d:%s]: %s\n",
			   fbc->state.plane == plane ? '*' : ' ',
			   plane->base.base.id, plane->base.name,
			   plane_state->no_fbc_reason ?: "FBC possible");
	}

	mutex_unlock(&fbc->lock);
	intel_runtime_pm_put(&i915->runtime_pm, wakeref);

	drm_modeset_unlock_all(&i915->drm);

	return 0;
}

/* Generates intel_fbc_debugfs_status_fops from the show() above. */
DEFINE_SHOW_ATTRIBUTE(intel_fbc_debugfs_status);
1896
/* debugfs read callback: report the current false color setting. */
static int intel_fbc_debugfs_false_color_get(void *data, u64 *val)
{
	struct intel_fbc *fbc = data;

	*val = fbc->false_color;

	return 0;
}
1905
/*
 * debugfs write callback: update the false color setting and, if FBC is
 * currently active, push it to the hardware immediately. No NULL check on
 * ->set_false_color is needed: intel_fbc_debugfs_add() only creates this
 * file when the vfunc is present.
 */
static int intel_fbc_debugfs_false_color_set(void *data, u64 val)
{
	struct intel_fbc *fbc = data;

	mutex_lock(&fbc->lock);

	fbc->false_color = val;

	if (fbc->active)
		fbc->funcs->set_false_color(fbc, fbc->false_color);

	mutex_unlock(&fbc->lock);

	return 0;
}

DEFINE_DEBUGFS_ATTRIBUTE(intel_fbc_debugfs_false_color_fops,
			 intel_fbc_debugfs_false_color_get,
			 intel_fbc_debugfs_false_color_set,
			 "%llu\n");
1926
/* Register this FBC instance's debugfs files under @parent. */
static void intel_fbc_debugfs_add(struct intel_fbc *fbc,
				  struct dentry *parent)
{
	debugfs_create_file("i915_fbc_status", 0444, parent,
			    fbc, &intel_fbc_debugfs_status_fops);

	/* False color is only exposed when the platform vfuncs support it. */
	if (fbc->funcs->set_false_color)
		debugfs_create_file_unsafe("i915_fbc_false_color", 0644, parent,
					   fbc, &intel_fbc_debugfs_false_color_fops);
}
1937
1938 void intel_fbc_crtc_debugfs_add(struct intel_crtc *crtc)
1939 {
1940         struct intel_plane *plane = to_intel_plane(crtc->base.primary);
1941
1942         if (plane->fbc)
1943                 intel_fbc_debugfs_add(plane->fbc, crtc->base.debugfs_entry);
1944 }
1945
1946 /* FIXME: remove this once igt is on board with per-crtc stuff */
1947 void intel_fbc_debugfs_register(struct drm_i915_private *i915)
1948 {
1949         struct drm_minor *minor = i915->drm.primary;
1950         struct intel_fbc *fbc;
1951
1952         fbc = i915->display.fbc[INTEL_FBC_A];
1953         if (fbc)
1954                 intel_fbc_debugfs_add(fbc, minor->debugfs_root);
1955 }