drivers/gpu/drm/vc4/vc4_crtc.c
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Copyright (C) 2015 Broadcom
4  */
5
6 /**
7  * DOC: VC4 CRTC module
8  *
9  * In VC4, the Pixel Valve is what most closely corresponds to the
10  * DRM's concept of a CRTC.  The PV generates video timings from the
11  * encoder's clock plus its configuration.  It pulls scaled pixels from
12  * the HVS at that timing, and feeds them to the encoder.
13  *
14  * However, the DRM CRTC also collects the configuration of all the
15  * DRM planes attached to it.  As a result, the CRTC is also
16  * responsible for writing the display list for the HVS channel that
17  * the CRTC will use.
18  *
19  * The 2835 has 3 different pixel valves.  pv0 in the audio power
20  * domain feeds DSI0 or DPI, while pv1 feeds DSI1 or SMI.  pv2 in the
21  * image domain can feed either HDMI or the SDTV controller.  The
22  * pixel valve chooses from the CPRMAN clocks (HSM for HDMI, VEC for
23  * SDTV, etc.) according to which output type is chosen in the mux.
24  *
25  * For power management, the pixel valve's registers are all clocked
26  * by the AXI clock, while the timings and FIFOs make use of the
27  * output-specific clock.  Since the encoders also directly consume
28  * the CPRMAN clocks, and know what timings they need, they are the
29  * ones that set the clock.
30  */
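/*
 * A rough sketch of the enable sequence implemented below, for orientation
 * (see vc4_crtc_atomic_enable()): vblank interrupts are turned on first,
 * then the HVS channel is enabled (vc4_hvs_atomic_enable()), the encoder's
 * pre_crtc_configure hook runs, the PV timing registers are programmed by
 * vc4_crtc_config_pv(), PV_CONTROL_EN is set, the encoder's pre_crtc_enable
 * hook runs, video output is started with PV_VCONTROL_VIDEN, and finally
 * the encoder's post_crtc_enable hook runs.
 */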
31
32 #include <linux/clk.h>
33 #include <linux/component.h>
34 #include <linux/of.h>
35 #include <linux/platform_device.h>
36 #include <linux/pm_runtime.h>
37
38 #include <drm/drm_atomic.h>
39 #include <drm/drm_atomic_helper.h>
40 #include <drm/drm_atomic_uapi.h>
41 #include <drm/drm_fb_dma_helper.h>
42 #include <drm/drm_framebuffer.h>
43 #include <drm/drm_drv.h>
44 #include <drm/drm_print.h>
45 #include <drm/drm_probe_helper.h>
46 #include <drm/drm_vblank.h>
47
48 #include "vc4_drv.h"
49 #include "vc4_hdmi.h"
50 #include "vc4_regs.h"
51
52 #define HVS_FIFO_LATENCY_PIX    6
53
54 #define CRTC_WRITE(offset, val)                                                         \
55         do {                                                                            \
56                 kunit_fail_current_test("Accessing a register in a unit test!\n");      \
57                 writel(val, vc4_crtc->regs + (offset));                                 \
58         } while (0)
59
60 #define CRTC_READ(offset)                                                               \
61         ({                                                                              \
62                 kunit_fail_current_test("Accessing a register in a unit test!\n");      \
63                 readl(vc4_crtc->regs + (offset));                                       \
64         })
65
66 static const struct debugfs_reg32 crtc_regs[] = {
67         VC4_REG32(PV_CONTROL),
68         VC4_REG32(PV_V_CONTROL),
69         VC4_REG32(PV_VSYNCD_EVEN),
70         VC4_REG32(PV_HORZA),
71         VC4_REG32(PV_HORZB),
72         VC4_REG32(PV_VERTA),
73         VC4_REG32(PV_VERTB),
74         VC4_REG32(PV_VERTA_EVEN),
75         VC4_REG32(PV_VERTB_EVEN),
76         VC4_REG32(PV_INTEN),
77         VC4_REG32(PV_INTSTAT),
78         VC4_REG32(PV_STAT),
79         VC4_REG32(PV_HACT_ACT),
80 };
81
82 static unsigned int
83 vc4_crtc_get_cob_allocation(struct vc4_dev *vc4, unsigned int channel)
84 {
85         struct vc4_hvs *hvs = vc4->hvs;
86         u32 dispbase = HVS_READ(SCALER_DISPBASEX(channel));
87         /* Top/base are supposed to be 4-pixel aligned, but the
88          * Raspberry Pi firmware fills the low bits (which are
89          * presumably ignored).
90          */
91         u32 top = VC4_GET_FIELD(dispbase, SCALER_DISPBASEX_TOP) & ~3;
92         u32 base = VC4_GET_FIELD(dispbase, SCALER_DISPBASEX_BASE) & ~3;
93
94         return top - base + 4;
95 }
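/*
 * A worked example of the COB size computation above, with hypothetical
 * register values: if SCALER_DISPBASEX reports TOP = 0x2000 and
 * BASE = 0x1000 (both already 4-pixel aligned), the allocation for that
 * channel is 0x2000 - 0x1000 + 4 = 4100 pixels.
 */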
96
97 static bool vc4_crtc_get_scanout_position(struct drm_crtc *crtc,
98                                           bool in_vblank_irq,
99                                           int *vpos, int *hpos,
100                                           ktime_t *stime, ktime_t *etime,
101                                           const struct drm_display_mode *mode)
102 {
103         struct drm_device *dev = crtc->dev;
104         struct vc4_dev *vc4 = to_vc4_dev(dev);
105         struct vc4_hvs *hvs = vc4->hvs;
106         struct vc4_crtc *vc4_crtc = to_vc4_crtc(crtc);
107         struct vc4_crtc_state *vc4_crtc_state = to_vc4_crtc_state(crtc->state);
108         unsigned int channel = vc4_crtc_state->assigned_channel;
109         unsigned int cob_size;
110         u32 val;
111         int fifo_lines;
112         int vblank_lines;
113         bool ret = false;
114
115         /* preempt_disable_rt() should go right here in PREEMPT_RT patchset. */
116
117         /* Get optional system timestamp before query. */
118         if (stime)
119                 *stime = ktime_get();
120
121         /*
122          * Read vertical scanline which is currently composed for our
123          * pixelvalve by the HVS, and also the scaler status.
124          */
125         val = HVS_READ(SCALER_DISPSTATX(channel));
126
127         /* Get optional system timestamp after query. */
128         if (etime)
129                 *etime = ktime_get();
130
131         /* preempt_enable_rt() should go right here in PREEMPT_RT patchset. */
132
133         /* Vertical position of hvs composed scanline. */
134         *vpos = VC4_GET_FIELD(val, SCALER_DISPSTATX_LINE);
135         *hpos = 0;
136
137         if (mode->flags & DRM_MODE_FLAG_INTERLACE) {
138                 *vpos /= 2;
139
140                 /* Use hpos to correct for field offset in interlaced mode. */
141                 if (vc4_hvs_get_fifo_frame_count(hvs, channel) % 2)
142                         *hpos += mode->crtc_htotal / 2;
143         }
144
145         cob_size = vc4_crtc_get_cob_allocation(vc4, channel);
146         /* This is the offset we need for translating hvs -> pv scanout pos. */
147         fifo_lines = cob_size / mode->crtc_hdisplay;
148
149         if (fifo_lines > 0)
150                 ret = true;
151
152         /* HVS more than fifo_lines into frame for compositing? */
153         if (*vpos > fifo_lines) {
154                 /*
155                  * We are in active scanout and can get some meaningful results
156                  * from the HVS. The actual PV scanout cannot trail behind by more
157                  * than fifo_lines, as that is the fifo's capacity. Assume that
158                  * in active scanout the HVS and PV work in lockstep wrt. HVS
159                  * refilling the fifo and PV consuming from the fifo, ie.
160                  * whenever the PV consumes and frees up a scanline in the
161                  * fifo, the HVS will immediately refill it, therefore
162                  * incrementing vpos. We thus use the HVS read position minus
163                  * the fifo size in scanlines as an estimate of the real scanout
164                  * position of the PV.
165                  */
166                 *vpos -= fifo_lines + 1;
167
168                 return ret;
169         }
170
171         /*
172          * Less (i.e. *vpos <= fifo_lines): we are in vblank and the HVS, after getting
173          * the VSTART restart signal from the PV, just started refilling its
174          * fifo with new lines from the top-most lines of the new framebuffers.
175          * The PV does not scan out in vblank, so does not remove lines from
176          * the fifo, so the fifo will be full quickly and the HVS has to pause.
177          * We can't get meaningful readings wrt. scanline position of the PV
178          * and need to make things up in an approximate but consistent way.
179          */
180         vblank_lines = mode->vtotal - mode->vdisplay;
181
182         if (in_vblank_irq) {
183                 /*
184                  * Assume the irq handler got called close to first
185                  * line of vblank, so the PV has about a full vblank's worth of
186                  * scanlines to go, and as a base timestamp use the
187                  * one taken at entry into vblank irq handler, so it
188                  * is not affected by random delays due to lock
189                  * contention on event_lock or vblank_time lock in
190                  * the core.
191                  */
192                 *vpos = -vblank_lines;
193
194                 if (stime)
195                         *stime = vc4_crtc->t_vblank;
196                 if (etime)
197                         *etime = vc4_crtc->t_vblank;
198
199                 /*
200                  * If the HVS fifo is not yet full then we know for certain
201                  * we are at the very beginning of vblank, as the hvs just
202                  * started refilling, and the stime and etime timestamps
203                  * truly correspond to start of vblank.
204                  *
205                  * Unfortunately there's no way to report this to upper levels
206                  * and make it more useful.
207                  */
208         } else {
209                 /*
210                  * No clue where we are inside vblank. Return a vpos of zero,
211                  * which will cause calling code to just return the etime
212                  * timestamp uncorrected. At least this is no worse than the
213                  * standard fallback.
214                  */
215                 *vpos = 0;
216         }
217
218         return ret;
219 }
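/*
 * A worked example of the scanout-position estimate above, with hypothetical
 * numbers: a COB allocation of 4100 pixels and a 1920-pixel-wide mode give
 * fifo_lines = 4100 / 1920 = 2. If the HVS reports line 100 during active
 * scanout, the PV is estimated to be at line 100 - (2 + 1) = 97.
 */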
220
221 static u32 vc4_get_fifo_full_level(struct vc4_crtc *vc4_crtc, u32 format)
222 {
223         const struct vc4_crtc_data *crtc_data = vc4_crtc_to_vc4_crtc_data(vc4_crtc);
224         const struct vc4_pv_data *pv_data = vc4_crtc_to_vc4_pv_data(vc4_crtc);
225         struct vc4_dev *vc4 = to_vc4_dev(vc4_crtc->base.dev);
226         u32 fifo_len_bytes = pv_data->fifo_depth;
227
228         /*
229          * Pixels are pulled from the HVS if the number of bytes is
230          * lower than the FIFO full level.
231          *
232          * The latency of the pixel fetch mechanism is 6 pixels, so we
233          * need to convert those 6 pixels in bytes, depending on the
234          * format, and then subtract that from the length of the FIFO
235          * to make sure we never end up in a situation where the FIFO
236          * is full.
237          */
238         switch (format) {
239         case PV_CONTROL_FORMAT_DSIV_16:
240         case PV_CONTROL_FORMAT_DSIC_16:
241                 return fifo_len_bytes - 2 * HVS_FIFO_LATENCY_PIX;
242         case PV_CONTROL_FORMAT_DSIV_18:
243                 return fifo_len_bytes - 14;
244         case PV_CONTROL_FORMAT_24:
245         case PV_CONTROL_FORMAT_DSIV_24:
246         default:
247                 /*
248                  * For some reason, the pixelvalve4 doesn't work with
249                  * the usual formula and will only work with 32.
250                  */
251                 if (crtc_data->hvs_output == 5)
252                         return 32;
253
254                 /*
255                  * It looks like in some situations, we will overflow
256                  * the PixelValve FIFO (with the bit 10 of PV stat being
257                  * set) and stall the HVS / PV, eventually resulting in
258                  * a page flip timeout.
259                  *
260                  * Displaying the video overlay during playback with
261                  * Kodi on an RPi3 is a reliable way to reproduce the
262                  * problem, with a failure rate of around 50%.
263                  *
264                  * Removing 1 from the FIFO full level however
265                  * seems to completely remove that issue.
266                  */
267                 if (vc4->gen == VC4_GEN_4)
268                         return fifo_len_bytes - 3 * HVS_FIFO_LATENCY_PIX - 1;
269
270                 return fifo_len_bytes - 3 * HVS_FIFO_LATENCY_PIX;
271         }
272 }
273
274 static u32 vc4_crtc_get_fifo_full_level_bits(struct vc4_crtc *vc4_crtc,
275                                              u32 format)
276 {
277         u32 level = vc4_get_fifo_full_level(vc4_crtc, format);
278         u32 ret = 0;
279
280         ret |= VC4_SET_FIELD((level >> 6),
281                              PV5_CONTROL_FIFO_LEVEL_HIGH);
282
283         return ret | VC4_SET_FIELD(level & 0x3f,
284                                    PV_CONTROL_FIFO_LEVEL);
285 }
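/*
 * A worked example of the FIFO full level above: bcm2711_pv2 (feeding HDMI0)
 * has a fifo_depth of 256, so for a 24-bit format on a VC4_GEN_5 part the
 * level is 256 - 3 * HVS_FIFO_LATENCY_PIX = 238. Split into register fields,
 * that becomes PV5_CONTROL_FIFO_LEVEL_HIGH = 238 >> 6 = 3 and
 * PV_CONTROL_FIFO_LEVEL = 238 & 0x3f = 46. (bcm2711_pv4, with hvs_output 5,
 * instead hits the hardcoded value of 32 above.)
 */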
286
287 /*
288  * Returns the encoder attached to the CRTC.
289  *
290  * VC4 can only scan out to one encoder at a time, while the DRM core
291  * allows drivers to push pixels to more than one encoder from the
292  * same CRTC.
293  */
294 struct drm_encoder *vc4_get_crtc_encoder(struct drm_crtc *crtc,
295                                          struct drm_crtc_state *state)
296 {
297         struct drm_encoder *encoder;
298
299         WARN_ON(hweight32(state->encoder_mask) > 1);
300
301         drm_for_each_encoder_mask(encoder, crtc->dev, state->encoder_mask)
302                 return encoder;
303
304         return NULL;
305 }
306
307 static void vc4_crtc_pixelvalve_reset(struct drm_crtc *crtc)
308 {
309         struct vc4_crtc *vc4_crtc = to_vc4_crtc(crtc);
310         struct drm_device *dev = crtc->dev;
311         int idx;
312
313         if (!drm_dev_enter(dev, &idx))
314                 return;
315
316         /* The PV needs to be disabled before it can be flushed */
317         CRTC_WRITE(PV_CONTROL, CRTC_READ(PV_CONTROL) & ~PV_CONTROL_EN);
318         CRTC_WRITE(PV_CONTROL, CRTC_READ(PV_CONTROL) | PV_CONTROL_FIFO_CLR);
319
320         drm_dev_exit(idx);
321 }
322
323 static void vc4_crtc_config_pv(struct drm_crtc *crtc, struct drm_encoder *encoder,
324                                struct drm_atomic_state *state)
325 {
326         struct drm_device *dev = crtc->dev;
327         struct vc4_dev *vc4 = to_vc4_dev(dev);
328         struct vc4_encoder *vc4_encoder = to_vc4_encoder(encoder);
329         struct vc4_crtc *vc4_crtc = to_vc4_crtc(crtc);
330         const struct vc4_pv_data *pv_data = vc4_crtc_to_vc4_pv_data(vc4_crtc);
331         struct drm_crtc_state *crtc_state = crtc->state;
332         struct drm_display_mode *mode = &crtc_state->adjusted_mode;
333         bool interlace = mode->flags & DRM_MODE_FLAG_INTERLACE;
334         bool is_hdmi = vc4_encoder->type == VC4_ENCODER_TYPE_HDMI0 ||
335                        vc4_encoder->type == VC4_ENCODER_TYPE_HDMI1;
336         u32 pixel_rep = ((mode->flags & DRM_MODE_FLAG_DBLCLK) && !is_hdmi) ? 2 : 1;
337         bool is_dsi = (vc4_encoder->type == VC4_ENCODER_TYPE_DSI0 ||
338                        vc4_encoder->type == VC4_ENCODER_TYPE_DSI1);
339         bool is_dsi1 = vc4_encoder->type == VC4_ENCODER_TYPE_DSI1;
340         bool is_vec = vc4_encoder->type == VC4_ENCODER_TYPE_VEC;
341         u32 format = is_dsi1 ? PV_CONTROL_FORMAT_DSIV_24 : PV_CONTROL_FORMAT_24;
342         u8 ppc = pv_data->pixels_per_clock;
343
344         u16 vert_bp = mode->crtc_vtotal - mode->crtc_vsync_end;
345         u16 vert_sync = mode->crtc_vsync_end - mode->crtc_vsync_start;
346         u16 vert_fp = mode->crtc_vsync_start - mode->crtc_vdisplay;
347
348         bool debug_dump_regs = false;
349         int idx;
350
351         if (!drm_dev_enter(dev, &idx))
352                 return;
353
354         if (debug_dump_regs) {
355                 struct drm_printer p = drm_info_printer(&vc4_crtc->pdev->dev);
356                 dev_info(&vc4_crtc->pdev->dev, "CRTC %d regs before:\n",
357                          drm_crtc_index(crtc));
358                 drm_print_regset32(&p, &vc4_crtc->regset);
359         }
360
361         vc4_crtc_pixelvalve_reset(crtc);
362
363         CRTC_WRITE(PV_HORZA,
364                    VC4_SET_FIELD((mode->htotal - mode->hsync_end) * pixel_rep / ppc,
365                                  PV_HORZA_HBP) |
366                    VC4_SET_FIELD((mode->hsync_end - mode->hsync_start) * pixel_rep / ppc,
367                                  PV_HORZA_HSYNC));
368
369         CRTC_WRITE(PV_HORZB,
370                    VC4_SET_FIELD((mode->hsync_start - mode->hdisplay) * pixel_rep / ppc,
371                                  PV_HORZB_HFP) |
372                    VC4_SET_FIELD(mode->hdisplay * pixel_rep / ppc,
373                                  PV_HORZB_HACTIVE));
374
375         if (interlace) {
376                 bool odd_field_first = false;
377                 u32 field_delay = mode->htotal * pixel_rep / (2 * ppc);
378                 u16 vert_bp_even = vert_bp;
379                 u16 vert_fp_even = vert_fp;
380
381                 if (is_vec) {
382                         /* VEC (composite output) */
383                         ++field_delay;
384                         if (mode->htotal == 858) {
385                                 /* 525-line mode (NTSC or PAL-M) */
386                                 odd_field_first = true;
387                         }
388                 }
389
390                 if (odd_field_first)
391                         ++vert_fp_even;
392                 else
393                         ++vert_bp;
394
395                 CRTC_WRITE(PV_VERTA_EVEN,
396                            VC4_SET_FIELD(vert_bp_even, PV_VERTA_VBP) |
397                            VC4_SET_FIELD(vert_sync, PV_VERTA_VSYNC));
398                 CRTC_WRITE(PV_VERTB_EVEN,
399                            VC4_SET_FIELD(vert_fp_even, PV_VERTB_VFP) |
400                            VC4_SET_FIELD(mode->crtc_vdisplay, PV_VERTB_VACTIVE));
401
402                 /* We set up first field even mode for HDMI and VEC's PAL.
403                  * For NTSC, we need first field odd.
404                  */
405                 CRTC_WRITE(PV_V_CONTROL,
406                            PV_VCONTROL_CONTINUOUS |
407                            (is_dsi ? PV_VCONTROL_DSI : 0) |
408                            PV_VCONTROL_INTERLACE |
409                            (odd_field_first
410                                    ? PV_VCONTROL_ODD_FIRST
411                                    : VC4_SET_FIELD(field_delay,
412                                                    PV_VCONTROL_ODD_DELAY)));
413                 CRTC_WRITE(PV_VSYNCD_EVEN,
414                            (odd_field_first ? field_delay : 0));
415         } else {
416                 CRTC_WRITE(PV_V_CONTROL,
417                            PV_VCONTROL_CONTINUOUS |
418                            (is_dsi ? PV_VCONTROL_DSI : 0));
419                 CRTC_WRITE(PV_VSYNCD_EVEN, 0);
420         }
421
422         CRTC_WRITE(PV_VERTA,
423                    VC4_SET_FIELD(vert_bp, PV_VERTA_VBP) |
424                    VC4_SET_FIELD(vert_sync, PV_VERTA_VSYNC));
425         CRTC_WRITE(PV_VERTB,
426                    VC4_SET_FIELD(vert_fp, PV_VERTB_VFP) |
427                    VC4_SET_FIELD(mode->crtc_vdisplay, PV_VERTB_VACTIVE));
428
429         if (is_dsi)
430                 CRTC_WRITE(PV_HACT_ACT, mode->hdisplay * pixel_rep);
431
432         if (vc4->gen == VC4_GEN_5)
433                 CRTC_WRITE(PV_MUX_CFG,
434                            VC4_SET_FIELD(PV_MUX_CFG_RGB_PIXEL_MUX_MODE_NO_SWAP,
435                                          PV_MUX_CFG_RGB_PIXEL_MUX_MODE));
436
437         CRTC_WRITE(PV_CONTROL, PV_CONTROL_FIFO_CLR |
438                    vc4_crtc_get_fifo_full_level_bits(vc4_crtc, format) |
439                    VC4_SET_FIELD(format, PV_CONTROL_FORMAT) |
440                    VC4_SET_FIELD(pixel_rep - 1, PV_CONTROL_PIXEL_REP) |
441                    PV_CONTROL_CLR_AT_START |
442                    PV_CONTROL_TRIGGER_UNDERFLOW |
443                    PV_CONTROL_WAIT_HSTART |
444                    VC4_SET_FIELD(vc4_encoder->clock_select,
445                                  PV_CONTROL_CLK_SELECT));
446
447         if (debug_dump_regs) {
448                 struct drm_printer p = drm_info_printer(&vc4_crtc->pdev->dev);
449                 dev_info(&vc4_crtc->pdev->dev, "CRTC %d regs after:\n",
450                          drm_crtc_index(crtc));
451                 drm_print_regset32(&p, &vc4_crtc->regset);
452         }
453
454         drm_dev_exit(idx);
455 }
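/*
 * A worked example of the timing programming above, using standard CEA
 * 1920x1080p60 timings (hdisplay 1920, hsync_start 2008, hsync_end 2052,
 * htotal 2200, vdisplay 1080, vsync_start 1084, vsync_end 1089,
 * vtotal 1125) on a pixelvalve with pixels_per_clock = 2 and no pixel
 * repetition:
 *
 *   PV_HORZA_HBP     = (2200 - 2052) / 2 = 74
 *   PV_HORZA_HSYNC   = (2052 - 2008) / 2 = 22
 *   PV_HORZB_HFP     = (2008 - 1920) / 2 = 44
 *   PV_HORZB_HACTIVE = 1920 / 2          = 960
 *   PV_VERTA_VBP     = 1125 - 1089       = 36
 *   PV_VERTA_VSYNC   = 1089 - 1084       = 5
 *   PV_VERTB_VFP     = 1084 - 1080       = 4
 *   PV_VERTB_VACTIVE = 1080
 */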
456
457 static void require_hvs_enabled(struct drm_device *dev)
458 {
459         struct vc4_dev *vc4 = to_vc4_dev(dev);
460         struct vc4_hvs *hvs = vc4->hvs;
461
462         WARN_ON_ONCE((HVS_READ(SCALER_DISPCTRL) & SCALER_DISPCTRL_ENABLE) !=
463                      SCALER_DISPCTRL_ENABLE);
464 }
465
466 static int vc4_crtc_disable(struct drm_crtc *crtc,
467                             struct drm_encoder *encoder,
468                             struct drm_atomic_state *state,
469                             unsigned int channel)
470 {
471         struct vc4_encoder *vc4_encoder = to_vc4_encoder(encoder);
472         struct vc4_crtc *vc4_crtc = to_vc4_crtc(crtc);
473         struct drm_device *dev = crtc->dev;
474         struct vc4_dev *vc4 = to_vc4_dev(dev);
475         int idx, ret;
476
477         if (!drm_dev_enter(dev, &idx))
478                 return -ENODEV;
479
480         CRTC_WRITE(PV_V_CONTROL,
481                    CRTC_READ(PV_V_CONTROL) & ~PV_VCONTROL_VIDEN);
482         ret = wait_for(!(CRTC_READ(PV_V_CONTROL) & PV_VCONTROL_VIDEN), 1);
483         WARN_ONCE(ret, "Timeout waiting for !PV_VCONTROL_VIDEN\n");
484
485         /*
486          * This delay is needed to avoid getting a pixel stuck in an
487          * unflushable FIFO between the pixelvalve and the HDMI
488          * controllers on the BCM2711.
489          *
490          * Timing is fairly sensitive here, so mdelay is the safest
491          * approach.
492          *
493          * If this were to be reworked, note that the stuck pixel is quite
494          * likely to occur on a BCM2711 when changing modes, so a
495          * script that changes modes on a regular basis should trigger
496          * the bug in fewer than 10 attempts. It manifests itself as
497          * every pixel being shifted by one to the right, and thus the
498          * last pixel of a line actually being displayed as the first
499          * pixel on the next line.
500          */
501         mdelay(20);
502
503         if (vc4_encoder && vc4_encoder->post_crtc_disable)
504                 vc4_encoder->post_crtc_disable(encoder, state);
505
506         vc4_crtc_pixelvalve_reset(crtc);
507         vc4_hvs_stop_channel(vc4->hvs, channel);
508
509         if (vc4_encoder && vc4_encoder->post_crtc_powerdown)
510                 vc4_encoder->post_crtc_powerdown(encoder, state);
511
512         drm_dev_exit(idx);
513
514         return 0;
515 }
516
517 int vc4_crtc_disable_at_boot(struct drm_crtc *crtc)
518 {
519         struct drm_device *drm = crtc->dev;
520         struct vc4_dev *vc4 = to_vc4_dev(drm);
521         struct vc4_crtc *vc4_crtc = to_vc4_crtc(crtc);
522         enum vc4_encoder_type encoder_type;
523         const struct vc4_pv_data *pv_data;
524         struct drm_encoder *encoder;
525         struct vc4_hdmi *vc4_hdmi;
526         unsigned encoder_sel;
527         int channel;
528         int ret;
529
530         if (!(of_device_is_compatible(vc4_crtc->pdev->dev.of_node,
531                                       "brcm,bcm2711-pixelvalve2") ||
532               of_device_is_compatible(vc4_crtc->pdev->dev.of_node,
533                                       "brcm,bcm2711-pixelvalve4")))
534                 return 0;
535
536         if (!(CRTC_READ(PV_CONTROL) & PV_CONTROL_EN))
537                 return 0;
538
539         if (!(CRTC_READ(PV_V_CONTROL) & PV_VCONTROL_VIDEN))
540                 return 0;
541
542         channel = vc4_hvs_get_fifo_from_output(vc4->hvs, vc4_crtc->data->hvs_output);
543         if (channel < 0)
544                 return 0;
545
546         encoder_sel = VC4_GET_FIELD(CRTC_READ(PV_CONTROL), PV_CONTROL_CLK_SELECT);
547         if (WARN_ON(encoder_sel != 0))
548                 return 0;
549
550         pv_data = vc4_crtc_to_vc4_pv_data(vc4_crtc);
551         encoder_type = pv_data->encoder_types[encoder_sel];
552         encoder = vc4_find_encoder_by_type(drm, encoder_type);
553         if (WARN_ON(!encoder))
554                 return 0;
555
556         vc4_hdmi = encoder_to_vc4_hdmi(encoder);
557         ret = pm_runtime_resume_and_get(&vc4_hdmi->pdev->dev);
558         if (ret)
559                 return ret;
560
561         ret = vc4_crtc_disable(crtc, encoder, NULL, channel);
562         if (ret)
563                 return ret;
564
565         /*
566          * post_crtc_powerdown will have called pm_runtime_put, so we
567          * don't need it here; otherwise we'll get the reference counting
568          * wrong.
569          */
570
571         return 0;
572 }
573
574 void vc4_crtc_send_vblank(struct drm_crtc *crtc)
575 {
576         struct drm_device *dev = crtc->dev;
577         unsigned long flags;
578
579         if (!crtc->state || !crtc->state->event)
580                 return;
581
582         spin_lock_irqsave(&dev->event_lock, flags);
583         drm_crtc_send_vblank_event(crtc, crtc->state->event);
584         crtc->state->event = NULL;
585         spin_unlock_irqrestore(&dev->event_lock, flags);
586 }
587
588 static void vc4_crtc_atomic_disable(struct drm_crtc *crtc,
589                                     struct drm_atomic_state *state)
590 {
591         struct drm_crtc_state *old_state = drm_atomic_get_old_crtc_state(state,
592                                                                          crtc);
593         struct vc4_crtc_state *old_vc4_state = to_vc4_crtc_state(old_state);
594         struct drm_encoder *encoder = vc4_get_crtc_encoder(crtc, old_state);
595         struct drm_device *dev = crtc->dev;
596
597         drm_dbg(dev, "Disabling CRTC %s (%u) connected to Encoder %s (%u)",
598                 crtc->name, crtc->base.id, encoder->name, encoder->base.id);
599
600         require_hvs_enabled(dev);
601
602         /* Disable vblank irq handling before crtc is disabled. */
603         drm_crtc_vblank_off(crtc);
604
605         vc4_crtc_disable(crtc, encoder, state, old_vc4_state->assigned_channel);
606
607         /*
608          * Make sure we issue a vblank event after disabling the CRTC if
609          * someone was waiting on it.
610          */
611         vc4_crtc_send_vblank(crtc);
612 }
613
614 static void vc4_crtc_atomic_enable(struct drm_crtc *crtc,
615                                    struct drm_atomic_state *state)
616 {
617         struct drm_crtc_state *new_state = drm_atomic_get_new_crtc_state(state,
618                                                                          crtc);
619         struct drm_device *dev = crtc->dev;
620         struct vc4_crtc *vc4_crtc = to_vc4_crtc(crtc);
621         struct drm_encoder *encoder = vc4_get_crtc_encoder(crtc, new_state);
622         struct vc4_encoder *vc4_encoder = to_vc4_encoder(encoder);
623         int idx;
624
625         drm_dbg(dev, "Enabling CRTC %s (%u) connected to Encoder %s (%u)",
626                 crtc->name, crtc->base.id, encoder->name, encoder->base.id);
627
628         if (!drm_dev_enter(dev, &idx))
629                 return;
630
631         require_hvs_enabled(dev);
632
633         /* Enable vblank irq handling before the crtc is started, otherwise
634          * drm_crtc_vblank_get() fails in vc4_crtc_update_dlist().
635          */
636         drm_crtc_vblank_on(crtc);
637
638         vc4_hvs_atomic_enable(crtc, state);
639
640         if (vc4_encoder->pre_crtc_configure)
641                 vc4_encoder->pre_crtc_configure(encoder, state);
642
643         vc4_crtc_config_pv(crtc, encoder, state);
644
645         CRTC_WRITE(PV_CONTROL, CRTC_READ(PV_CONTROL) | PV_CONTROL_EN);
646
647         if (vc4_encoder->pre_crtc_enable)
648                 vc4_encoder->pre_crtc_enable(encoder, state);
649
650         /* When feeding the transposer block, the pixelvalve is unneeded and
651          * should not be enabled.
652          */
653         CRTC_WRITE(PV_V_CONTROL,
654                    CRTC_READ(PV_V_CONTROL) | PV_VCONTROL_VIDEN);
655
656         if (vc4_encoder->post_crtc_enable)
657                 vc4_encoder->post_crtc_enable(encoder, state);
658
659         drm_dev_exit(idx);
660 }
661
662 static enum drm_mode_status vc4_crtc_mode_valid(struct drm_crtc *crtc,
663                                                 const struct drm_display_mode *mode)
664 {
665         /* Do not allow doublescan modes from user space */
666         if (mode->flags & DRM_MODE_FLAG_DBLSCAN) {
667                 DRM_DEBUG_KMS("[CRTC:%d] Doublescan mode rejected.\n",
668                               crtc->base.id);
669                 return MODE_NO_DBLESCAN;
670         }
671
672         return MODE_OK;
673 }
674
675 void vc4_crtc_get_margins(struct drm_crtc_state *state,
676                           unsigned int *left, unsigned int *right,
677                           unsigned int *top, unsigned int *bottom)
678 {
679         struct vc4_crtc_state *vc4_state = to_vc4_crtc_state(state);
680         struct drm_connector_state *conn_state;
681         struct drm_connector *conn;
682         int i;
683
684         *left = vc4_state->margins.left;
685         *right = vc4_state->margins.right;
686         *top = vc4_state->margins.top;
687         *bottom = vc4_state->margins.bottom;
688
689         /* We have to iterate over all new connector states because
690          * vc4_crtc_get_margins() might be called before
691          * vc4_crtc_atomic_check(), which means the margins info in vc4_crtc_state
692          * might be outdated.
693          */
694         for_each_new_connector_in_state(state->state, conn, conn_state, i) {
695                 if (conn_state->crtc != state->crtc)
696                         continue;
697
698                 *left = conn_state->tv.margins.left;
699                 *right = conn_state->tv.margins.right;
700                 *top = conn_state->tv.margins.top;
701                 *bottom = conn_state->tv.margins.bottom;
702                 break;
703         }
704 }
705
706 int vc4_crtc_atomic_check(struct drm_crtc *crtc,
707                           struct drm_atomic_state *state)
708 {
709         struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state,
710                                                                           crtc);
711         struct vc4_crtc_state *vc4_state = to_vc4_crtc_state(crtc_state);
712         struct drm_connector *conn;
713         struct drm_connector_state *conn_state;
714         struct drm_encoder *encoder;
715         int ret, i;
716
717         ret = vc4_hvs_atomic_check(crtc, state);
718         if (ret)
719                 return ret;
720
721         encoder = vc4_get_crtc_encoder(crtc, crtc_state);
722         if (encoder) {
723                 const struct drm_display_mode *mode = &crtc_state->adjusted_mode;
724                 struct vc4_encoder *vc4_encoder = to_vc4_encoder(encoder);
725
726                 if (vc4_encoder->type == VC4_ENCODER_TYPE_HDMI0) {
727                         vc4_state->hvs_load = max(mode->clock * mode->hdisplay / mode->htotal + 8000,
728                                                   mode->clock * 9 / 10) * 1000;
729                 } else {
730                         vc4_state->hvs_load = mode->clock * 1000;
731                 }
732         }
733
734         for_each_new_connector_in_state(state, conn, conn_state,
735                                         i) {
736                 if (conn_state->crtc != crtc)
737                         continue;
738
739                 if (memcmp(&vc4_state->margins, &conn_state->tv.margins,
740                            sizeof(vc4_state->margins))) {
741                         memcpy(&vc4_state->margins, &conn_state->tv.margins,
742                                sizeof(vc4_state->margins));
743
744                         /*
745                          * Need to force the dlist entries for all planes to be
746                          * updated so that the dest rectangles are changed.
747                          */
748                         crtc_state->zpos_changed = true;
749                 }
750                 break;
751         }
752
753         return 0;
754 }
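/*
 * A worked example of the hvs_load estimate above, for a hypothetical
 * 1080p60 mode on HDMI0 (clock 148500 kHz, hdisplay 1920, htotal 2200):
 * max(148500 * 1920 / 2200 + 8000, 148500 * 9 / 10) =
 * max(137600, 133650) = 137600, so hvs_load = 137600000.
 */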
755
756 static int vc4_enable_vblank(struct drm_crtc *crtc)
757 {
758         struct vc4_crtc *vc4_crtc = to_vc4_crtc(crtc);
759         struct drm_device *dev = crtc->dev;
760         int idx;
761
762         if (!drm_dev_enter(dev, &idx))
763                 return -ENODEV;
764
765         CRTC_WRITE(PV_INTEN, PV_INT_VFP_START);
766
767         drm_dev_exit(idx);
768
769         return 0;
770 }
771
772 static void vc4_disable_vblank(struct drm_crtc *crtc)
773 {
774         struct vc4_crtc *vc4_crtc = to_vc4_crtc(crtc);
775         struct drm_device *dev = crtc->dev;
776         int idx;
777
778         if (!drm_dev_enter(dev, &idx))
779                 return;
780
781         CRTC_WRITE(PV_INTEN, 0);
782
783         drm_dev_exit(idx);
784 }
785
786 static void vc4_crtc_handle_page_flip(struct vc4_crtc *vc4_crtc)
787 {
788         struct drm_crtc *crtc = &vc4_crtc->base;
789         struct drm_device *dev = crtc->dev;
790         struct vc4_dev *vc4 = to_vc4_dev(dev);
791         struct vc4_hvs *hvs = vc4->hvs;
792         u32 chan = vc4_crtc->current_hvs_channel;
793         unsigned long flags;
794
795         spin_lock_irqsave(&dev->event_lock, flags);
796         spin_lock(&vc4_crtc->irq_lock);
797         if (vc4_crtc->event &&
798             (vc4_crtc->current_dlist == HVS_READ(SCALER_DISPLACTX(chan)) ||
799              vc4_crtc->feeds_txp)) {
800                 drm_crtc_send_vblank_event(crtc, vc4_crtc->event);
801                 vc4_crtc->event = NULL;
802                 drm_crtc_vblank_put(crtc);
803
804                 /* Wait for the page flip to unmask the underrun to ensure that
805                  * the display list was updated by the hardware. Before that
806                  * happens, the HVS will be using the previous display list with
807                  * the CRTC and encoder already reconfigured, leading to
808                  * underruns. This can be seen when reconfiguring the CRTC.
809                  */
810                 vc4_hvs_unmask_underrun(hvs, chan);
811         }
812         spin_unlock(&vc4_crtc->irq_lock);
813         spin_unlock_irqrestore(&dev->event_lock, flags);
814 }
815
816 void vc4_crtc_handle_vblank(struct vc4_crtc *crtc)
817 {
818         crtc->t_vblank = ktime_get();
819         drm_crtc_handle_vblank(&crtc->base);
820         vc4_crtc_handle_page_flip(crtc);
821 }
822
823 static irqreturn_t vc4_crtc_irq_handler(int irq, void *data)
824 {
825         struct vc4_crtc *vc4_crtc = data;
826         u32 stat = CRTC_READ(PV_INTSTAT);
827         irqreturn_t ret = IRQ_NONE;
828
829         if (stat & PV_INT_VFP_START) {
830                 CRTC_WRITE(PV_INTSTAT, PV_INT_VFP_START);
831                 vc4_crtc_handle_vblank(vc4_crtc);
832                 ret = IRQ_HANDLED;
833         }
834
835         return ret;
836 }
837
838 struct vc4_async_flip_state {
839         struct drm_crtc *crtc;
840         struct drm_framebuffer *fb;
841         struct drm_framebuffer *old_fb;
842         struct drm_pending_vblank_event *event;
843
844         union {
845                 struct dma_fence_cb fence;
846                 struct vc4_seqno_cb seqno;
847         } cb;
848 };
849
850 /* Called when the V3D execution for the BO being flipped to is done, so that
851  * we can actually update the plane's address to point to it.
852  */
853 static void
854 vc4_async_page_flip_complete(struct vc4_async_flip_state *flip_state)
855 {
856         struct drm_crtc *crtc = flip_state->crtc;
857         struct drm_device *dev = crtc->dev;
858         struct drm_plane *plane = crtc->primary;
859
860         vc4_plane_async_set_fb(plane, flip_state->fb);
861         if (flip_state->event) {
862                 unsigned long flags;
863
864                 spin_lock_irqsave(&dev->event_lock, flags);
865                 drm_crtc_send_vblank_event(crtc, flip_state->event);
866                 spin_unlock_irqrestore(&dev->event_lock, flags);
867         }
868
869         drm_crtc_vblank_put(crtc);
870         drm_framebuffer_put(flip_state->fb);
871
872         if (flip_state->old_fb)
873                 drm_framebuffer_put(flip_state->old_fb);
874
875         kfree(flip_state);
876 }
877
878 static void vc4_async_page_flip_seqno_complete(struct vc4_seqno_cb *cb)
879 {
880         struct vc4_async_flip_state *flip_state =
881                 container_of(cb, struct vc4_async_flip_state, cb.seqno);
882         struct vc4_bo *bo = NULL;
883
884         if (flip_state->old_fb) {
885                 struct drm_gem_dma_object *dma_bo =
886                         drm_fb_dma_get_gem_obj(flip_state->old_fb, 0);
887                 bo = to_vc4_bo(&dma_bo->base);
888         }
889
890         vc4_async_page_flip_complete(flip_state);
891
892         /*
893          * Decrement the BO usecnt in order to keep the inc/dec
894          * calls balanced when the planes are updated through
895          * the async update path.
896          *
897          * FIXME: we should move to generic async-page-flip when
898          * it's available, so that we can get rid of this
899          * hand-made cleanup_fb() logic.
900          */
901         if (bo)
902                 vc4_bo_dec_usecnt(bo);
903 }
904
905 static void vc4_async_page_flip_fence_complete(struct dma_fence *fence,
906                                                struct dma_fence_cb *cb)
907 {
908         struct vc4_async_flip_state *flip_state =
909                 container_of(cb, struct vc4_async_flip_state, cb.fence);
910
911         vc4_async_page_flip_complete(flip_state);
912         dma_fence_put(fence);
913 }
914
915 static int vc4_async_set_fence_cb(struct drm_device *dev,
916                                   struct vc4_async_flip_state *flip_state)
917 {
918         struct drm_framebuffer *fb = flip_state->fb;
919         struct drm_gem_dma_object *dma_bo = drm_fb_dma_get_gem_obj(fb, 0);
920         struct vc4_dev *vc4 = to_vc4_dev(dev);
921         struct dma_fence *fence;
922         int ret;
923
924         if (vc4->gen == VC4_GEN_4) {
925                 struct vc4_bo *bo = to_vc4_bo(&dma_bo->base);
926
927                 return vc4_queue_seqno_cb(dev, &flip_state->cb.seqno, bo->seqno,
928                                           vc4_async_page_flip_seqno_complete);
929         }
930
931         ret = dma_resv_get_singleton(dma_bo->base.resv, DMA_RESV_USAGE_READ, &fence);
932         if (ret)
933                 return ret;
934
935         /* If there's no fence, complete the page flip immediately */
936         if (!fence) {
937                 vc4_async_page_flip_fence_complete(fence, &flip_state->cb.fence);
938                 return 0;
939         }
940
941         /* If the fence has already been completed, complete the page flip */
942         if (dma_fence_add_callback(fence, &flip_state->cb.fence,
943                                    vc4_async_page_flip_fence_complete))
944                 vc4_async_page_flip_fence_complete(fence, &flip_state->cb.fence);
945
946         return 0;
947 }
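/*
 * Two completion paths are used above: on VC4_GEN_4 the flip completes
 * through a V3D seqno callback once rendering to the new BO is done, while
 * later generations wait on the buffer's dma-resv fence (or complete
 * immediately if no fence is attached).
 */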
948
949 static int
950 vc4_async_page_flip_common(struct drm_crtc *crtc,
951                            struct drm_framebuffer *fb,
952                            struct drm_pending_vblank_event *event,
953                            uint32_t flags)
954 {
955         struct drm_device *dev = crtc->dev;
956         struct drm_plane *plane = crtc->primary;
957         struct vc4_async_flip_state *flip_state;
958
959         flip_state = kzalloc(sizeof(*flip_state), GFP_KERNEL);
960         if (!flip_state)
961                 return -ENOMEM;
962
963         drm_framebuffer_get(fb);
964         flip_state->fb = fb;
965         flip_state->crtc = crtc;
966         flip_state->event = event;
967
968         /* Save the current FB before it's replaced by the new one in
969          * drm_atomic_set_fb_for_plane(). We'll need the old FB in
970          * vc4_async_page_flip_complete() to decrement the BO usecnt and keep
971          * it consistent.
972          * FIXME: we should move to generic async-page-flip when it's
973          * available, so that we can get rid of this hand-made cleanup_fb()
974          * logic.
975          */
976         flip_state->old_fb = plane->state->fb;
977         if (flip_state->old_fb)
978                 drm_framebuffer_get(flip_state->old_fb);
979
980         WARN_ON(drm_crtc_vblank_get(crtc) != 0);
981
982         /* Immediately update the plane's legacy fb pointer, so that later
983          * modeset prep sees the state that will be present when the semaphore
984          * is released.
985          */
986         drm_atomic_set_fb_for_plane(plane->state, fb);
987
988         vc4_async_set_fence_cb(dev, flip_state);
989
990         /* Driver takes ownership of state on successful async commit. */
991         return 0;
992 }
993
994 /* Implements async (non-vblank-synced) page flips.
995  *
996  * The page flip ioctl needs to return immediately, so we grab the
997  * modeset semaphore on the pipe, and queue the address update for
998  * when V3D is done with the BO being flipped to.
999  */
1000 static int vc4_async_page_flip(struct drm_crtc *crtc,
1001                                struct drm_framebuffer *fb,
1002                                struct drm_pending_vblank_event *event,
1003                                uint32_t flags)
1004 {
1005         struct drm_device *dev = crtc->dev;
1006         struct vc4_dev *vc4 = to_vc4_dev(dev);
1007         struct drm_gem_dma_object *dma_bo = drm_fb_dma_get_gem_obj(fb, 0);
1008         struct vc4_bo *bo = to_vc4_bo(&dma_bo->base);
1009         int ret;
1010
1011         if (WARN_ON_ONCE(vc4->gen > VC4_GEN_4))
1012                 return -ENODEV;
1013
1014         /*
1015          * Increment the BO usecnt here, so that we never end up with an
1016          * unbalanced number of vc4_bo_{dec,inc}_usecnt() calls when the
1017          * plane is later updated through the non-async path.
1018          *
1019          * FIXME: we should move to generic async-page-flip when
1020          * it's available, so that we can get rid of this
1021          * hand-made prepare_fb() logic.
1022          */
1023         ret = vc4_bo_inc_usecnt(bo);
1024         if (ret)
1025                 return ret;
1026
1027         ret = vc4_async_page_flip_common(crtc, fb, event, flags);
1028         if (ret) {
1029                 vc4_bo_dec_usecnt(bo);
1030                 return ret;
1031         }
1032
1033         return 0;
1034 }
1035
1036 static int vc5_async_page_flip(struct drm_crtc *crtc,
1037                                struct drm_framebuffer *fb,
1038                                struct drm_pending_vblank_event *event,
1039                                uint32_t flags)
1040 {
1041         return vc4_async_page_flip_common(crtc, fb, event, flags);
1042 }
1043
1044 int vc4_page_flip(struct drm_crtc *crtc,
1045                   struct drm_framebuffer *fb,
1046                   struct drm_pending_vblank_event *event,
1047                   uint32_t flags,
1048                   struct drm_modeset_acquire_ctx *ctx)
1049 {
1050         if (flags & DRM_MODE_PAGE_FLIP_ASYNC) {
1051                 struct drm_device *dev = crtc->dev;
1052                 struct vc4_dev *vc4 = to_vc4_dev(dev);
1053
1054                 if (vc4->gen > VC4_GEN_4)
1055                         return vc5_async_page_flip(crtc, fb, event, flags);
1056                 else
1057                         return vc4_async_page_flip(crtc, fb, event, flags);
1058         } else {
1059                 return drm_atomic_helper_page_flip(crtc, fb, event, flags, ctx);
1060         }
1061 }
1062
1063 struct drm_crtc_state *vc4_crtc_duplicate_state(struct drm_crtc *crtc)
1064 {
1065         struct vc4_crtc_state *vc4_state, *old_vc4_state;
1066
1067         vc4_state = kzalloc(sizeof(*vc4_state), GFP_KERNEL);
1068         if (!vc4_state)
1069                 return NULL;
1070
1071         old_vc4_state = to_vc4_crtc_state(crtc->state);
1072         vc4_state->margins = old_vc4_state->margins;
1073         vc4_state->assigned_channel = old_vc4_state->assigned_channel;
1074
1075         __drm_atomic_helper_crtc_duplicate_state(crtc, &vc4_state->base);
1076         return &vc4_state->base;
1077 }
1078
1079 void vc4_crtc_destroy_state(struct drm_crtc *crtc,
1080                             struct drm_crtc_state *state)
1081 {
1082         struct vc4_dev *vc4 = to_vc4_dev(crtc->dev);
1083         struct vc4_crtc_state *vc4_state = to_vc4_crtc_state(state);
1084
1085         if (drm_mm_node_allocated(&vc4_state->mm)) {
1086                 unsigned long flags;
1087
1088                 spin_lock_irqsave(&vc4->hvs->mm_lock, flags);
1089                 drm_mm_remove_node(&vc4_state->mm);
1090                 spin_unlock_irqrestore(&vc4->hvs->mm_lock, flags);
1091
1092         }
1093
1094         drm_atomic_helper_crtc_destroy_state(crtc, state);
1095 }
1096
1097 void vc4_crtc_reset(struct drm_crtc *crtc)
1098 {
1099         struct vc4_crtc_state *vc4_crtc_state;
1100
1101         if (crtc->state)
1102                 vc4_crtc_destroy_state(crtc, crtc->state);
1103
1104         vc4_crtc_state = kzalloc(sizeof(*vc4_crtc_state), GFP_KERNEL);
1105         if (!vc4_crtc_state) {
1106                 crtc->state = NULL;
1107                 return;
1108         }
1109
1110         vc4_crtc_state->assigned_channel = VC4_HVS_CHANNEL_DISABLED;
1111         __drm_atomic_helper_crtc_reset(crtc, &vc4_crtc_state->base);
1112 }
1113
1114 int vc4_crtc_late_register(struct drm_crtc *crtc)
1115 {
1116         struct drm_device *drm = crtc->dev;
1117         struct vc4_crtc *vc4_crtc = to_vc4_crtc(crtc);
1118         const struct vc4_crtc_data *crtc_data = vc4_crtc_to_vc4_crtc_data(vc4_crtc);
1119
1120         vc4_debugfs_add_regset32(drm, crtc_data->debugfs_name,
1121                                  &vc4_crtc->regset);
1122
1123         return 0;
1124 }
1125
1126 static const struct drm_crtc_funcs vc4_crtc_funcs = {
1127         .set_config = drm_atomic_helper_set_config,
1128         .page_flip = vc4_page_flip,
1129         .set_property = NULL,
1130         .cursor_set = NULL, /* handled by drm_mode_cursor_universal */
1131         .cursor_move = NULL, /* handled by drm_mode_cursor_universal */
1132         .reset = vc4_crtc_reset,
1133         .atomic_duplicate_state = vc4_crtc_duplicate_state,
1134         .atomic_destroy_state = vc4_crtc_destroy_state,
1135         .enable_vblank = vc4_enable_vblank,
1136         .disable_vblank = vc4_disable_vblank,
1137         .get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
1138         .late_register = vc4_crtc_late_register,
1139 };
1140
1141 static const struct drm_crtc_helper_funcs vc4_crtc_helper_funcs = {
1142         .mode_valid = vc4_crtc_mode_valid,
1143         .atomic_check = vc4_crtc_atomic_check,
1144         .atomic_begin = vc4_hvs_atomic_begin,
1145         .atomic_flush = vc4_hvs_atomic_flush,
1146         .atomic_enable = vc4_crtc_atomic_enable,
1147         .atomic_disable = vc4_crtc_atomic_disable,
1148         .get_scanout_position = vc4_crtc_get_scanout_position,
1149 };
1150
1151 const struct vc4_pv_data bcm2835_pv0_data = {
1152         .base = {
1153                 .name = "pixelvalve-0",
1154                 .debugfs_name = "crtc0_regs",
1155                 .hvs_available_channels = BIT(0),
1156                 .hvs_output = 0,
1157         },
1158         .fifo_depth = 64,
1159         .pixels_per_clock = 1,
1160         .encoder_types = {
1161                 [PV_CONTROL_CLK_SELECT_DSI] = VC4_ENCODER_TYPE_DSI0,
1162                 [PV_CONTROL_CLK_SELECT_DPI_SMI_HDMI] = VC4_ENCODER_TYPE_DPI,
1163         },
1164 };
1165
1166 const struct vc4_pv_data bcm2835_pv1_data = {
1167         .base = {
1168                 .name = "pixelvalve-1",
1169                 .debugfs_name = "crtc1_regs",
1170                 .hvs_available_channels = BIT(2),
1171                 .hvs_output = 2,
1172         },
1173         .fifo_depth = 64,
1174         .pixels_per_clock = 1,
1175         .encoder_types = {
1176                 [PV_CONTROL_CLK_SELECT_DSI] = VC4_ENCODER_TYPE_DSI1,
1177                 [PV_CONTROL_CLK_SELECT_DPI_SMI_HDMI] = VC4_ENCODER_TYPE_SMI,
1178         },
1179 };
1180
1181 const struct vc4_pv_data bcm2835_pv2_data = {
1182         .base = {
1183                 .name = "pixelvalve-2",
1184                 .debugfs_name = "crtc2_regs",
1185                 .hvs_available_channels = BIT(1),
1186                 .hvs_output = 1,
1187         },
1188         .fifo_depth = 64,
1189         .pixels_per_clock = 1,
1190         .encoder_types = {
1191                 [PV_CONTROL_CLK_SELECT_DPI_SMI_HDMI] = VC4_ENCODER_TYPE_HDMI0,
1192                 [PV_CONTROL_CLK_SELECT_VEC] = VC4_ENCODER_TYPE_VEC,
1193         },
1194 };
1195
1196 const struct vc4_pv_data bcm2711_pv0_data = {
1197         .base = {
1198                 .name = "pixelvalve-0",
1199                 .debugfs_name = "crtc0_regs",
1200                 .hvs_available_channels = BIT(0),
1201                 .hvs_output = 0,
1202         },
1203         .fifo_depth = 64,
1204         .pixels_per_clock = 1,
1205         .encoder_types = {
1206                 [0] = VC4_ENCODER_TYPE_DSI0,
1207                 [1] = VC4_ENCODER_TYPE_DPI,
1208         },
1209 };
1210
1211 const struct vc4_pv_data bcm2711_pv1_data = {
1212         .base = {
1213                 .name = "pixelvalve-1",
1214                 .debugfs_name = "crtc1_regs",
1215                 .hvs_available_channels = BIT(0) | BIT(1) | BIT(2),
1216                 .hvs_output = 3,
1217         },
1218         .fifo_depth = 64,
1219         .pixels_per_clock = 1,
1220         .encoder_types = {
1221                 [0] = VC4_ENCODER_TYPE_DSI1,
1222                 [1] = VC4_ENCODER_TYPE_SMI,
1223         },
1224 };
1225
1226 const struct vc4_pv_data bcm2711_pv2_data = {
1227         .base = {
1228                 .name = "pixelvalve-2",
1229                 .debugfs_name = "crtc2_regs",
1230                 .hvs_available_channels = BIT(0) | BIT(1) | BIT(2),
1231                 .hvs_output = 4,
1232         },
1233         .fifo_depth = 256,
1234         .pixels_per_clock = 2,
1235         .encoder_types = {
1236                 [0] = VC4_ENCODER_TYPE_HDMI0,
1237         },
1238 };
1239
1240 const struct vc4_pv_data bcm2711_pv3_data = {
1241         .base = {
1242                 .name = "pixelvalve-3",
1243                 .debugfs_name = "crtc3_regs",
1244                 .hvs_available_channels = BIT(1),
1245                 .hvs_output = 1,
1246         },
1247         .fifo_depth = 64,
1248         .pixels_per_clock = 1,
1249         .encoder_types = {
1250                 [PV_CONTROL_CLK_SELECT_VEC] = VC4_ENCODER_TYPE_VEC,
1251         },
1252 };
1253
1254 const struct vc4_pv_data bcm2711_pv4_data = {
1255         .base = {
1256                 .name = "pixelvalve-4",
1257                 .debugfs_name = "crtc4_regs",
1258                 .hvs_available_channels = BIT(0) | BIT(1) | BIT(2),
1259                 .hvs_output = 5,
1260         },
1261         .fifo_depth = 64,
1262         .pixels_per_clock = 2,
1263         .encoder_types = {
1264                 [0] = VC4_ENCODER_TYPE_HDMI1,
1265         },
1266 };
1267
1268 static const struct of_device_id vc4_crtc_dt_match[] = {
1269         { .compatible = "brcm,bcm2835-pixelvalve0", .data = &bcm2835_pv0_data },
1270         { .compatible = "brcm,bcm2835-pixelvalve1", .data = &bcm2835_pv1_data },
1271         { .compatible = "brcm,bcm2835-pixelvalve2", .data = &bcm2835_pv2_data },
1272         { .compatible = "brcm,bcm2711-pixelvalve0", .data = &bcm2711_pv0_data },
1273         { .compatible = "brcm,bcm2711-pixelvalve1", .data = &bcm2711_pv1_data },
1274         { .compatible = "brcm,bcm2711-pixelvalve2", .data = &bcm2711_pv2_data },
1275         { .compatible = "brcm,bcm2711-pixelvalve3", .data = &bcm2711_pv3_data },
1276         { .compatible = "brcm,bcm2711-pixelvalve4", .data = &bcm2711_pv4_data },
1277         {}
1278 };
1279
1280 static void vc4_set_crtc_possible_masks(struct drm_device *drm,
1281                                         struct drm_crtc *crtc)
1282 {
1283         struct vc4_crtc *vc4_crtc = to_vc4_crtc(crtc);
1284         const struct vc4_pv_data *pv_data = vc4_crtc_to_vc4_pv_data(vc4_crtc);
1285         const enum vc4_encoder_type *encoder_types = pv_data->encoder_types;
1286         struct drm_encoder *encoder;
1287
1288         drm_for_each_encoder(encoder, drm) {
1289                 struct vc4_encoder *vc4_encoder;
1290                 int i;
1291
1292                 if (encoder->encoder_type == DRM_MODE_ENCODER_VIRTUAL)
1293                         continue;
1294
1295                 vc4_encoder = to_vc4_encoder(encoder);
1296                 for (i = 0; i < ARRAY_SIZE(pv_data->encoder_types); i++) {
1297                         if (vc4_encoder->type == encoder_types[i]) {
1298                                 vc4_encoder->clock_select = i;
1299                                 encoder->possible_crtcs |= drm_crtc_mask(crtc);
1300                                 break;
1301                         }
1302                 }
1303         }
1304 }
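/*
 * For example, with bcm2835_pv2_data above, an HDMI0 encoder matches
 * encoder_types[PV_CONTROL_CLK_SELECT_DPI_SMI_HDMI], so its clock_select
 * becomes that index and pixelvalve-2's CRTC is added to the encoder's
 * possible_crtcs mask.
 */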
1305
1306 /**
1307  * __vc4_crtc_init - Initializes a CRTC
1308  * @drm: DRM Device
1309  * @pdev: CRTC Platform Device
1310  * @vc4_crtc: CRTC Object to Initialize
1311  * @data: Configuration data associated with this CRTC
1312  * @primary_plane: Primary plane for CRTC
1313  * @crtc_funcs: Callbacks for the new CRTC
1314  * @crtc_helper_funcs: Helper Callbacks for the new CRTC
1315  * @feeds_txp: Is this CRTC connected to the TXP?
1316  *
1317  * Initializes our private CRTC structure. This function is mostly
1318  * relevant for KUnit testing, all other users should use
1319  * vc4_crtc_init() instead.
1320  *
1321  * Returns:
1322  * 0 on success, a negative error code on failure.
1323  */
1324 int __vc4_crtc_init(struct drm_device *drm,
1325                     struct platform_device *pdev,
1326                     struct vc4_crtc *vc4_crtc,
1327                     const struct vc4_crtc_data *data,
1328                     struct drm_plane *primary_plane,
1329                     const struct drm_crtc_funcs *crtc_funcs,
1330                     const struct drm_crtc_helper_funcs *crtc_helper_funcs,
1331                     bool feeds_txp)
1332 {
1333         struct vc4_dev *vc4 = to_vc4_dev(drm);
1334         struct drm_crtc *crtc = &vc4_crtc->base;
1335         unsigned int i;
1336         int ret;
1337
1338         vc4_crtc->data = data;
1339         vc4_crtc->pdev = pdev;
1340         vc4_crtc->feeds_txp = feeds_txp;
1341         spin_lock_init(&vc4_crtc->irq_lock);
1342         ret = drmm_crtc_init_with_planes(drm, crtc, primary_plane, NULL,
1343                                          crtc_funcs, data->name);
1344         if (ret)
1345                 return ret;
1346
1347         drm_crtc_helper_add(crtc, crtc_helper_funcs);
1348
1349         if (vc4->gen == VC4_GEN_4) {
1350                 drm_mode_crtc_set_gamma_size(crtc, ARRAY_SIZE(vc4_crtc->lut_r));
1351                 drm_crtc_enable_color_mgmt(crtc, 0, false, crtc->gamma_size);
1352
1353                 /* We support CTM, but only for one CRTC at a time. It's therefore
1354                  * implemented as private driver state in vc4_kms, not here.
1355                  */
1356                 drm_crtc_enable_color_mgmt(crtc, 0, true, crtc->gamma_size);
1357         }
1358
1359         for (i = 0; i < crtc->gamma_size; i++) {
1360                 vc4_crtc->lut_r[i] = i;
1361                 vc4_crtc->lut_g[i] = i;
1362                 vc4_crtc->lut_b[i] = i;
1363         }
1364
1365         return 0;
1366 }
1367
1368 int vc4_crtc_init(struct drm_device *drm, struct platform_device *pdev,
1369                   struct vc4_crtc *vc4_crtc,
1370                   const struct vc4_crtc_data *data,
1371                   const struct drm_crtc_funcs *crtc_funcs,
1372                   const struct drm_crtc_helper_funcs *crtc_helper_funcs,
1373                   bool feeds_txp)
1374 {
1375         struct drm_plane *primary_plane;
1376
1377         /* For now, we create just the primary and the legacy cursor
1378          * planes.  We should be able to stack more planes on easily,
1379          * but to do that we would need to compute the bandwidth
1380          * requirement of the plane configuration, and reject ones
1381          * that will take too much.
1382          */
1383         primary_plane = vc4_plane_init(drm, DRM_PLANE_TYPE_PRIMARY, 0);
1384         if (IS_ERR(primary_plane)) {
1385                 dev_err(drm->dev, "failed to construct primary plane\n");
1386                 return PTR_ERR(primary_plane);
1387         }
1388
1389         return __vc4_crtc_init(drm, pdev, vc4_crtc, data, primary_plane,
1390                                crtc_funcs, crtc_helper_funcs, feeds_txp);
1391 }
1392
1393 static int vc4_crtc_bind(struct device *dev, struct device *master, void *data)
1394 {
1395         struct platform_device *pdev = to_platform_device(dev);
1396         struct drm_device *drm = dev_get_drvdata(master);
1397         const struct vc4_pv_data *pv_data;
1398         struct vc4_crtc *vc4_crtc;
1399         struct drm_crtc *crtc;
1400         int ret;
1401
1402         vc4_crtc = drmm_kzalloc(drm, sizeof(*vc4_crtc), GFP_KERNEL);
1403         if (!vc4_crtc)
1404                 return -ENOMEM;
1405         crtc = &vc4_crtc->base;
1406
1407         pv_data = of_device_get_match_data(dev);
1408         if (!pv_data)
1409                 return -ENODEV;
1410
1411         vc4_crtc->regs = vc4_ioremap_regs(pdev, 0);
1412         if (IS_ERR(vc4_crtc->regs))
1413                 return PTR_ERR(vc4_crtc->regs);
1414
1415         vc4_crtc->regset.base = vc4_crtc->regs;
1416         vc4_crtc->regset.regs = crtc_regs;
1417         vc4_crtc->regset.nregs = ARRAY_SIZE(crtc_regs);
1418
1419         ret = vc4_crtc_init(drm, pdev, vc4_crtc, &pv_data->base,
1420                             &vc4_crtc_funcs, &vc4_crtc_helper_funcs,
1421                             false);
1422         if (ret)
1423                 return ret;
1424         vc4_set_crtc_possible_masks(drm, crtc);
1425
1426         CRTC_WRITE(PV_INTEN, 0);
1427         CRTC_WRITE(PV_INTSTAT, PV_INT_VFP_START);
1428         ret = devm_request_irq(dev, platform_get_irq(pdev, 0),
1429                                vc4_crtc_irq_handler,
1430                                IRQF_SHARED,
1431                                "vc4 crtc", vc4_crtc);
1432         if (ret)
1433                 return ret;
1434
1435         platform_set_drvdata(pdev, vc4_crtc);
1436
1437         return 0;
1438 }
1439
1440 static void vc4_crtc_unbind(struct device *dev, struct device *master,
1441                             void *data)
1442 {
1443         struct platform_device *pdev = to_platform_device(dev);
1444         struct vc4_crtc *vc4_crtc = dev_get_drvdata(dev);
1445
1446         CRTC_WRITE(PV_INTEN, 0);
1447
1448         platform_set_drvdata(pdev, NULL);
1449 }
1450
1451 static const struct component_ops vc4_crtc_ops = {
1452         .bind   = vc4_crtc_bind,
1453         .unbind = vc4_crtc_unbind,
1454 };
1455
1456 static int vc4_crtc_dev_probe(struct platform_device *pdev)
1457 {
1458         return component_add(&pdev->dev, &vc4_crtc_ops);
1459 }
1460
1461 static void vc4_crtc_dev_remove(struct platform_device *pdev)
1462 {
1463         component_del(&pdev->dev, &vc4_crtc_ops);
1464 }
1465
1466 struct platform_driver vc4_crtc_driver = {
1467         .probe = vc4_crtc_dev_probe,
1468         .remove = vc4_crtc_dev_remove,
1469         .driver = {
1470                 .name = "vc4_crtc",
1471                 .of_match_table = vc4_crtc_dt_match,
1472         },
1473 };