linux.git: drivers/gpu/drm/vc4/vc4_kms.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2015 Broadcom
 */

/**
 * DOC: VC4 KMS
 *
 * This is the general code for implementing KMS mode setting that
 * doesn't clearly associate with any of the other objects (plane,
 * crtc, HDMI encoder).
 */

#include <linux/clk.h>

#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_plane_helper.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_vblank.h>

#include "vc4_drv.h"
#include "vc4_regs.h"

struct vc4_ctm_state {
        struct drm_private_state base;
        struct drm_color_ctm *ctm;
        int fifo;
};

static struct vc4_ctm_state *to_vc4_ctm_state(struct drm_private_state *priv)
{
        return container_of(priv, struct vc4_ctm_state, base);
}

struct vc4_load_tracker_state {
        struct drm_private_state base;
        u64 hvs_load;
        u64 membus_load;
};

static struct vc4_load_tracker_state *
to_vc4_load_tracker_state(struct drm_private_state *priv)
{
        return container_of(priv, struct vc4_load_tracker_state, base);
}

static struct vc4_ctm_state *vc4_get_ctm_state(struct drm_atomic_state *state,
                                               struct drm_private_obj *manager)
{
        struct drm_device *dev = state->dev;
        struct vc4_dev *vc4 = dev->dev_private;
        struct drm_private_state *priv_state;
        int ret;

        ret = drm_modeset_lock(&vc4->ctm_state_lock, state->acquire_ctx);
        if (ret)
                return ERR_PTR(ret);

        priv_state = drm_atomic_get_private_obj_state(state, manager);
        if (IS_ERR(priv_state))
                return ERR_CAST(priv_state);

        return to_vc4_ctm_state(priv_state);
}

static struct drm_private_state *
vc4_ctm_duplicate_state(struct drm_private_obj *obj)
{
        struct vc4_ctm_state *state;

        state = kmemdup(obj->state, sizeof(*state), GFP_KERNEL);
        if (!state)
                return NULL;

        __drm_atomic_helper_private_obj_duplicate_state(obj, &state->base);

        return &state->base;
}

static void vc4_ctm_destroy_state(struct drm_private_obj *obj,
                                  struct drm_private_state *state)
{
        struct vc4_ctm_state *ctm_state = to_vc4_ctm_state(state);

        kfree(ctm_state);
}

static const struct drm_private_state_funcs vc4_ctm_state_funcs = {
        .atomic_duplicate_state = vc4_ctm_duplicate_state,
        .atomic_destroy_state = vc4_ctm_destroy_state,
};

/* Converts a DRM S31.32 value to the HW S0.9 format. */
static u16 vc4_ctm_s31_32_to_s0_9(u64 in)
{
        u16 r;

        /* Sign bit. */
        r = in & BIT_ULL(63) ? BIT(9) : 0;

        if ((in & GENMASK_ULL(62, 32)) > 0) {
                /* We have zero integer bits so we can only saturate here. */
                r |= GENMASK(8, 0);
        } else {
                /* Otherwise take the 9 most significant fractional bits. */
                r |= (in >> 23) & GENMASK(8, 0);
        }

        return r;
}
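
/*
 * Worked example (editor's note, illustrative only): 0.5 in S31.32 is
 * 0x0000000080000000; bits 62:32 are clear, so we keep fraction bits
 * 31:23 = 0x100, i.e. the S0.9 value 256/512 = 0.5. 1.5 in S31.32 is
 * 0x0000000180000000; bit 32 is set, so the magnitude saturates to
 * GENMASK(8, 0) = 0x1ff, i.e. 511/512, the closest value the HW can
 * represent.
 */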

static void
vc4_ctm_commit(struct vc4_dev *vc4, struct drm_atomic_state *state)
{
        struct vc4_ctm_state *ctm_state = to_vc4_ctm_state(vc4->ctm_manager.state);
        struct drm_color_ctm *ctm = ctm_state->ctm;

        if (ctm_state->fifo) {
                HVS_WRITE(SCALER_OLEDCOEF2,
                          VC4_SET_FIELD(vc4_ctm_s31_32_to_s0_9(ctm->matrix[0]),
                                        SCALER_OLEDCOEF2_R_TO_R) |
                          VC4_SET_FIELD(vc4_ctm_s31_32_to_s0_9(ctm->matrix[3]),
                                        SCALER_OLEDCOEF2_R_TO_G) |
                          VC4_SET_FIELD(vc4_ctm_s31_32_to_s0_9(ctm->matrix[6]),
                                        SCALER_OLEDCOEF2_R_TO_B));
                HVS_WRITE(SCALER_OLEDCOEF1,
                          VC4_SET_FIELD(vc4_ctm_s31_32_to_s0_9(ctm->matrix[1]),
                                        SCALER_OLEDCOEF1_G_TO_R) |
                          VC4_SET_FIELD(vc4_ctm_s31_32_to_s0_9(ctm->matrix[4]),
                                        SCALER_OLEDCOEF1_G_TO_G) |
                          VC4_SET_FIELD(vc4_ctm_s31_32_to_s0_9(ctm->matrix[7]),
                                        SCALER_OLEDCOEF1_G_TO_B));
                HVS_WRITE(SCALER_OLEDCOEF0,
                          VC4_SET_FIELD(vc4_ctm_s31_32_to_s0_9(ctm->matrix[2]),
                                        SCALER_OLEDCOEF0_B_TO_R) |
                          VC4_SET_FIELD(vc4_ctm_s31_32_to_s0_9(ctm->matrix[5]),
                                        SCALER_OLEDCOEF0_B_TO_G) |
                          VC4_SET_FIELD(vc4_ctm_s31_32_to_s0_9(ctm->matrix[8]),
                                        SCALER_OLEDCOEF0_B_TO_B));
        }

        HVS_WRITE(SCALER_OLEDOFFS,
                  VC4_SET_FIELD(ctm_state->fifo, SCALER_OLEDOFFS_DISPFIFO));
}
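
/*
 * Editor's note: ctm_state->fifo is stored 1-based (see
 * vc4_ctm_atomic_check() below), so writing it to the DISPFIFO field
 * both selects the FIFO the matrix applies to and, when 0, disables
 * the CTM entirely.
 */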

static void vc4_hvs_pv_muxing_commit(struct vc4_dev *vc4,
                                     struct drm_atomic_state *state)
{
        struct drm_crtc_state *crtc_state;
        struct drm_crtc *crtc;
        unsigned int i;

        for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
                struct vc4_crtc_state *vc4_state = to_vc4_crtc_state(crtc_state);
                u32 dispctrl;
                u32 dsp3_mux;

                if (!crtc_state->active)
                        continue;

                if (vc4_state->assigned_channel != 2)
                        continue;

                /*
                 * SCALER_DISPCTRL_DSP3 = X, where X < 3 means 'connect DSP3 to
                 * FIFO X'.
                 * SCALER_DISPCTRL_DSP3 = 3 means 'disable DSP 3'.
                 *
                 * DSP3 is connected to FIFO2 unless the transposer is
                 * enabled. In this case, FIFO 2 is directly accessed by the
                 * TXP IP, and we need to disable the FIFO2 -> pixelvalve1
                 * route.
                 */
                if (vc4_state->feed_txp)
                        dsp3_mux = VC4_SET_FIELD(3, SCALER_DISPCTRL_DSP3_MUX);
                else
                        dsp3_mux = VC4_SET_FIELD(2, SCALER_DISPCTRL_DSP3_MUX);

                dispctrl = HVS_READ(SCALER_DISPCTRL) &
                           ~SCALER_DISPCTRL_DSP3_MUX_MASK;
                HVS_WRITE(SCALER_DISPCTRL, dispctrl | dsp3_mux);
        }
}

static void vc5_hvs_pv_muxing_commit(struct vc4_dev *vc4,
                                     struct drm_atomic_state *state)
{
        struct drm_crtc_state *crtc_state;
        struct drm_crtc *crtc;
        unsigned char dsp2_mux = 0;
        unsigned char dsp3_mux = 3;
        unsigned char dsp4_mux = 3;
        unsigned char dsp5_mux = 3;
        unsigned int i;
        u32 reg;

        for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
                struct vc4_crtc_state *vc4_state = to_vc4_crtc_state(crtc_state);
                struct vc4_crtc *vc4_crtc = to_vc4_crtc(crtc);

                if (!crtc_state->active)
                        continue;

                switch (vc4_crtc->data->hvs_output) {
                case 2:
                        dsp2_mux = (vc4_state->assigned_channel == 2) ? 0 : 1;
                        break;

                case 3:
                        dsp3_mux = vc4_state->assigned_channel;
                        break;

                case 4:
                        dsp4_mux = vc4_state->assigned_channel;
                        break;

                case 5:
                        dsp5_mux = vc4_state->assigned_channel;
                        break;

                default:
                        break;
                }
        }

        reg = HVS_READ(SCALER_DISPECTRL);
        HVS_WRITE(SCALER_DISPECTRL,
                  (reg & ~SCALER_DISPECTRL_DSP2_MUX_MASK) |
                  VC4_SET_FIELD(dsp2_mux, SCALER_DISPECTRL_DSP2_MUX));

        reg = HVS_READ(SCALER_DISPCTRL);
        HVS_WRITE(SCALER_DISPCTRL,
                  (reg & ~SCALER_DISPCTRL_DSP3_MUX_MASK) |
                  VC4_SET_FIELD(dsp3_mux, SCALER_DISPCTRL_DSP3_MUX));

        reg = HVS_READ(SCALER_DISPEOLN);
        HVS_WRITE(SCALER_DISPEOLN,
                  (reg & ~SCALER_DISPEOLN_DSP4_MUX_MASK) |
                  VC4_SET_FIELD(dsp4_mux, SCALER_DISPEOLN_DSP4_MUX));

        reg = HVS_READ(SCALER_DISPDITHER);
        HVS_WRITE(SCALER_DISPDITHER,
                  (reg & ~SCALER_DISPDITHER_DSP5_MUX_MASK) |
                  VC4_SET_FIELD(dsp5_mux, SCALER_DISPDITHER_DSP5_MUX));
}

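/*
 * Editor's note: the sequence below essentially mirrors
 * drm_atomic_helper_commit_tail(), with the driver-specific CTM and
 * FIFO/pixelvalve muxing updates applied between the modeset disables
 * and the plane commit, and the HVS core clock floor raised for the
 * duration of the commit on BCM2711 (hvs5).
 */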
static void
vc4_atomic_complete_commit(struct drm_atomic_state *state)
{
        struct drm_device *dev = state->dev;
        struct vc4_dev *vc4 = to_vc4_dev(dev);
        struct vc4_hvs *hvs = vc4->hvs;
        struct drm_crtc_state *new_crtc_state;
        struct drm_crtc *crtc;
        int i;

        for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
                struct vc4_crtc_state *vc4_crtc_state;

                if (!new_crtc_state->commit)
                        continue;

                vc4_crtc_state = to_vc4_crtc_state(new_crtc_state);
                vc4_hvs_mask_underrun(dev, vc4_crtc_state->assigned_channel);
        }

        if (vc4->hvs->hvs5)
                clk_set_min_rate(hvs->core_clk, 500000000);

        drm_atomic_helper_wait_for_fences(dev, state, false);

        drm_atomic_helper_wait_for_dependencies(state);

        drm_atomic_helper_commit_modeset_disables(dev, state);

        vc4_ctm_commit(vc4, state);

        if (vc4->hvs->hvs5)
                vc5_hvs_pv_muxing_commit(vc4, state);
        else
                vc4_hvs_pv_muxing_commit(vc4, state);

        drm_atomic_helper_commit_planes(dev, state, 0);

        drm_atomic_helper_commit_modeset_enables(dev, state);

        drm_atomic_helper_fake_vblank(state);

        drm_atomic_helper_commit_hw_done(state);

        drm_atomic_helper_wait_for_flip_done(dev, state);

        drm_atomic_helper_cleanup_planes(dev, state);

        drm_atomic_helper_commit_cleanup_done(state);

        if (vc4->hvs->hvs5)
                clk_set_min_rate(hvs->core_clk, 0);

        drm_atomic_state_put(state);

        up(&vc4->async_modeset);
}

static void commit_work(struct work_struct *work)
{
        struct drm_atomic_state *state = container_of(work,
                                                      struct drm_atomic_state,
                                                      commit_work);
        vc4_atomic_complete_commit(state);
}

/**
 * vc4_atomic_commit - commit validated state object
 * @dev: DRM device
 * @state: the driver state object
 * @nonblock: nonblocking commit
 *
 * This function commits a state object that has been pre-validated with
 * drm_atomic_helper_check(). This can still fail when, e.g., the framebuffer
 * reservation fails. Nonblocking commits are queued to a worker and complete
 * asynchronously; blocking commits complete before this function returns.
 *
 * RETURNS
 * Zero for success or -errno.
 */
static int vc4_atomic_commit(struct drm_device *dev,
                             struct drm_atomic_state *state,
                             bool nonblock)
{
        struct vc4_dev *vc4 = to_vc4_dev(dev);
        int ret;

        if (state->async_update) {
                ret = down_interruptible(&vc4->async_modeset);
                if (ret)
                        return ret;

                ret = drm_atomic_helper_prepare_planes(dev, state);
                if (ret) {
                        up(&vc4->async_modeset);
                        return ret;
                }

                drm_atomic_helper_async_commit(dev, state);

                drm_atomic_helper_cleanup_planes(dev, state);

                up(&vc4->async_modeset);

                return 0;
        }

        /* We know for sure we don't want an async update here. Set
         * state->legacy_cursor_update to false to prevent
         * drm_atomic_helper_setup_commit() from auto-completing
         * commit->flip_done.
         */
        state->legacy_cursor_update = false;
        ret = drm_atomic_helper_setup_commit(state, nonblock);
        if (ret)
                return ret;

        INIT_WORK(&state->commit_work, commit_work);

        ret = down_interruptible(&vc4->async_modeset);
        if (ret)
                return ret;

        ret = drm_atomic_helper_prepare_planes(dev, state);
        if (ret) {
                up(&vc4->async_modeset);
                return ret;
        }

        if (!nonblock) {
                ret = drm_atomic_helper_wait_for_fences(dev, state, true);
                if (ret) {
                        drm_atomic_helper_cleanup_planes(dev, state);
                        up(&vc4->async_modeset);
                        return ret;
                }
        }

        /*
         * This is the point of no return - everything below never fails except
         * when the hw goes bonghits. Which means we can commit the new state on
         * the software side now.
         */

        BUG_ON(drm_atomic_helper_swap_state(state, false) < 0);

        /*
         * Everything below can be run asynchronously without the need to grab
         * any modeset locks at all under one condition: It must be guaranteed
         * that the asynchronous work has either been cancelled (if the driver
         * supports it, which at least requires that the framebuffers get
         * cleaned up with drm_atomic_helper_cleanup_planes()) or completed
         * before the new state gets committed on the software side with
         * drm_atomic_helper_swap_state().
         *
         * This scheme allows new atomic state updates to be prepared and
         * checked in parallel to the asynchronous completion of the previous
         * update. Which is important since compositors need to figure out the
         * composition of the next frame right after having submitted the
         * current layout.
         */

        drm_atomic_state_get(state);
        if (nonblock)
                queue_work(system_unbound_wq, &state->commit_work);
        else
                vc4_atomic_complete_commit(state);

        return 0;
}

static struct drm_framebuffer *vc4_fb_create(struct drm_device *dev,
                                             struct drm_file *file_priv,
                                             const struct drm_mode_fb_cmd2 *mode_cmd)
{
        struct drm_mode_fb_cmd2 mode_cmd_local;

        /* If the user didn't specify a modifier, use the
         * vc4_set_tiling_ioctl() state for the BO.
         */
        if (!(mode_cmd->flags & DRM_MODE_FB_MODIFIERS)) {
                struct drm_gem_object *gem_obj;
                struct vc4_bo *bo;

                gem_obj = drm_gem_object_lookup(file_priv,
                                                mode_cmd->handles[0]);
                if (!gem_obj) {
                        DRM_DEBUG("Failed to look up GEM BO %d\n",
                                  mode_cmd->handles[0]);
                        return ERR_PTR(-ENOENT);
                }
                bo = to_vc4_bo(gem_obj);

                mode_cmd_local = *mode_cmd;

                if (bo->t_format) {
                        mode_cmd_local.modifier[0] =
                                DRM_FORMAT_MOD_BROADCOM_VC4_T_TILED;
                } else {
                        mode_cmd_local.modifier[0] = DRM_FORMAT_MOD_NONE;
                }

                drm_gem_object_put(gem_obj);

                mode_cmd = &mode_cmd_local;
        }

        return drm_gem_fb_create(dev, file_priv, mode_cmd);
}
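
/*
 * Editor's note, illustrative example: a client that allocated a BO,
 * set T_FORMAT tiling on it via the VC4_SET_TILING ioctl and then
 * called DRM_IOCTL_MODE_ADDFB (which passes no modifier) will get a
 * framebuffer with DRM_FORMAT_MOD_BROADCOM_VC4_T_TILED here, as if it
 * had passed that modifier explicitly through ADDFB2.
 */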

/* Our CTM has some peculiar limitations: we can only enable it for one CRTC
 * at a time and the HW only supports S0.9 scalars. To account for the latter,
 * we don't allow userland to set a CTM that we have no hope of approximating.
 */
static int
vc4_ctm_atomic_check(struct drm_device *dev, struct drm_atomic_state *state)
{
        struct vc4_dev *vc4 = to_vc4_dev(dev);
        struct vc4_ctm_state *ctm_state = NULL;
        struct drm_crtc *crtc;
        struct drm_crtc_state *old_crtc_state, *new_crtc_state;
        struct drm_color_ctm *ctm;
        int i;

        for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
                /* CTM is being disabled. */
                if (!new_crtc_state->ctm && old_crtc_state->ctm) {
                        ctm_state = vc4_get_ctm_state(state, &vc4->ctm_manager);
                        if (IS_ERR(ctm_state))
                                return PTR_ERR(ctm_state);
                        ctm_state->fifo = 0;
                }
        }

        for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
                if (new_crtc_state->ctm == old_crtc_state->ctm)
                        continue;

                if (!ctm_state) {
                        ctm_state = vc4_get_ctm_state(state, &vc4->ctm_manager);
                        if (IS_ERR(ctm_state))
                                return PTR_ERR(ctm_state);
                }

                /* CTM is being enabled or the matrix changed. */
                if (new_crtc_state->ctm) {
                        struct vc4_crtc_state *vc4_crtc_state =
                                to_vc4_crtc_state(new_crtc_state);

                        /* fifo is 1-based since 0 disables CTM. */
                        int fifo = vc4_crtc_state->assigned_channel + 1;

                        /* Check userland isn't trying to turn on CTM for more
                         * than one CRTC at a time.
                         */
                        if (ctm_state->fifo && ctm_state->fifo != fifo) {
                                DRM_DEBUG_DRIVER("Too many CTMs configured\n");
                                return -EINVAL;
                        }

                        /* Check we can approximate the specified CTM.
                         * We disallow scalars |c| > 1.0 since the HW has
                         * no integer bits.
                         */
                        ctm = new_crtc_state->ctm->data;
                        for (i = 0; i < ARRAY_SIZE(ctm->matrix); i++) {
                                u64 val = ctm->matrix[i];

                                val &= ~BIT_ULL(63);
                                if (val > BIT_ULL(32))
                                        return -EINVAL;
                        }

                        ctm_state->fifo = fifo;
                        ctm_state->ctm = ctm;
                }
        }

        return 0;
}
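
/*
 * Editor's note, worked example: in S31.32, 1.0 is exactly BIT_ULL(32),
 * so an identity matrix passes the check above (only magnitudes strictly
 * above 1.0 are rejected). Its diagonal entries then saturate to the
 * S0.9 value 0x1ff = 511/512 in vc4_ctm_s31_32_to_s0_9(), the closest
 * approximation the HW can do.
 */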

static int vc4_load_tracker_atomic_check(struct drm_atomic_state *state)
{
        struct drm_plane_state *old_plane_state, *new_plane_state;
        struct vc4_dev *vc4 = to_vc4_dev(state->dev);
        struct vc4_load_tracker_state *load_state;
        struct drm_private_state *priv_state;
        struct drm_plane *plane;
        int i;

        if (!vc4->load_tracker_available)
                return 0;

        priv_state = drm_atomic_get_private_obj_state(state,
                                                      &vc4->load_tracker);
        if (IS_ERR(priv_state))
                return PTR_ERR(priv_state);

        load_state = to_vc4_load_tracker_state(priv_state);
        for_each_oldnew_plane_in_state(state, plane, old_plane_state,
                                       new_plane_state, i) {
                struct vc4_plane_state *vc4_plane_state;

                if (old_plane_state->fb && old_plane_state->crtc) {
                        vc4_plane_state = to_vc4_plane_state(old_plane_state);
                        load_state->membus_load -= vc4_plane_state->membus_load;
                        load_state->hvs_load -= vc4_plane_state->hvs_load;
                }

                if (new_plane_state->fb && new_plane_state->crtc) {
                        vc4_plane_state = to_vc4_plane_state(new_plane_state);
                        load_state->membus_load += vc4_plane_state->membus_load;
                        load_state->hvs_load += vc4_plane_state->hvs_load;
                }
        }

        /* Don't check the load when the tracker is disabled. */
        if (!vc4->load_tracker_enabled)
                return 0;

        /* The absolute limit is 2 Gbyte/sec, but let's take a margin to let
         * the system work when other blocks are accessing the memory.
         */
        if (load_state->membus_load > SZ_1G + SZ_512M)
                return -ENOSPC;

        /* The HVS clock is supposed to run at 250 MHz, so let's take a margin
         * and consider the maximum number of cycles to be 240M.
         */
        if (load_state->hvs_load > 240000000ULL)
                return -ENOSPC;

        return 0;
}
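
/*
 * Editor's note, back-of-the-envelope example: a fullscreen 1920x1080
 * ARGB8888 plane refreshed at 60 Hz reads about 1920 * 1080 * 4 * 60
 * bytes/sec, i.e. roughly 0.5 Gbyte/sec, so on the order of three such
 * planes would hit the 1.5 Gbyte/sec (SZ_1G + SZ_512M) budget above.
 * The exact per-plane figures come from the membus_load/hvs_load
 * estimates computed in vc4_plane.c.
 */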

static struct drm_private_state *
vc4_load_tracker_duplicate_state(struct drm_private_obj *obj)
{
        struct vc4_load_tracker_state *state;

        state = kmemdup(obj->state, sizeof(*state), GFP_KERNEL);
        if (!state)
                return NULL;

        __drm_atomic_helper_private_obj_duplicate_state(obj, &state->base);

        return &state->base;
}

static void vc4_load_tracker_destroy_state(struct drm_private_obj *obj,
                                           struct drm_private_state *state)
{
        struct vc4_load_tracker_state *load_state;

        load_state = to_vc4_load_tracker_state(state);
        kfree(load_state);
}

static const struct drm_private_state_funcs vc4_load_tracker_state_funcs = {
        .atomic_duplicate_state = vc4_load_tracker_duplicate_state,
        .atomic_destroy_state = vc4_load_tracker_destroy_state,
};

#define NUM_OUTPUTS  6
#define NUM_CHANNELS 3
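
/*
 * Editor's note: the six possible outputs (pixelvalves plus the TXP
 * writeback block) share the three HVS FIFOs ("channels"); the
 * assignment loop in vc4_atomic_check() below hands out channels from
 * the GENMASK(NUM_CHANNELS - 1, 0) pool.
 */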

static int
vc4_atomic_check(struct drm_device *dev, struct drm_atomic_state *state)
{
        unsigned long unassigned_channels = GENMASK(NUM_CHANNELS - 1, 0);
        struct drm_crtc_state *crtc_state;
        struct drm_crtc *crtc;
        int i, ret;

        /*
         * Since the HVS FIFOs are shared across all the pixelvalves and
         * the TXP (and thus all the CRTCs), we need to pull the current
         * state of all the enabled CRTCs so that an update to a single
         * CRTC still keeps the previous FIFOs enabled and assigned to
         * the same CRTCs, instead of evaluating only the CRTC being
         * modified.
         */
        list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
                if (!crtc->state->enable)
                        continue;

                crtc_state = drm_atomic_get_crtc_state(state, crtc);
                if (IS_ERR(crtc_state))
                        return PTR_ERR(crtc_state);
        }

        for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
                struct vc4_crtc_state *vc4_crtc_state =
                        to_vc4_crtc_state(crtc_state);
                struct vc4_crtc *vc4_crtc = to_vc4_crtc(crtc);
                unsigned int matching_channels;

                if (!crtc_state->active)
                        continue;

                /*
                 * The problem we have to solve here is that we have
                 * up to 7 encoders, connected to up to 6 CRTCs.
                 *
                 * Those CRTCs, depending on the instance, can be
                 * routed to 1, 2 or 3 HVS FIFOs, and we need to set
                 * up the muxing between FIFOs and outputs in the HVS
                 * accordingly.
                 *
                 * It would be pretty hard to come up with an
                 * algorithm that would generically solve this.
                 * However, the current routing trees we support allow
                 * us to simplify the problem a bit.
                 *
                 * Indeed, with the currently supported layouts, if we
                 * assign the FIFOs in ascending CRTC index order, we
                 * can't fall into the situation where an earlier CRTC
                 * that had multiple routes is assigned the one that
                 * was the only option for a later CRTC.
                 *
                 * If a future layout stops giving us that guarantee,
                 * we will need something smarter, but this works so
                 * far.
                 */
                matching_channels = unassigned_channels & vc4_crtc->data->hvs_available_channels;
                if (matching_channels) {
                        unsigned int channel = ffs(matching_channels) - 1;

                        vc4_crtc_state->assigned_channel = channel;
                        unassigned_channels &= ~BIT(channel);
                } else {
                        return -EINVAL;
                }
        }

        ret = vc4_ctm_atomic_check(dev, state);
        if (ret < 0)
                return ret;

        ret = drm_atomic_helper_check(dev, state);
        if (ret)
                return ret;

        return vc4_load_tracker_atomic_check(state);
}
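
/*
 * Editor's note, worked example: with all channels free
 * (unassigned_channels = 0b111), a CRTC whose hvs_available_channels
 * mask is 0x6 gets channel ffs(0x6) - 1 = 1; BIT(1) is then cleared
 * from the pool, so a later CRTC that could only use channel 1 would
 * make the check fail with -EINVAL rather than be silently dropped.
 */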

static const struct drm_mode_config_funcs vc4_mode_funcs = {
        .atomic_check = vc4_atomic_check,
        .atomic_commit = vc4_atomic_commit,
        .fb_create = vc4_fb_create,
};

int vc4_kms_load(struct drm_device *dev)
{
        struct vc4_dev *vc4 = to_vc4_dev(dev);
        struct vc4_ctm_state *ctm_state;
        struct vc4_load_tracker_state *load_state;
        bool is_vc5 = of_device_is_compatible(dev->dev->of_node,
                                              "brcm,bcm2711-vc5");
        int ret;

        if (!is_vc5) {
                vc4->load_tracker_available = true;

                /* Start with the load tracker enabled. Can be
                 * disabled through the debugfs load_tracker file.
                 */
                vc4->load_tracker_enabled = true;
        }

        sema_init(&vc4->async_modeset, 1);

        /* Set support for vblank irq fast disable, before drm_vblank_init() */
        dev->vblank_disable_immediate = true;

        dev->irq_enabled = true;
        ret = drm_vblank_init(dev, dev->mode_config.num_crtc);
        if (ret < 0) {
                dev_err(dev->dev, "failed to initialize vblank\n");
                return ret;
        }

        if (is_vc5) {
                dev->mode_config.max_width = 7680;
                dev->mode_config.max_height = 7680;
        } else {
                dev->mode_config.max_width = 2048;
                dev->mode_config.max_height = 2048;
        }

        dev->mode_config.funcs = &vc4_mode_funcs;
        dev->mode_config.preferred_depth = 24;
        dev->mode_config.async_page_flip = true;
        dev->mode_config.allow_fb_modifiers = true;

        drm_modeset_lock_init(&vc4->ctm_state_lock);

        ctm_state = kzalloc(sizeof(*ctm_state), GFP_KERNEL);
        if (!ctm_state)
                return -ENOMEM;

        drm_atomic_private_obj_init(dev, &vc4->ctm_manager, &ctm_state->base,
                                    &vc4_ctm_state_funcs);

        if (vc4->load_tracker_available) {
                load_state = kzalloc(sizeof(*load_state), GFP_KERNEL);
                if (!load_state) {
                        drm_atomic_private_obj_fini(&vc4->ctm_manager);
                        return -ENOMEM;
                }

                drm_atomic_private_obj_init(dev, &vc4->load_tracker,
                                            &load_state->base,
                                            &vc4_load_tracker_state_funcs);
        }

        drm_mode_config_reset(dev);

        drm_kms_helper_poll_init(dev);

        return 0;
}