1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Copyright (c) 2015 MediaTek Inc.
4  */
5
6 #include <linux/clk.h>
7 #include <linux/dma-mapping.h>
8 #include <linux/mailbox_controller.h>
9 #include <linux/of.h>
10 #include <linux/pm_runtime.h>
11 #include <linux/soc/mediatek/mtk-cmdq.h>
12 #include <linux/soc/mediatek/mtk-mmsys.h>
13 #include <linux/soc/mediatek/mtk-mutex.h>
14
15 #include <asm/barrier.h>
16
17 #include <drm/drm_atomic.h>
18 #include <drm/drm_atomic_helper.h>
19 #include <drm/drm_probe_helper.h>
20 #include <drm/drm_vblank.h>
21
22 #include "mtk_drm_drv.h"
23 #include "mtk_drm_crtc.h"
24 #include "mtk_drm_ddp_comp.h"
25 #include "mtk_drm_gem.h"
26 #include "mtk_drm_plane.h"
27
/*
 * struct mtk_drm_crtc - MediaTek specific crtc structure.
 * @base: crtc object.
 * @enabled: records whether crtc_enable succeeded
 * @pending_needs_vblank: true when a page-flip event must be delivered on
 *                        the next vblank; set in update_config(), consumed
 *                        by mtk_drm_finish_page_flip()
 * @event: page-flip event armed in atomic_begin, sent at the next vblank
 * @planes: array of drm_plane structures, one for each overlay layer
 * @layer_nr: number of entries in @planes
 * @pending_planes: whether any plane has pending (synchronous) changes
 * @pending_async_planes: whether any plane has pending async changes
 * @cmdq_client: mailbox client used to submit packets to the command queue
 * @cmdq_handle: reusable CMDQ packet carrying the per-frame register writes
 * @cmdq_event: event the CMDQ packet waits for before executing
 * @cmdq_vblank_cnt: watchdog counter armed when a packet is sent; hitting
 *                   zero in the vblank IRQ means the packet timed out
 * @cb_blocking_queue: waitqueue woken from ddp_cmdq_cb(); atomic_disable
 *                     waits on it for the final packet to complete
 * @mmsys_dev: pointer to the mmsys device for configuration registers
 * @dma_dev: device used for DMA mapping — not referenced in this file,
 *           presumably consumed by the GEM/plane code (TODO confirm)
 * @mutex: handle to one of the ten disp_mutex streams
 * @ddp_comp_nr: number of components in ddp_comp
 * @ddp_comp: array of pointers to the mtk_ddp_comp structures used by this crtc
 * @num_conn_routes: number of entries in @conn_routes
 * @conn_routes: selectable output routes for the last component of the path
 * @hw_lock: lock serializing mtk_drm_crtc_update_config()
 * @config_updating: true while update_config() runs; prevents the vblank
 *                   IRQ from completing a page flip mid-update
 */
struct mtk_drm_crtc {
	struct drm_crtc			base;
	bool				enabled;

	bool				pending_needs_vblank;
	struct drm_pending_vblank_event	*event;

	struct drm_plane		*planes;
	unsigned int			layer_nr;
	bool				pending_planes;
	bool				pending_async_planes;

#if IS_REACHABLE(CONFIG_MTK_CMDQ)
	struct cmdq_client		cmdq_client;
	struct cmdq_pkt			cmdq_handle;
	u32				cmdq_event;
	u32				cmdq_vblank_cnt;
	wait_queue_head_t		cb_blocking_queue;
#endif

	struct device			*mmsys_dev;
	struct device			*dma_dev;
	struct mtk_mutex		*mutex;
	unsigned int			ddp_comp_nr;
	struct mtk_ddp_comp		**ddp_comp;
	unsigned int			num_conn_routes;
	const struct mtk_drm_route	*conn_routes;

	/* lock for display hardware access */
	struct mutex			hw_lock;
	bool				config_updating;
};
73
/*
 * struct mtk_crtc_state - MediaTek specific crtc atomic state
 * @base: embedded drm_crtc_state
 * @pending_config: true while the geometry below still has to be written
 *                  to the hardware (checked in mtk_crtc_ddp_config())
 * @pending_width: hdisplay of the mode being applied
 * @pending_height: vdisplay of the mode being applied
 * @pending_vrefresh: vertical refresh rate of the mode being applied
 */
struct mtk_crtc_state {
	struct drm_crtc_state		base;

	bool				pending_config;
	unsigned int			pending_width;
	unsigned int			pending_height;
	unsigned int			pending_vrefresh;
};
82
83 static inline struct mtk_drm_crtc *to_mtk_crtc(struct drm_crtc *c)
84 {
85         return container_of(c, struct mtk_drm_crtc, base);
86 }
87
88 static inline struct mtk_crtc_state *to_mtk_crtc_state(struct drm_crtc_state *s)
89 {
90         return container_of(s, struct mtk_crtc_state, base);
91 }
92
/*
 * mtk_drm_crtc_finish_page_flip - deliver the armed page-flip event
 * @mtk_crtc: crtc whose pending event should be sent
 *
 * Sends the drm_pending_vblank_event stored by atomic_begin and drops the
 * vblank reference that was taken there.
 */
static void mtk_drm_crtc_finish_page_flip(struct mtk_drm_crtc *mtk_crtc)
{
	struct drm_crtc *crtc = &mtk_crtc->base;
	unsigned long flags;

	/*
	 * NOTE(review): mtk_crtc->event is tested outside event_lock while it
	 * is written under the lock in atomic_begin and cleared under it here;
	 * presumably the unlocked read is benign on this IRQ path — confirm.
	 */
	if (mtk_crtc->event) {
		spin_lock_irqsave(&crtc->dev->event_lock, flags);
		drm_crtc_send_vblank_event(crtc, mtk_crtc->event);
		drm_crtc_vblank_put(crtc);
		mtk_crtc->event = NULL;
		spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
	}
}
106
/*
 * mtk_drm_finish_page_flip - per-vblank bookkeeping
 * @mtk_crtc: crtc that generated the vblank
 *
 * Signals the DRM vblank machinery and, unless a reconfiguration is in
 * flight (config_updating), completes any pending page flip.
 */
static void mtk_drm_finish_page_flip(struct mtk_drm_crtc *mtk_crtc)
{
	drm_crtc_handle_vblank(&mtk_crtc->base);
	if (!mtk_crtc->config_updating && mtk_crtc->pending_needs_vblank) {
		mtk_drm_crtc_finish_page_flip(mtk_crtc);
		mtk_crtc->pending_needs_vblank = false;
	}
}
115
116 #if IS_REACHABLE(CONFIG_MTK_CMDQ)
117 static int mtk_drm_cmdq_pkt_create(struct cmdq_client *client, struct cmdq_pkt *pkt,
118                                    size_t size)
119 {
120         struct device *dev;
121         dma_addr_t dma_addr;
122
123         pkt->va_base = kzalloc(size, GFP_KERNEL);
124         if (!pkt->va_base)
125                 return -ENOMEM;
126
127         pkt->buf_size = size;
128         pkt->cl = (void *)client;
129
130         dev = client->chan->mbox->dev;
131         dma_addr = dma_map_single(dev, pkt->va_base, pkt->buf_size,
132                                   DMA_TO_DEVICE);
133         if (dma_mapping_error(dev, dma_addr)) {
134                 dev_err(dev, "dma map failed, size=%u\n", (u32)(u64)size);
135                 kfree(pkt->va_base);
136                 return -ENOMEM;
137         }
138
139         pkt->pa_base = dma_addr;
140
141         return 0;
142 }
143
144 static void mtk_drm_cmdq_pkt_destroy(struct cmdq_pkt *pkt)
145 {
146         struct cmdq_client *client = (struct cmdq_client *)pkt->cl;
147
148         dma_unmap_single(client->chan->mbox->dev, pkt->pa_base, pkt->buf_size,
149                          DMA_TO_DEVICE);
150         kfree(pkt->va_base);
151 }
152 #endif
153
154 static void mtk_drm_crtc_destroy(struct drm_crtc *crtc)
155 {
156         struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc);
157         int i;
158
159         mtk_mutex_put(mtk_crtc->mutex);
160 #if IS_REACHABLE(CONFIG_MTK_CMDQ)
161         mtk_drm_cmdq_pkt_destroy(&mtk_crtc->cmdq_handle);
162
163         if (mtk_crtc->cmdq_client.chan) {
164                 mbox_free_channel(mtk_crtc->cmdq_client.chan);
165                 mtk_crtc->cmdq_client.chan = NULL;
166         }
167 #endif
168
169         for (i = 0; i < mtk_crtc->ddp_comp_nr; i++) {
170                 struct mtk_ddp_comp *comp;
171
172                 comp = mtk_crtc->ddp_comp[i];
173                 mtk_ddp_comp_unregister_vblank_cb(comp);
174         }
175
176         drm_crtc_cleanup(crtc);
177 }
178
179 static void mtk_drm_crtc_reset(struct drm_crtc *crtc)
180 {
181         struct mtk_crtc_state *state;
182
183         if (crtc->state)
184                 __drm_atomic_helper_crtc_destroy_state(crtc->state);
185
186         kfree(to_mtk_crtc_state(crtc->state));
187         crtc->state = NULL;
188
189         state = kzalloc(sizeof(*state), GFP_KERNEL);
190         if (state)
191                 __drm_atomic_helper_crtc_reset(crtc, &state->base);
192 }
193
194 static struct drm_crtc_state *mtk_drm_crtc_duplicate_state(struct drm_crtc *crtc)
195 {
196         struct mtk_crtc_state *state;
197
198         state = kmalloc(sizeof(*state), GFP_KERNEL);
199         if (!state)
200                 return NULL;
201
202         __drm_atomic_helper_crtc_duplicate_state(crtc, &state->base);
203
204         WARN_ON(state->base.crtc != crtc);
205         state->base.crtc = crtc;
206         state->pending_config = false;
207
208         return &state->base;
209 }
210
211 static void mtk_drm_crtc_destroy_state(struct drm_crtc *crtc,
212                                        struct drm_crtc_state *state)
213 {
214         __drm_atomic_helper_crtc_destroy_state(state);
215         kfree(to_mtk_crtc_state(state));
216 }
217
218 static enum drm_mode_status
219 mtk_drm_crtc_mode_valid(struct drm_crtc *crtc,
220                         const struct drm_display_mode *mode)
221 {
222         struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc);
223         enum drm_mode_status status = MODE_OK;
224         int i;
225
226         for (i = 0; i < mtk_crtc->ddp_comp_nr; i++) {
227                 status = mtk_ddp_comp_mode_valid(mtk_crtc->ddp_comp[i], mode);
228                 if (status != MODE_OK)
229                         break;
230         }
231         return status;
232 }
233
/* Accept every mode unmodified; the helper framework requires this hook. */
static bool mtk_drm_crtc_mode_fixup(struct drm_crtc *crtc,
				    const struct drm_display_mode *mode,
				    struct drm_display_mode *adjusted_mode)
{
	/* Nothing to do here, but this callback is mandatory. */
	return true;
}
241
/*
 * mtk_drm_crtc_mode_set_nofb - latch the new mode's geometry
 * @crtc: crtc whose mode was set
 *
 * Stores width/height/vrefresh in the software state for the IRQ/CMDQ
 * path to program on the next frame; the write barrier orders the
 * parameter stores before the pending_config flag that readers test first.
 */
static void mtk_drm_crtc_mode_set_nofb(struct drm_crtc *crtc)
{
	struct mtk_crtc_state *state = to_mtk_crtc_state(crtc->state);

	state->pending_width = crtc->mode.hdisplay;
	state->pending_height = crtc->mode.vdisplay;
	state->pending_vrefresh = drm_mode_vrefresh(&crtc->mode);
	wmb();	/* Make sure the above parameters are set before update */
	state->pending_config = true;
}
252
253 static int mtk_crtc_ddp_clk_enable(struct mtk_drm_crtc *mtk_crtc)
254 {
255         int ret;
256         int i;
257
258         for (i = 0; i < mtk_crtc->ddp_comp_nr; i++) {
259                 ret = mtk_ddp_comp_clk_enable(mtk_crtc->ddp_comp[i]);
260                 if (ret) {
261                         DRM_ERROR("Failed to enable clock %d: %d\n", i, ret);
262                         goto err;
263                 }
264         }
265
266         return 0;
267 err:
268         while (--i >= 0)
269                 mtk_ddp_comp_clk_disable(mtk_crtc->ddp_comp[i]);
270         return ret;
271 }
272
273 static void mtk_crtc_ddp_clk_disable(struct mtk_drm_crtc *mtk_crtc)
274 {
275         int i;
276
277         for (i = 0; i < mtk_crtc->ddp_comp_nr; i++)
278                 mtk_ddp_comp_clk_disable(mtk_crtc->ddp_comp[i]);
279 }
280
281 static
282 struct mtk_ddp_comp *mtk_drm_ddp_comp_for_plane(struct drm_crtc *crtc,
283                                                 struct drm_plane *plane,
284                                                 unsigned int *local_layer)
285 {
286         struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc);
287         struct mtk_ddp_comp *comp;
288         int i, count = 0;
289         unsigned int local_index = plane - mtk_crtc->planes;
290
291         for (i = 0; i < mtk_crtc->ddp_comp_nr; i++) {
292                 comp = mtk_crtc->ddp_comp[i];
293                 if (local_index < (count + mtk_ddp_comp_layer_nr(comp))) {
294                         *local_layer = local_index - count;
295                         return comp;
296                 }
297                 count += mtk_ddp_comp_layer_nr(comp);
298         }
299
300         WARN(1, "Failed to find component for plane %d\n", plane->index);
301         return NULL;
302 }
303
304 #if IS_REACHABLE(CONFIG_MTK_CMDQ)
/*
 * ddp_cmdq_cb - mailbox completion callback for a flushed CMDQ packet
 * @cl: mbox_client embedded in the crtc's cmdq_client
 * @mssg: cmdq_cb_data carrying the completion status
 *
 * Runs after the command queue hardware has executed the packet built in
 * mtk_drm_crtc_update_config(): clears the pending flags that the CPU
 * path would have cleared synchronously, disarms the vblank watchdog and
 * wakes waiters (atomic_disable) blocked on cb_blocking_queue.
 */
static void ddp_cmdq_cb(struct mbox_client *cl, void *mssg)
{
	struct cmdq_cb_data *data = mssg;
	struct cmdq_client *cmdq_cl = container_of(cl, struct cmdq_client, client);
	struct mtk_drm_crtc *mtk_crtc = container_of(cmdq_cl, struct mtk_drm_crtc, cmdq_client);
	struct mtk_crtc_state *state;
	unsigned int i;

	/* On error, keep all pending flags set so the state is retried/kept. */
	if (data->sta < 0)
		return;

	state = to_mtk_crtc_state(mtk_crtc->base.state);

	state->pending_config = false;

	if (mtk_crtc->pending_planes) {
		for (i = 0; i < mtk_crtc->layer_nr; i++) {
			struct drm_plane *plane = &mtk_crtc->planes[i];
			struct mtk_plane_state *plane_state;

			plane_state = to_mtk_plane_state(plane->state);

			plane_state->pending.config = false;
		}
		mtk_crtc->pending_planes = false;
	}

	if (mtk_crtc->pending_async_planes) {
		for (i = 0; i < mtk_crtc->layer_nr; i++) {
			struct drm_plane *plane = &mtk_crtc->planes[i];
			struct mtk_plane_state *plane_state;

			plane_state = to_mtk_plane_state(plane->state);

			plane_state->pending.async_config = false;
		}
		mtk_crtc->pending_async_planes = false;
	}

	/* Packet executed in time: disarm the watchdog and release waiters. */
	mtk_crtc->cmdq_vblank_cnt = 0;
	wake_up(&mtk_crtc->cb_blocking_queue);
}
347 #endif
348
/*
 * mtk_crtc_ddp_hw_init - power up and wire together the display path
 * @mtk_crtc: crtc whose components are brought up
 *
 * Resolves the mode geometry and the minimum connector bpc, takes a
 * runtime-PM reference, prepares the mutex and component clocks, chains
 * the components into a pipeline, starts them, and writes the initial
 * (all-disabled) plane configuration.
 *
 * Returns 0 on success or a negative error code; on failure everything
 * acquired so far is released.
 */
static int mtk_crtc_ddp_hw_init(struct mtk_drm_crtc *mtk_crtc)
{
	struct drm_crtc *crtc = &mtk_crtc->base;
	struct drm_connector *connector;
	struct drm_encoder *encoder;
	struct drm_connector_list_iter conn_iter;
	unsigned int width, height, vrefresh, bpc = MTK_MAX_BPC;
	int ret;
	int i;

	if (WARN_ON(!crtc->state))
		return -EINVAL;

	width = crtc->state->adjusted_mode.hdisplay;
	height = crtc->state->adjusted_mode.vdisplay;
	vrefresh = drm_mode_vrefresh(&crtc->state->adjusted_mode);

	/* Use the smallest non-zero bpc advertised by any connector on this crtc. */
	drm_for_each_encoder(encoder, crtc->dev) {
		if (encoder->crtc != crtc)
			continue;

		drm_connector_list_iter_begin(crtc->dev, &conn_iter);
		drm_for_each_connector_iter(connector, &conn_iter) {
			if (connector->encoder != encoder)
				continue;
			if (connector->display_info.bpc != 0 &&
			    bpc > connector->display_info.bpc)
				bpc = connector->display_info.bpc;
		}
		drm_connector_list_iter_end(&conn_iter);
	}

	ret = pm_runtime_resume_and_get(crtc->dev->dev);
	if (ret < 0) {
		DRM_ERROR("Failed to enable power domain: %d\n", ret);
		return ret;
	}

	ret = mtk_mutex_prepare(mtk_crtc->mutex);
	if (ret < 0) {
		DRM_ERROR("Failed to enable mutex clock: %d\n", ret);
		goto err_pm_runtime_put;
	}

	ret = mtk_crtc_ddp_clk_enable(mtk_crtc);
	if (ret < 0) {
		DRM_ERROR("Failed to enable component clocks: %d\n", ret);
		goto err_mutex_unprepare;
	}

	/*
	 * Chain each component to its successor and register it with the
	 * mutex stream; a zero return from the per-component hook falls back
	 * to the generic mmsys/mutex call.
	 */
	for (i = 0; i < mtk_crtc->ddp_comp_nr - 1; i++) {
		if (!mtk_ddp_comp_connect(mtk_crtc->ddp_comp[i], mtk_crtc->mmsys_dev,
					  mtk_crtc->ddp_comp[i + 1]->id))
			mtk_mmsys_ddp_connect(mtk_crtc->mmsys_dev,
					      mtk_crtc->ddp_comp[i]->id,
					      mtk_crtc->ddp_comp[i + 1]->id);
		if (!mtk_ddp_comp_add(mtk_crtc->ddp_comp[i], mtk_crtc->mutex))
			mtk_mutex_add_comp(mtk_crtc->mutex,
					   mtk_crtc->ddp_comp[i]->id);
	}
	/* The last component has no successor but still joins the mutex. */
	if (!mtk_ddp_comp_add(mtk_crtc->ddp_comp[i], mtk_crtc->mutex))
		mtk_mutex_add_comp(mtk_crtc->mutex, mtk_crtc->ddp_comp[i]->id);
	mtk_mutex_enable(mtk_crtc->mutex);

	for (i = 0; i < mtk_crtc->ddp_comp_nr; i++) {
		struct mtk_ddp_comp *comp = mtk_crtc->ddp_comp[i];

		/* NOTE(review): only the second component gets background
		 * color input enabled — presumably the blender; confirm. */
		if (i == 1)
			mtk_ddp_comp_bgclr_in_on(comp);

		mtk_ddp_comp_config(comp, width, height, vrefresh, bpc, NULL);
		mtk_ddp_comp_start(comp);
	}

	/* Initially configure all planes */
	for (i = 0; i < mtk_crtc->layer_nr; i++) {
		struct drm_plane *plane = &mtk_crtc->planes[i];
		struct mtk_plane_state *plane_state;
		struct mtk_ddp_comp *comp;
		unsigned int local_layer;

		plane_state = to_mtk_plane_state(plane->state);

		/* should not enable layer before crtc enabled */
		plane_state->pending.enable = false;
		comp = mtk_drm_ddp_comp_for_plane(crtc, plane, &local_layer);
		if (comp)
			mtk_ddp_comp_layer_config(comp, local_layer,
						  plane_state, NULL);
	}

	return 0;

err_mutex_unprepare:
	mtk_mutex_unprepare(mtk_crtc->mutex);
err_pm_runtime_put:
	pm_runtime_put(crtc->dev->dev);
	return ret;
}
448
/*
 * mtk_crtc_ddp_hw_fini - tear down the path set up by mtk_crtc_ddp_hw_init
 * @mtk_crtc: crtc whose components are being shut down
 *
 * Stops the components, detaches them from the mutex stream, breaks the
 * pipeline links, gates the clocks and drops the runtime-PM reference.
 * Finally, deliver a pending event for a crtc going inactive, since no
 * further vblank will arrive to do it.
 */
static void mtk_crtc_ddp_hw_fini(struct mtk_drm_crtc *mtk_crtc)
{
	struct drm_device *drm = mtk_crtc->base.dev;
	struct drm_crtc *crtc = &mtk_crtc->base;
	int i;

	for (i = 0; i < mtk_crtc->ddp_comp_nr; i++) {
		mtk_ddp_comp_stop(mtk_crtc->ddp_comp[i]);
		/* Undo the bgclr input enabled on the second component in hw_init. */
		if (i == 1)
			mtk_ddp_comp_bgclr_in_off(mtk_crtc->ddp_comp[i]);
	}

	/*
	 * NOTE(review): every component is removed from the mutex here AND
	 * again in the disconnect loop below — presumably the removal is
	 * idempotent; confirm whether one of the two passes is redundant.
	 */
	for (i = 0; i < mtk_crtc->ddp_comp_nr; i++)
		if (!mtk_ddp_comp_remove(mtk_crtc->ddp_comp[i], mtk_crtc->mutex))
			mtk_mutex_remove_comp(mtk_crtc->mutex,
					      mtk_crtc->ddp_comp[i]->id);
	mtk_mutex_disable(mtk_crtc->mutex);
	/* Break the links; zero return from the hook falls back to mmsys. */
	for (i = 0; i < mtk_crtc->ddp_comp_nr - 1; i++) {
		if (!mtk_ddp_comp_disconnect(mtk_crtc->ddp_comp[i], mtk_crtc->mmsys_dev,
					     mtk_crtc->ddp_comp[i + 1]->id))
			mtk_mmsys_ddp_disconnect(mtk_crtc->mmsys_dev,
						 mtk_crtc->ddp_comp[i]->id,
						 mtk_crtc->ddp_comp[i + 1]->id);
		if (!mtk_ddp_comp_remove(mtk_crtc->ddp_comp[i], mtk_crtc->mutex))
			mtk_mutex_remove_comp(mtk_crtc->mutex,
					      mtk_crtc->ddp_comp[i]->id);
	}
	if (!mtk_ddp_comp_remove(mtk_crtc->ddp_comp[i], mtk_crtc->mutex))
		mtk_mutex_remove_comp(mtk_crtc->mutex, mtk_crtc->ddp_comp[i]->id);
	mtk_crtc_ddp_clk_disable(mtk_crtc);
	mtk_mutex_unprepare(mtk_crtc->mutex);

	pm_runtime_put(drm->dev);

	/* A crtc switched off gets its event now; no vblank will follow. */
	if (crtc->state->event && !crtc->state->active) {
		spin_lock_irq(&crtc->dev->event_lock);
		drm_crtc_send_vblank_event(crtc, crtc->state->event);
		crtc->state->event = NULL;
		spin_unlock_irq(&crtc->dev->event_lock);
	}
}
490
/*
 * mtk_crtc_ddp_config - apply pending crtc and plane state to the hardware
 * @crtc: crtc to program
 * @cmdq_handle: CMDQ packet to append the register writes to, or NULL to
 *               have the CPU write the registers directly
 *
 * When going through CMDQ the pending flags are left set here and cleared
 * later in ddp_cmdq_cb(), once the hardware has executed the packet; on
 * the direct CPU path they are cleared immediately.
 */
static void mtk_crtc_ddp_config(struct drm_crtc *crtc,
				struct cmdq_pkt *cmdq_handle)
{
	struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc);
	struct mtk_crtc_state *state = to_mtk_crtc_state(mtk_crtc->base.state);
	struct mtk_ddp_comp *comp = mtk_crtc->ddp_comp[0];
	unsigned int i;
	unsigned int local_layer;

	/*
	 * TODO: instead of updating the registers here, we should prepare
	 * working registers in atomic_commit and let the hardware command
	 * queue update module registers on vblank.
	 */
	if (state->pending_config) {
		/* Geometry goes to the first component of the path only. */
		mtk_ddp_comp_config(comp, state->pending_width,
				    state->pending_height,
				    state->pending_vrefresh, 0,
				    cmdq_handle);

		if (!cmdq_handle)
			state->pending_config = false;
	}

	if (mtk_crtc->pending_planes) {
		for (i = 0; i < mtk_crtc->layer_nr; i++) {
			struct drm_plane *plane = &mtk_crtc->planes[i];
			struct mtk_plane_state *plane_state;

			plane_state = to_mtk_plane_state(plane->state);

			if (!plane_state->pending.config)
				continue;

			comp = mtk_drm_ddp_comp_for_plane(crtc, plane,
							  &local_layer);

			if (comp)
				mtk_ddp_comp_layer_config(comp, local_layer,
							  plane_state,
							  cmdq_handle);
			if (!cmdq_handle)
				plane_state->pending.config = false;
		}

		if (!cmdq_handle)
			mtk_crtc->pending_planes = false;
	}

	/* Async (e.g. cursor) updates follow the same scheme separately. */
	if (mtk_crtc->pending_async_planes) {
		for (i = 0; i < mtk_crtc->layer_nr; i++) {
			struct drm_plane *plane = &mtk_crtc->planes[i];
			struct mtk_plane_state *plane_state;

			plane_state = to_mtk_plane_state(plane->state);

			if (!plane_state->pending.async_config)
				continue;

			comp = mtk_drm_ddp_comp_for_plane(crtc, plane,
							  &local_layer);

			if (comp)
				mtk_ddp_comp_layer_config(comp, local_layer,
							  plane_state,
							  cmdq_handle);
			if (!cmdq_handle)
				plane_state->pending.async_config = false;
		}

		if (!cmdq_handle)
			mtk_crtc->pending_async_planes = false;
	}
}
565
/*
 * mtk_drm_crtc_update_config - latch dirty plane state and kick the update
 * @mtk_crtc: crtc to update
 * @needs_vblank: true when a page-flip event must be delivered on the next
 *                vblank
 *
 * Promotes dirty plane states to pending, then either relies on the
 * hardware shadow registers to pick the change up, or builds and submits
 * a CMDQ packet that applies it on the next frame event. Serialized by
 * hw_lock; config_updating keeps the vblank IRQ from completing a page
 * flip while the update is still being assembled.
 */
static void mtk_drm_crtc_update_config(struct mtk_drm_crtc *mtk_crtc,
				       bool needs_vblank)
{
#if IS_REACHABLE(CONFIG_MTK_CMDQ)
	struct cmdq_pkt *cmdq_handle = &mtk_crtc->cmdq_handle;
#endif
	struct drm_crtc *crtc = &mtk_crtc->base;
	struct mtk_drm_private *priv = crtc->dev->dev_private;
	unsigned int pending_planes = 0, pending_async_planes = 0;
	int i;

	mutex_lock(&mtk_crtc->hw_lock);
	mtk_crtc->config_updating = true;
	if (needs_vblank)
		mtk_crtc->pending_needs_vblank = true;

	/* Turn each plane's dirty mark into a pending (sync or async) config. */
	for (i = 0; i < mtk_crtc->layer_nr; i++) {
		struct drm_plane *plane = &mtk_crtc->planes[i];
		struct mtk_plane_state *plane_state;

		plane_state = to_mtk_plane_state(plane->state);
		if (plane_state->pending.dirty) {
			plane_state->pending.config = true;
			plane_state->pending.dirty = false;
			pending_planes |= BIT(i);
		} else if (plane_state->pending.async_dirty) {
			plane_state->pending.async_config = true;
			plane_state->pending.async_dirty = false;
			pending_async_planes |= BIT(i);
		}
	}
	if (pending_planes)
		mtk_crtc->pending_planes = true;
	if (pending_async_planes)
		mtk_crtc->pending_async_planes = true;

	/* Shadow-register hardware: write now, under the display mutex. */
	if (priv->data->shadow_register) {
		mtk_mutex_acquire(mtk_crtc->mutex);
		mtk_crtc_ddp_config(crtc, NULL);
		mtk_mutex_release(mtk_crtc->mutex);
	}
#if IS_REACHABLE(CONFIG_MTK_CMDQ)
	if (mtk_crtc->cmdq_client.chan) {
		/* Abort any packet still in flight, then rebuild from scratch. */
		mbox_flush(mtk_crtc->cmdq_client.chan, 2000);
		cmdq_handle->cmd_buf_size = 0;
		cmdq_pkt_clear_event(cmdq_handle, mtk_crtc->cmdq_event);
		cmdq_pkt_wfe(cmdq_handle, mtk_crtc->cmdq_event, false);
		mtk_crtc_ddp_config(crtc, cmdq_handle);
		cmdq_pkt_finalize(cmdq_handle);
		dma_sync_single_for_device(mtk_crtc->cmdq_client.chan->mbox->dev,
					   cmdq_handle->pa_base,
					   cmdq_handle->cmd_buf_size,
					   DMA_TO_DEVICE);
		/*
		 * CMDQ command should execute in next 3 vblank.
		 * One vblank interrupt before send message (occasionally)
		 * and one vblank interrupt after cmdq done,
		 * so it's timeout after 3 vblank interrupt.
		 * If it fail to execute in next 3 vblank, timeout happen.
		 */
		mtk_crtc->cmdq_vblank_cnt = 3;

		mbox_send_message(mtk_crtc->cmdq_client.chan, cmdq_handle);
		mbox_client_txdone(mtk_crtc->cmdq_client.chan, 0);
	}
#endif
	mtk_crtc->config_updating = false;
	mutex_unlock(&mtk_crtc->hw_lock);
}
635
/*
 * mtk_crtc_ddp_irq - per-frame interrupt from the display path
 * @data: the drm_crtc, passed back as an opaque pointer
 *
 * Without shadow registers or CMDQ, pending configuration is written to
 * the hardware directly from here; with CMDQ, the watchdog counter armed
 * in update_config() is decremented to detect packets that never ran.
 * Always finishes with the common vblank/page-flip bookkeeping.
 */
static void mtk_crtc_ddp_irq(void *data)
{
	struct drm_crtc *crtc = data;
	struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc);
	struct mtk_drm_private *priv = crtc->dev->dev_private;

#if IS_REACHABLE(CONFIG_MTK_CMDQ)
	if (!priv->data->shadow_register && !mtk_crtc->cmdq_client.chan)
		mtk_crtc_ddp_config(crtc, NULL);
	else if (mtk_crtc->cmdq_vblank_cnt > 0 && --mtk_crtc->cmdq_vblank_cnt == 0)
		DRM_ERROR("mtk_crtc %d CMDQ execute command timeout!\n",
			  drm_crtc_index(&mtk_crtc->base));
#else
	if (!priv->data->shadow_register)
		mtk_crtc_ddp_config(crtc, NULL);
#endif
	mtk_drm_finish_page_flip(mtk_crtc);
}
654
655 static int mtk_drm_crtc_enable_vblank(struct drm_crtc *crtc)
656 {
657         struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc);
658         struct mtk_ddp_comp *comp = mtk_crtc->ddp_comp[0];
659
660         mtk_ddp_comp_enable_vblank(comp);
661
662         return 0;
663 }
664
665 static void mtk_drm_crtc_disable_vblank(struct drm_crtc *crtc)
666 {
667         struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc);
668         struct mtk_ddp_comp *comp = mtk_crtc->ddp_comp[0];
669
670         mtk_ddp_comp_disable_vblank(comp);
671 }
672
/*
 * mtk_drm_crtc_update_output - re-route the crtc output on connector change
 * @crtc: crtc being (re)enabled
 * @state: atomic state containing the crtc's new state
 *
 * For crtcs with selectable connector routes, replaces the last component
 * of the display path with the one backing the encoder selected in @state.
 * No-op when connectors did not change or the crtc has a fixed path.
 */
static void mtk_drm_crtc_update_output(struct drm_crtc *crtc,
				       struct drm_atomic_state *state)
{
	int crtc_index = drm_crtc_index(crtc);
	int i;
	struct device *dev;
	struct drm_crtc_state *crtc_state = state->crtcs[crtc_index].new_state;
	struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc);
	struct mtk_drm_private *priv;
	unsigned int encoder_mask = crtc_state->encoder_mask;

	if (!crtc_state->connectors_changed)
		return;

	/* Fixed-path crtcs have nothing to re-route. */
	if (!mtk_crtc->num_conn_routes)
		return;

	priv = ((struct mtk_drm_private *)crtc->dev->dev_private)->all_drm_private[crtc_index];
	dev = priv->dev;

	dev_dbg(dev, "connector change:%d, encoder mask:0x%x for crtc:%d\n",
		crtc_state->connectors_changed, encoder_mask, crtc_index);

	for (i = 0; i < mtk_crtc->num_conn_routes; i++) {
		unsigned int comp_id = mtk_crtc->conn_routes[i].route_ddp;
		struct mtk_ddp_comp *comp = &priv->ddp_comp[comp_id];

		/*
		 * NOTE(review): the >= 0 test implies encoder_index uses a
		 * negative sentinel for "unbound" — confirm its declared type,
		 * since an unsigned field would make the check always true.
		 */
		if (comp->encoder_index >= 0 &&
		    (encoder_mask & BIT(comp->encoder_index))) {
			/* The routed component always sits last in the path. */
			mtk_crtc->ddp_comp[mtk_crtc->ddp_comp_nr - 1] = comp;
			dev_dbg(dev, "Add comp_id: %d at path index %d\n",
				comp->id, mtk_crtc->ddp_comp_nr - 1);
			break;
		}
	}
}
709
/*
 * mtk_drm_crtc_plane_check - validate a plane state against its component
 * @crtc: crtc owning the plane
 * @plane: plane being checked
 * @state: MediaTek plane state to validate
 *
 * Returns the component's verdict, or 0 when no component backs the plane.
 */
int mtk_drm_crtc_plane_check(struct drm_crtc *crtc, struct drm_plane *plane,
			     struct mtk_plane_state *state)
{
	struct mtk_ddp_comp *comp;
	unsigned int local_layer;

	comp = mtk_drm_ddp_comp_for_plane(crtc, plane, &local_layer);
	if (!comp)
		return 0;

	return mtk_ddp_comp_layer_check(comp, local_layer, state);
}
721
722 void mtk_drm_crtc_async_update(struct drm_crtc *crtc, struct drm_plane *plane,
723                                struct drm_atomic_state *state)
724 {
725         struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc);
726
727         if (!mtk_crtc->enabled)
728                 return;
729
730         mtk_drm_crtc_update_config(mtk_crtc, false);
731 }
732
/*
 * mtk_drm_crtc_atomic_enable - .atomic_enable hook
 *
 * Powers on the first component's domain, resolves a possibly changed
 * output route, brings up the hardware path and enables vblanks. Errors
 * are logged and leave the crtc disabled — the hook returns void, so
 * they cannot be propagated to the atomic core.
 */
static void mtk_drm_crtc_atomic_enable(struct drm_crtc *crtc,
				       struct drm_atomic_state *state)
{
	struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc);
	struct mtk_ddp_comp *comp = mtk_crtc->ddp_comp[0];
	int ret;

	DRM_DEBUG_DRIVER("%s %d\n", __func__, crtc->base.id);

	ret = mtk_ddp_comp_power_on(comp);
	if (ret < 0) {
		DRM_DEV_ERROR(comp->dev, "Failed to enable power domain: %d\n", ret);
		return;
	}

	/* Must happen before hw_init so the final path component is current. */
	mtk_drm_crtc_update_output(crtc, state);

	ret = mtk_crtc_ddp_hw_init(mtk_crtc);
	if (ret) {
		mtk_ddp_comp_power_off(comp);
		return;
	}

	drm_crtc_vblank_on(crtc);
	mtk_crtc->enabled = true;
}
759
/*
 * mtk_drm_crtc_atomic_disable - .atomic_disable hook
 *
 * Marks every plane disabled, pushes that final configuration to the
 * hardware, waits for it to take effect (CMDQ completion and/or one
 * vblank), then turns vblanks off and powers the path down.
 */
static void mtk_drm_crtc_atomic_disable(struct drm_crtc *crtc,
					struct drm_atomic_state *state)
{
	struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc);
	struct mtk_ddp_comp *comp = mtk_crtc->ddp_comp[0];
	int i;

	DRM_DEBUG_DRIVER("%s %d\n", __func__, crtc->base.id);
	if (!mtk_crtc->enabled)
		return;

	/* Set all pending plane state to disabled */
	for (i = 0; i < mtk_crtc->layer_nr; i++) {
		struct drm_plane *plane = &mtk_crtc->planes[i];
		struct mtk_plane_state *plane_state;

		plane_state = to_mtk_plane_state(plane->state);
		plane_state->pending.enable = false;
		plane_state->pending.config = true;
	}
	mtk_crtc->pending_planes = true;

	mtk_drm_crtc_update_config(mtk_crtc, false);
#if IS_REACHABLE(CONFIG_MTK_CMDQ)
	/* Wait for planes to be disabled by cmdq */
	if (mtk_crtc->cmdq_client.chan)
		wait_event_timeout(mtk_crtc->cb_blocking_queue,
				   mtk_crtc->cmdq_vblank_cnt == 0,
				   msecs_to_jiffies(500));
#endif
	/* Wait for planes to be disabled */
	drm_crtc_wait_one_vblank(crtc);

	drm_crtc_vblank_off(crtc);
	mtk_crtc_ddp_hw_fini(mtk_crtc);
	mtk_ddp_comp_power_off(comp);

	mtk_crtc->enabled = false;
}
799
800 static void mtk_drm_crtc_atomic_begin(struct drm_crtc *crtc,
801                                       struct drm_atomic_state *state)
802 {
803         struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state,
804                                                                           crtc);
805         struct mtk_crtc_state *mtk_crtc_state = to_mtk_crtc_state(crtc_state);
806         struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc);
807         unsigned long flags;
808
809         if (mtk_crtc->event && mtk_crtc_state->base.event)
810                 DRM_ERROR("new event while there is still a pending event\n");
811
812         if (mtk_crtc_state->base.event) {
813                 mtk_crtc_state->base.event->pipe = drm_crtc_index(crtc);
814                 WARN_ON(drm_crtc_vblank_get(crtc) != 0);
815
816                 spin_lock_irqsave(&crtc->dev->event_lock, flags);
817                 mtk_crtc->event = mtk_crtc_state->base.event;
818                 spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
819
820                 mtk_crtc_state->base.event = NULL;
821         }
822 }
823
824 static void mtk_drm_crtc_atomic_flush(struct drm_crtc *crtc,
825                                       struct drm_atomic_state *state)
826 {
827         struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc);
828         int i;
829
830         if (crtc->state->color_mgmt_changed)
831                 for (i = 0; i < mtk_crtc->ddp_comp_nr; i++) {
832                         mtk_ddp_gamma_set(mtk_crtc->ddp_comp[i], crtc->state);
833                         mtk_ddp_ctm_set(mtk_crtc->ddp_comp[i], crtc->state);
834                 }
835         mtk_drm_crtc_update_config(mtk_crtc, !!mtk_crtc->event);
836 }
837
/* CRTC ioctl entry points; all modeset/flip paths go through the atomic helpers. */
static const struct drm_crtc_funcs mtk_crtc_funcs = {
	.set_config		= drm_atomic_helper_set_config,
	.page_flip		= drm_atomic_helper_page_flip,
	.destroy		= mtk_drm_crtc_destroy,
	.reset			= mtk_drm_crtc_reset,
	.atomic_duplicate_state = mtk_drm_crtc_duplicate_state,
	.atomic_destroy_state	= mtk_drm_crtc_destroy_state,
	.enable_vblank		= mtk_drm_crtc_enable_vblank,
	.disable_vblank		= mtk_drm_crtc_disable_vblank,
};
848
/* Per-CRTC hooks invoked by the DRM atomic commit machinery. */
static const struct drm_crtc_helper_funcs mtk_crtc_helper_funcs = {
	.mode_fixup	= mtk_drm_crtc_mode_fixup,
	.mode_set_nofb	= mtk_drm_crtc_mode_set_nofb,
	.mode_valid	= mtk_drm_crtc_mode_valid,
	.atomic_begin	= mtk_drm_crtc_atomic_begin,
	.atomic_flush	= mtk_drm_crtc_atomic_flush,
	.atomic_enable	= mtk_drm_crtc_atomic_enable,
	.atomic_disable = mtk_drm_crtc_atomic_disable,
};
858
859 static int mtk_drm_crtc_init(struct drm_device *drm,
860                              struct mtk_drm_crtc *mtk_crtc,
861                              unsigned int pipe)
862 {
863         struct drm_plane *primary = NULL;
864         struct drm_plane *cursor = NULL;
865         int i, ret;
866
867         for (i = 0; i < mtk_crtc->layer_nr; i++) {
868                 if (mtk_crtc->planes[i].type == DRM_PLANE_TYPE_PRIMARY)
869                         primary = &mtk_crtc->planes[i];
870                 else if (mtk_crtc->planes[i].type == DRM_PLANE_TYPE_CURSOR)
871                         cursor = &mtk_crtc->planes[i];
872         }
873
874         ret = drm_crtc_init_with_planes(drm, &mtk_crtc->base, primary, cursor,
875                                         &mtk_crtc_funcs, NULL);
876         if (ret)
877                 goto err_cleanup_crtc;
878
879         drm_crtc_helper_add(&mtk_crtc->base, &mtk_crtc_helper_funcs);
880
881         return 0;
882
883 err_cleanup_crtc:
884         drm_crtc_cleanup(&mtk_crtc->base);
885         return ret;
886 }
887
888 static int mtk_drm_crtc_num_comp_planes(struct mtk_drm_crtc *mtk_crtc,
889                                         int comp_idx)
890 {
891         struct mtk_ddp_comp *comp;
892
893         if (comp_idx > 1)
894                 return 0;
895
896         comp = mtk_crtc->ddp_comp[comp_idx];
897         if (!comp->funcs)
898                 return 0;
899
900         if (comp_idx == 1 && !comp->funcs->bgclr_in_on)
901                 return 0;
902
903         return mtk_ddp_comp_layer_nr(comp);
904 }
905
906 static inline
907 enum drm_plane_type mtk_drm_crtc_plane_type(unsigned int plane_idx,
908                                             unsigned int num_planes)
909 {
910         if (plane_idx == 0)
911                 return DRM_PLANE_TYPE_PRIMARY;
912         else if (plane_idx == (num_planes - 1))
913                 return DRM_PLANE_TYPE_CURSOR;
914         else
915                 return DRM_PLANE_TYPE_OVERLAY;
916
917 }
918
/*
 * mtk_drm_crtc_init_comp_planes - create the DRM planes backed by one component
 * @drm_dev: DRM device
 * @mtk_crtc: owning CRTC; its layer_nr counter is advanced by this call
 * @comp_idx: index of the component within the CRTC's ddp_comp array
 * @pipe: CRTC/pipe index used to build the plane's possible-CRTC mask
 *
 * Initializes one drm_plane per layer of the component and appends them to
 * mtk_crtc->planes. Returns 0 or the first mtk_plane_init() error.
 */
static int mtk_drm_crtc_init_comp_planes(struct drm_device *drm_dev,
					 struct mtk_drm_crtc *mtk_crtc,
					 int comp_idx, int pipe)
{
	int num_planes = mtk_drm_crtc_num_comp_planes(mtk_crtc, comp_idx);
	struct mtk_ddp_comp *comp = mtk_crtc->ddp_comp[comp_idx];
	int i, ret;

	for (i = 0; i < num_planes; i++) {
		/*
		 * NOTE(review): the plane type is computed from the
		 * CRTC-global layer_nr but bounded by this component's
		 * num_planes, so for comp_idx > 0 the index can exceed
		 * num_planes - 1 and every plane becomes an overlay —
		 * presumably intentional (primary/cursor live on the first
		 * component); confirm against the intended plane layout.
		 */
		ret = mtk_plane_init(drm_dev,
				&mtk_crtc->planes[mtk_crtc->layer_nr],
				BIT(pipe),
				mtk_drm_crtc_plane_type(mtk_crtc->layer_nr,
							num_planes),
				mtk_ddp_comp_supported_rotations(comp),
				mtk_ddp_comp_get_formats(comp),
				mtk_ddp_comp_get_num_formats(comp));
		if (ret)
			return ret;

		mtk_crtc->layer_nr++;
	}
	return 0;
}
943
944 struct device *mtk_drm_crtc_dma_dev_get(struct drm_crtc *crtc)
945 {
946         struct mtk_drm_crtc *mtk_crtc = NULL;
947
948         if (!crtc)
949                 return NULL;
950
951         mtk_crtc = to_mtk_crtc(crtc);
952         if (!mtk_crtc)
953                 return NULL;
954
955         return mtk_crtc->dma_dev;
956 }
957
/*
 * mtk_drm_crtc_create - allocate and register one CRTC for a DDP pipeline
 * @drm_dev: DRM device
 * @path: ordered array of component IDs forming the display pipeline
 * @path_len: number of entries in @path
 * @priv_data_index: index into priv->all_drm_private selecting the sub-driver
 *                   private data this pipeline belongs to
 * @conn_routes: optional table of alternative output routes (may be NULL)
 * @num_conn_routes: number of entries in @conn_routes
 *
 * Validates the pipeline components, allocates the CRTC and its planes,
 * wires up vblank callbacks, optionally sets up a CMDQ mailbox channel for
 * register programming by GCE, and registers the CRTC with the DRM core.
 *
 * Returns 0 on success — including the benign "component disabled, no CRTC
 * created" case — or a negative errno.
 */
int mtk_drm_crtc_create(struct drm_device *drm_dev,
			const unsigned int *path, unsigned int path_len,
			int priv_data_index, const struct mtk_drm_route *conn_routes,
			unsigned int num_conn_routes)
{
	struct mtk_drm_private *priv = drm_dev->dev_private;
	struct device *dev = drm_dev->dev;
	struct mtk_drm_crtc *mtk_crtc;
	unsigned int num_comp_planes = 0;
	int ret;
	int i;
	bool has_ctm = false;
	uint gamma_lut_size = 0;
	struct drm_crtc *tmp;
	int crtc_i = 0;

	/* No pipeline description means nothing to create; not an error. */
	if (!path)
		return 0;

	priv = priv->all_drm_private[priv_data_index];

	/* Count already-registered CRTCs to derive this CRTC's index. */
	drm_for_each_crtc(tmp, drm_dev)
		crtc_i++;

	/* Verify every component on the path is present and was probed. */
	for (i = 0; i < path_len; i++) {
		enum mtk_ddp_comp_id comp_id = path[i];
		struct device_node *node;
		struct mtk_ddp_comp *comp;

		node = priv->comp_node[comp_id];
		comp = &priv->ddp_comp[comp_id];

		/* Not all drm components have a DTS device node, such as ovl_adaptor,
		 * which is the drm bring up sub driver
		 */
		if (!node && comp_id != DDP_COMPONENT_DRM_OVL_ADAPTOR) {
			dev_info(dev,
				"Not creating crtc %d because component %d is disabled or missing\n",
				crtc_i, comp_id);
			return 0;
		}

		if (!comp->dev) {
			dev_err(dev, "Component %pOF not initialized\n", node);
			return -ENODEV;
		}
	}

	mtk_crtc = devm_kzalloc(dev, sizeof(*mtk_crtc), GFP_KERNEL);
	if (!mtk_crtc)
		return -ENOMEM;

	mtk_crtc->mmsys_dev = priv->mmsys_dev;
	mtk_crtc->ddp_comp_nr = path_len;
	/* One extra slot is reserved for a routed output component (see the
	 * conn_routes handling at the end of this function). */
	mtk_crtc->ddp_comp = devm_kmalloc_array(dev,
						mtk_crtc->ddp_comp_nr + (conn_routes ? 1 : 0),
						sizeof(*mtk_crtc->ddp_comp),
						GFP_KERNEL);
	if (!mtk_crtc->ddp_comp)
		return -ENOMEM;

	mtk_crtc->mutex = mtk_mutex_get(priv->mutex_dev);
	if (IS_ERR(mtk_crtc->mutex)) {
		ret = PTR_ERR(mtk_crtc->mutex);
		dev_err(dev, "Failed to get mutex: %d\n", ret);
		return ret;
	}

	/* NOTE(review): the error returns below this point do not release
	 * mtk_crtc->mutex — looks like a leak on failure; TODO confirm
	 * whether mtk_mutex_get() needs a matching put on these paths. */

	/* Resolve the path into component pointers and collect the CRTC's
	 * color-management capabilities (gamma LUT size, CTM support). */
	for (i = 0; i < mtk_crtc->ddp_comp_nr; i++) {
		unsigned int comp_id = path[i];
		struct mtk_ddp_comp *comp;

		comp = &priv->ddp_comp[comp_id];
		mtk_crtc->ddp_comp[i] = comp;

		if (comp->funcs) {
			if (comp->funcs->gamma_set && comp->funcs->gamma_get_lut_size) {
				unsigned int lut_sz = mtk_ddp_gamma_get_lut_size(comp);

				/* The last component reporting a non-zero
				 * LUT size wins. */
				if (lut_sz)
					gamma_lut_size = lut_sz;
			}

			if (comp->funcs->ctm_set)
				has_ctm = true;
		}

		mtk_ddp_comp_register_vblank_cb(comp, mtk_crtc_ddp_irq,
						&mtk_crtc->base);
	}

	for (i = 0; i < mtk_crtc->ddp_comp_nr; i++)
		num_comp_planes += mtk_drm_crtc_num_comp_planes(mtk_crtc, i);

	mtk_crtc->planes = devm_kcalloc(dev, num_comp_planes,
					sizeof(struct drm_plane), GFP_KERNEL);
	if (!mtk_crtc->planes)
		return -ENOMEM;

	for (i = 0; i < mtk_crtc->ddp_comp_nr; i++) {
		ret = mtk_drm_crtc_init_comp_planes(drm_dev, mtk_crtc, i,
						    crtc_i);
		if (ret)
			return ret;
	}

	/*
	 * Default to use the first component as the dma dev.
	 * In the case of ovl_adaptor sub driver, it needs to use the
	 * dma_dev_get function to get representative dma dev.
	 */
	mtk_crtc->dma_dev = mtk_ddp_comp_dma_dev_get(&priv->ddp_comp[path[0]]);

	ret = mtk_drm_crtc_init(drm_dev, mtk_crtc, crtc_i);
	if (ret < 0)
		return ret;

	if (gamma_lut_size)
		drm_mode_crtc_set_gamma_size(&mtk_crtc->base, gamma_lut_size);
	drm_crtc_enable_color_mgmt(&mtk_crtc->base, 0, has_ctm, gamma_lut_size);
	mutex_init(&mtk_crtc->hw_lock);

#if IS_REACHABLE(CONFIG_MTK_CMDQ)
	/* Optional CMDQ (GCE) support: registers are then programmed by the
	 * command queue hardware instead of the CPU. Any failure here falls
	 * back to CPU register writes and is not fatal. */
	i = priv->mbox_index++;
	mtk_crtc->cmdq_client.client.dev = mtk_crtc->mmsys_dev;
	mtk_crtc->cmdq_client.client.tx_block = false;
	mtk_crtc->cmdq_client.client.knows_txdone = true;
	mtk_crtc->cmdq_client.client.rx_callback = ddp_cmdq_cb;
	mtk_crtc->cmdq_client.chan =
			mbox_request_channel(&mtk_crtc->cmdq_client.client, i);
	if (IS_ERR(mtk_crtc->cmdq_client.chan)) {
		dev_dbg(dev, "mtk_crtc %d failed to create mailbox client, writing register by CPU now\n",
			drm_crtc_index(&mtk_crtc->base));
		mtk_crtc->cmdq_client.chan = NULL;
	}

	if (mtk_crtc->cmdq_client.chan) {
		/* The GCE event signalling vblank completion for this CRTC. */
		ret = of_property_read_u32_index(priv->mutex_node,
						 "mediatek,gce-events",
						 i,
						 &mtk_crtc->cmdq_event);
		if (ret) {
			dev_dbg(dev, "mtk_crtc %d failed to get mediatek,gce-events property\n",
				drm_crtc_index(&mtk_crtc->base));
			mbox_free_channel(mtk_crtc->cmdq_client.chan);
			mtk_crtc->cmdq_client.chan = NULL;
		} else {
			ret = mtk_drm_cmdq_pkt_create(&mtk_crtc->cmdq_client,
						      &mtk_crtc->cmdq_handle,
						      PAGE_SIZE);
			if (ret) {
				dev_dbg(dev, "mtk_crtc %d failed to create cmdq packet\n",
					drm_crtc_index(&mtk_crtc->base));
				mbox_free_channel(mtk_crtc->cmdq_client.chan);
				mtk_crtc->cmdq_client.chan = NULL;
			}
		}

		/* for sending blocking cmd in crtc disable */
		init_waitqueue_head(&mtk_crtc->cb_blocking_queue);
	}
#endif

	if (conn_routes) {
		/* Tag each routable output component with its encoder index
		 * so the output can be switched at runtime. */
		for (i = 0; i < num_conn_routes; i++) {
			unsigned int comp_id = conn_routes[i].route_ddp;
			struct device_node *node = priv->comp_node[comp_id];
			struct mtk_ddp_comp *comp = &priv->ddp_comp[comp_id];

			if (!comp->dev) {
				dev_dbg(dev, "comp_id:%d, Component %pOF not initialized\n",
					comp_id, node);
				/* mark encoder_index to -1, if route comp device is not enabled */
				comp->encoder_index = -1;
				continue;
			}

			mtk_ddp_comp_encoder_index_set(&priv->ddp_comp[comp_id]);
		}

		mtk_crtc->num_conn_routes = num_conn_routes;
		mtk_crtc->conn_routes = conn_routes;

		/* increase ddp_comp_nr at the end of mtk_drm_crtc_create */
		mtk_crtc->ddp_comp_nr++;
	}

	return 0;
}
This page took 0.114132 seconds and 4 git commands to generate.