]> Git Repo - linux.git/blob - drivers/gpu/drm/tegra/plane.c
Merge tag 'linux-watchdog-6.14-rc1' of git://www.linux-watchdog.org/linux-watchdog
[linux.git] / drivers / gpu / drm / tegra / plane.c
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Copyright (C) 2017 NVIDIA CORPORATION.  All rights reserved.
4  */
5
6 #include <linux/dma-mapping.h>
7 #include <linux/iommu.h>
8 #include <linux/interconnect.h>
9
10 #include <drm/drm_atomic.h>
11 #include <drm/drm_atomic_helper.h>
12 #include <drm/drm_fourcc.h>
13 #include <drm/drm_framebuffer.h>
14 #include <drm/drm_gem_atomic_helper.h>
15
16 #include "dc.h"
17 #include "plane.h"
18
/* Tear down the DRM core state for @plane and free its backing allocation. */
static void tegra_plane_destroy(struct drm_plane *plane)
{
	struct tegra_plane *tegra = to_tegra_plane(plane);

	drm_plane_cleanup(plane);
	kfree(tegra);
}
26
27 static void tegra_plane_reset(struct drm_plane *plane)
28 {
29         struct tegra_plane *p = to_tegra_plane(plane);
30         struct tegra_plane_state *state;
31         unsigned int i;
32
33         if (plane->state)
34                 __drm_atomic_helper_plane_destroy_state(plane->state);
35
36         kfree(plane->state);
37         plane->state = NULL;
38
39         state = kzalloc(sizeof(*state), GFP_KERNEL);
40         if (state) {
41                 plane->state = &state->base;
42                 plane->state->plane = plane;
43                 plane->state->zpos = p->index;
44                 plane->state->normalized_zpos = p->index;
45
46                 for (i = 0; i < 3; i++)
47                         state->iova[i] = DMA_MAPPING_ERROR;
48         }
49 }
50
51 static struct drm_plane_state *
52 tegra_plane_atomic_duplicate_state(struct drm_plane *plane)
53 {
54         struct tegra_plane_state *state = to_tegra_plane_state(plane->state);
55         struct tegra_plane_state *copy;
56         unsigned int i;
57
58         copy = kmalloc(sizeof(*copy), GFP_KERNEL);
59         if (!copy)
60                 return NULL;
61
62         __drm_atomic_helper_plane_duplicate_state(plane, &copy->base);
63         copy->tiling = state->tiling;
64         copy->format = state->format;
65         copy->swap = state->swap;
66         copy->reflect_x = state->reflect_x;
67         copy->reflect_y = state->reflect_y;
68         copy->opaque = state->opaque;
69         copy->total_peak_memory_bandwidth = state->total_peak_memory_bandwidth;
70         copy->peak_memory_bandwidth = state->peak_memory_bandwidth;
71         copy->avg_memory_bandwidth = state->avg_memory_bandwidth;
72
73         for (i = 0; i < 2; i++)
74                 copy->blending[i] = state->blending[i];
75
76         for (i = 0; i < 3; i++) {
77                 copy->iova[i] = DMA_MAPPING_ERROR;
78                 copy->map[i] = NULL;
79         }
80
81         return &copy->base;
82 }
83
/* Undo tegra_plane_atomic_duplicate_state(): release core state and free. */
static void tegra_plane_atomic_destroy_state(struct drm_plane *plane,
					     struct drm_plane_state *state)
{
	__drm_atomic_helper_plane_destroy_state(state);
	kfree(state);
}
90
91 static bool tegra_plane_supports_sector_layout(struct drm_plane *plane)
92 {
93         struct drm_crtc *crtc;
94
95         drm_for_each_crtc(crtc, plane->dev) {
96                 if (plane->possible_crtcs & drm_crtc_mask(crtc)) {
97                         struct tegra_dc *dc = to_tegra_dc(crtc);
98
99                         if (!dc->soc->supports_sector_layout)
100                                 return false;
101                 }
102         }
103
104         return true;
105 }
106
107 static bool tegra_plane_format_mod_supported(struct drm_plane *plane,
108                                              uint32_t format,
109                                              uint64_t modifier)
110 {
111         const struct drm_format_info *info = drm_format_info(format);
112
113         if (modifier == DRM_FORMAT_MOD_LINEAR)
114                 return true;
115
116         /* check for the sector layout bit */
117         if (fourcc_mod_is_vendor(modifier, NVIDIA)) {
118                 if (modifier & DRM_FORMAT_MOD_NVIDIA_SECTOR_LAYOUT) {
119                         if (!tegra_plane_supports_sector_layout(plane))
120                                 return false;
121                 }
122         }
123
124         if (info->num_planes == 1)
125                 return true;
126
127         return false;
128 }
129
/* Plane functions shared by all Tegra DC planes. */
const struct drm_plane_funcs tegra_plane_funcs = {
	.update_plane = drm_atomic_helper_update_plane,
	.disable_plane = drm_atomic_helper_disable_plane,
	.destroy = tegra_plane_destroy,
	.reset = tegra_plane_reset,
	.atomic_duplicate_state = tegra_plane_atomic_duplicate_state,
	.atomic_destroy_state = tegra_plane_atomic_destroy_state,
	.format_mod_supported = tegra_plane_format_mod_supported,
};
139
/*
 * Map every framebuffer plane of @state for DMA by the display controller
 * and record the resulting addresses in state->iova[] and the mappings in
 * state->map[].
 *
 * Returns 0 on success or a negative error code; on failure, all mappings
 * established so far are released again and the slots reset.
 */
static int tegra_dc_pin(struct tegra_dc *dc, struct tegra_plane_state *state)
{
	unsigned int i;
	int err;

	for (i = 0; i < state->base.fb->format->num_planes; i++) {
		struct tegra_bo *bo = tegra_fb_get_plane(state->base.fb, i);
		struct host1x_bo_mapping *map;

		map = host1x_bo_pin(dc->dev, &bo->base, DMA_TO_DEVICE, &dc->client.cache);
		if (IS_ERR(map)) {
			err = PTR_ERR(map);
			goto unpin;
		}

		if (!dc->client.group) {
			/*
			 * The display controller needs contiguous memory, so
			 * fail if the buffer is discontiguous and we fail to
			 * map its SG table to a single contiguous chunk of
			 * I/O virtual memory.
			 */
			if (map->chunks > 1) {
				err = -EINVAL;
				goto unpin;
			}

			state->iova[i] = map->phys;
		} else {
			/* with an IOMMU group, use the BO's IOVA directly */
			state->iova[i] = bo->iova;
		}

		state->map[i] = map;
	}

	return 0;

unpin:
	dev_err(dc->dev, "failed to map plane %u: %d\n", i, err);

	/* roll back the planes that were successfully mapped above */
	while (i--) {
		host1x_bo_unpin(state->map[i]);
		state->iova[i] = DMA_MAPPING_ERROR;
		state->map[i] = NULL;
	}

	return err;
}
188
189 static void tegra_dc_unpin(struct tegra_dc *dc, struct tegra_plane_state *state)
190 {
191         unsigned int i;
192
193         for (i = 0; i < state->base.fb->format->num_planes; i++) {
194                 host1x_bo_unpin(state->map[i]);
195                 state->iova[i] = DMA_MAPPING_ERROR;
196                 state->map[i] = NULL;
197         }
198 }
199
/*
 * drm_plane_helper_funcs.prepare_fb implementation: attach the implicit
 * fences of the new framebuffer and pin its buffers for display DMA.
 */
int tegra_plane_prepare_fb(struct drm_plane *plane,
			   struct drm_plane_state *state)
{
	struct tegra_dc *dc = to_tegra_dc(state->crtc);
	int err;

	if (!state->fb)
		return 0;

	err = drm_gem_plane_helper_prepare_fb(plane, state);
	if (err < 0)
		return err;

	/*
	 * NOTE(review): unlike tegra_plane_cleanup_fb() there is no NULL
	 * check on dc here — presumably a state that has an fb always has
	 * a CRTC assigned by this point; confirm against the atomic
	 * commit helpers.
	 */
	return tegra_dc_pin(dc, to_tegra_plane_state(state));
}
215
216 void tegra_plane_cleanup_fb(struct drm_plane *plane,
217                             struct drm_plane_state *state)
218 {
219         struct tegra_dc *dc = to_tegra_dc(state->crtc);
220
221         if (dc)
222                 tegra_dc_unpin(dc, to_tegra_plane_state(state));
223 }
224
/*
 * Estimate the average and peak memory bandwidth this plane will consume
 * in its new state and store the values (in ICC units) in the
 * tegra_plane_state for later per-CRTC aggregation.
 *
 * Returns 0 on success (including the invisible-plane case) or -EINVAL if
 * the new CRTC state cannot be found.
 */
static int tegra_plane_calculate_memory_bandwidth(struct drm_plane_state *state)
{
	struct tegra_plane_state *tegra_state = to_tegra_plane_state(state);
	unsigned int i, bpp, dst_w, dst_h, src_w, src_h, mul;
	const struct tegra_dc_soc_info *soc;
	const struct drm_format_info *fmt;
	struct drm_crtc_state *crtc_state;
	u64 avg_bandwidth, peak_bandwidth;

	/* an invisible plane fetches nothing */
	if (!state->visible)
		return 0;

	crtc_state = drm_atomic_get_new_crtc_state(state->state, state->crtc);
	if (!crtc_state)
		return -EINVAL;

	/* src rectangle is in 16.16 fixed point, dst is in whole pixels */
	src_w = drm_rect_width(&state->src) >> 16;
	src_h = drm_rect_height(&state->src) >> 16;
	dst_w = drm_rect_width(&state->dst);
	dst_h = drm_rect_height(&state->dst);

	fmt = state->fb->format;
	soc = to_tegra_dc(state->crtc)->soc;

	/*
	 * Note that real memory bandwidth vary depending on format and
	 * memory layout, we are not taking that into account because small
	 * estimation error isn't important since bandwidth is rounded up
	 * anyway.
	 */
	for (i = 0, bpp = 0; i < fmt->num_planes; i++) {
		unsigned int bpp_plane = fmt->cpp[i] * 8;

		/*
		 * Sub-sampling is relevant for chroma planes only and vertical
		 * readouts are not cached, hence only horizontal sub-sampling
		 * matters.
		 */
		if (i > 0)
			bpp_plane /= fmt->hsub;

		bpp += bpp_plane;
	}

	/* average bandwidth in kbytes/sec */
	avg_bandwidth  = min(src_w, dst_w) * min(src_h, dst_h);
	avg_bandwidth *= drm_mode_vrefresh(&crtc_state->adjusted_mode);
	avg_bandwidth  = DIV_ROUND_UP(avg_bandwidth * bpp, 8) + 999;
	do_div(avg_bandwidth, 1000);

	/* mode.clock in kHz, peak bandwidth in kbytes/sec */
	peak_bandwidth = DIV_ROUND_UP(crtc_state->adjusted_mode.clock * bpp, 8);

	/*
	 * Tegra30/114 Memory Controller can't interleave DC memory requests
	 * for the tiled windows because DC uses 16-bytes atom, while DDR3
	 * uses 32-bytes atom.  Hence there is x2 memory overfetch for tiled
	 * framebuffer and DDR3 on these SoCs.
	 */
	if (soc->plane_tiled_memory_bandwidth_x2 &&
	    tegra_state->tiling.mode == TEGRA_BO_TILING_MODE_TILED)
		mul = 2;
	else
		mul = 1;

	/* ICC bandwidth in kbytes/sec */
	tegra_state->peak_memory_bandwidth = kBps_to_icc(peak_bandwidth) * mul;
	tegra_state->avg_memory_bandwidth  = kBps_to_icc(avg_bandwidth)  * mul;

	return 0;
}
296
297 int tegra_plane_state_add(struct tegra_plane *plane,
298                           struct drm_plane_state *state)
299 {
300         struct drm_crtc_state *crtc_state;
301         struct tegra_dc_state *tegra;
302         int err;
303
304         /* Propagate errors from allocation or locking failures. */
305         crtc_state = drm_atomic_get_crtc_state(state->state, state->crtc);
306         if (IS_ERR(crtc_state))
307                 return PTR_ERR(crtc_state);
308
309         /* Check plane state for visibility and calculate clipping bounds */
310         err = drm_atomic_helper_check_plane_state(state, crtc_state,
311                                                   0, INT_MAX, true, true);
312         if (err < 0)
313                 return err;
314
315         err = tegra_plane_calculate_memory_bandwidth(state);
316         if (err < 0)
317                 return err;
318
319         tegra = to_dc_state(crtc_state);
320
321         tegra->planes |= WIN_A_ACT_REQ << plane->index;
322
323         return 0;
324 }
325
/*
 * Translate a DRM fourcc into the corresponding window color depth and,
 * where one is needed, the byte-swap setting of the display controller.
 *
 * Returns 0 on success or -EINVAL if the fourcc is unsupported, or if it
 * requires a byte swap while @swap is NULL.
 */
int tegra_plane_format(u32 fourcc, u32 *format, u32 *swap)
{
	/* assume no swapping of fetched data */
	if (swap)
		*swap = BYTE_SWAP_NOSWAP;

	switch (fourcc) {
	case DRM_FORMAT_ARGB4444:
		*format = WIN_COLOR_DEPTH_B4G4R4A4;
		break;

	case DRM_FORMAT_ARGB1555:
		*format = WIN_COLOR_DEPTH_B5G5R5A1;
		break;

	case DRM_FORMAT_RGB565:
		*format = WIN_COLOR_DEPTH_B5G6R5;
		break;

	case DRM_FORMAT_RGBA5551:
		*format = WIN_COLOR_DEPTH_A1B5G5R5;
		break;

	case DRM_FORMAT_ARGB8888:
		*format = WIN_COLOR_DEPTH_B8G8R8A8;
		break;

	case DRM_FORMAT_ABGR8888:
		*format = WIN_COLOR_DEPTH_R8G8B8A8;
		break;

	case DRM_FORMAT_ABGR4444:
		*format = WIN_COLOR_DEPTH_R4G4B4A4;
		break;

	case DRM_FORMAT_ABGR1555:
		*format = WIN_COLOR_DEPTH_R5G5B5A;
		break;

	case DRM_FORMAT_BGRA5551:
		*format = WIN_COLOR_DEPTH_AR5G5B5;
		break;

	case DRM_FORMAT_XRGB1555:
		*format = WIN_COLOR_DEPTH_B5G5R5X1;
		break;

	case DRM_FORMAT_RGBX5551:
		*format = WIN_COLOR_DEPTH_X1B5G5R5;
		break;

	case DRM_FORMAT_XBGR1555:
		*format = WIN_COLOR_DEPTH_R5G5B5X1;
		break;

	case DRM_FORMAT_BGRX5551:
		*format = WIN_COLOR_DEPTH_X1R5G5B5;
		break;

	case DRM_FORMAT_BGR565:
		*format = WIN_COLOR_DEPTH_R5G6B5;
		break;

	case DRM_FORMAT_BGRA8888:
		*format = WIN_COLOR_DEPTH_A8R8G8B8;
		break;

	case DRM_FORMAT_RGBA8888:
		*format = WIN_COLOR_DEPTH_A8B8G8R8;
		break;

	case DRM_FORMAT_XRGB8888:
		*format = WIN_COLOR_DEPTH_B8G8R8X8;
		break;

	case DRM_FORMAT_XBGR8888:
		*format = WIN_COLOR_DEPTH_R8G8B8X8;
		break;

	case DRM_FORMAT_UYVY:
		*format = WIN_COLOR_DEPTH_YCbCr422;
		break;

	/*
	 * The remaining packed YUV variants are fetched as YCbCr422 with a
	 * byte-swap applied, so they require a valid @swap pointer.
	 */
	case DRM_FORMAT_YUYV:
		if (!swap)
			return -EINVAL;

		*format = WIN_COLOR_DEPTH_YCbCr422;
		*swap = BYTE_SWAP_SWAP2;
		break;

	case DRM_FORMAT_YVYU:
		if (!swap)
			return -EINVAL;

		*format = WIN_COLOR_DEPTH_YCbCr422;
		*swap = BYTE_SWAP_SWAP4;
		break;

	case DRM_FORMAT_VYUY:
		if (!swap)
			return -EINVAL;

		*format = WIN_COLOR_DEPTH_YCbCr422;
		*swap = BYTE_SWAP_SWAP4HW;
		break;

	case DRM_FORMAT_YUV420:
		*format = WIN_COLOR_DEPTH_YCbCr420P;
		break;

	case DRM_FORMAT_YUV422:
		*format = WIN_COLOR_DEPTH_YCbCr422P;
		break;

	case DRM_FORMAT_YUV444:
		*format = WIN_COLOR_DEPTH_YCbCr444P;
		break;

	case DRM_FORMAT_NV12:
		*format = WIN_COLOR_DEPTH_YCbCr420SP;
		break;

	case DRM_FORMAT_NV21:
		*format = WIN_COLOR_DEPTH_YCrCb420SP;
		break;

	case DRM_FORMAT_NV16:
		*format = WIN_COLOR_DEPTH_YCbCr422SP;
		break;

	case DRM_FORMAT_NV61:
		*format = WIN_COLOR_DEPTH_YCrCb422SP;
		break;

	case DRM_FORMAT_NV24:
		*format = WIN_COLOR_DEPTH_YCbCr444SP;
		break;

	case DRM_FORMAT_NV42:
		*format = WIN_COLOR_DEPTH_YCrCb444SP;
		break;

	default:
		return -EINVAL;
	}

	return 0;
}
475
476 bool tegra_plane_format_is_indexed(unsigned int format)
477 {
478         switch (format) {
479         case WIN_COLOR_DEPTH_P1:
480         case WIN_COLOR_DEPTH_P2:
481         case WIN_COLOR_DEPTH_P4:
482         case WIN_COLOR_DEPTH_P8:
483                 return true;
484         }
485
486         return false;
487 }
488
489 bool tegra_plane_format_is_yuv(unsigned int format, unsigned int *planes, unsigned int *bpc)
490 {
491         switch (format) {
492         case WIN_COLOR_DEPTH_YCbCr422:
493         case WIN_COLOR_DEPTH_YUV422:
494                 if (planes)
495                         *planes = 1;
496
497                 if (bpc)
498                         *bpc = 8;
499
500                 return true;
501
502         case WIN_COLOR_DEPTH_YCbCr420P:
503         case WIN_COLOR_DEPTH_YUV420P:
504         case WIN_COLOR_DEPTH_YCbCr422P:
505         case WIN_COLOR_DEPTH_YUV422P:
506         case WIN_COLOR_DEPTH_YCbCr422R:
507         case WIN_COLOR_DEPTH_YUV422R:
508         case WIN_COLOR_DEPTH_YCbCr422RA:
509         case WIN_COLOR_DEPTH_YUV422RA:
510         case WIN_COLOR_DEPTH_YCbCr444P:
511                 if (planes)
512                         *planes = 3;
513
514                 if (bpc)
515                         *bpc = 8;
516
517                 return true;
518
519         case WIN_COLOR_DEPTH_YCrCb420SP:
520         case WIN_COLOR_DEPTH_YCbCr420SP:
521         case WIN_COLOR_DEPTH_YCrCb422SP:
522         case WIN_COLOR_DEPTH_YCbCr422SP:
523         case WIN_COLOR_DEPTH_YCrCb444SP:
524         case WIN_COLOR_DEPTH_YCbCr444SP:
525                 if (planes)
526                         *planes = 2;
527
528                 if (bpc)
529                         *bpc = 8;
530
531                 return true;
532         }
533
534         if (planes)
535                 *planes = 1;
536
537         return false;
538 }
539
540 static bool __drm_format_has_alpha(u32 format)
541 {
542         switch (format) {
543         case DRM_FORMAT_ARGB1555:
544         case DRM_FORMAT_RGBA5551:
545         case DRM_FORMAT_ABGR8888:
546         case DRM_FORMAT_ARGB8888:
547                 return true;
548         }
549
550         return false;
551 }
552
/*
 * Map an opaque window format to the alpha format used to emulate it (see
 * tegra_plane_setup_opacity()). YUV formats and B5G6R5, which have no
 * alpha variant, are passed through unchanged.
 *
 * Returns 0 on success or -EINVAL if no alpha equivalent exists.
 */
static int tegra_plane_format_get_alpha(unsigned int opaque,
					unsigned int *alpha)
{
	if (tegra_plane_format_is_yuv(opaque, NULL, NULL)) {
		*alpha = opaque;
		return 0;
	}

	switch (opaque) {
	case WIN_COLOR_DEPTH_B5G5R5X1:
		*alpha = WIN_COLOR_DEPTH_B5G5R5A1;
		return 0;

	case WIN_COLOR_DEPTH_X1B5G5R5:
		*alpha = WIN_COLOR_DEPTH_A1B5G5R5;
		return 0;

	case WIN_COLOR_DEPTH_R8G8B8X8:
		*alpha = WIN_COLOR_DEPTH_R8G8B8A8;
		return 0;

	case WIN_COLOR_DEPTH_B8G8R8X8:
		*alpha = WIN_COLOR_DEPTH_B8G8R8A8;
		return 0;

	case WIN_COLOR_DEPTH_B5G6R5:
		*alpha = opaque;
		return 0;
	}

	return -EINVAL;
}
585
586 /*
587  * This is applicable to Tegra20 and Tegra30 only where the opaque formats can
588  * be emulated using the alpha formats and alpha blending disabled.
589  */
590 static int tegra_plane_setup_opacity(struct tegra_plane *tegra,
591                                      struct tegra_plane_state *state)
592 {
593         unsigned int format;
594         int err;
595
596         switch (state->format) {
597         case WIN_COLOR_DEPTH_B5G5R5A1:
598         case WIN_COLOR_DEPTH_A1B5G5R5:
599         case WIN_COLOR_DEPTH_R8G8B8A8:
600         case WIN_COLOR_DEPTH_B8G8R8A8:
601                 state->opaque = false;
602                 break;
603
604         default:
605                 err = tegra_plane_format_get_alpha(state->format, &format);
606                 if (err < 0)
607                         return err;
608
609                 state->format = format;
610                 state->opaque = true;
611                 break;
612         }
613
614         return 0;
615 }
616
/*
 * Check whether this plane's zpos or opacity changed and, if so, pull all
 * sibling planes on the same CRTC into the atomic state so that their
 * blending configuration can be recomputed.
 *
 * Returns 0 if nothing changed, 1 if sibling planes were added to the
 * commit, or a negative error code.
 */
static int tegra_plane_check_transparency(struct tegra_plane *tegra,
					  struct tegra_plane_state *state)
{
	struct drm_plane_state *old, *plane_state;
	struct drm_plane *plane;

	old = drm_atomic_get_old_plane_state(state->base.state, &tegra->base);

	/* check if zpos / transparency changed */
	if (old->normalized_zpos == state->base.normalized_zpos &&
	    to_tegra_plane_state(old)->opaque == state->opaque)
		return 0;

	/* include all sibling planes into this commit */
	drm_for_each_plane(plane, tegra->base.dev) {
		struct tegra_plane *p = to_tegra_plane(plane);

		/* skip this plane and planes on different CRTCs */
		if (p == tegra || p->dc != tegra->dc)
			continue;

		/* may fail with -EDEADLK, forcing the commit to be restarted */
		plane_state = drm_atomic_get_plane_state(state->base.state,
							 plane);
		if (IS_ERR(plane_state))
			return PTR_ERR(plane_state);
	}

	return 1;
}
646
647 static unsigned int tegra_plane_get_overlap_index(struct tegra_plane *plane,
648                                                   struct tegra_plane *other)
649 {
650         unsigned int index = 0, i;
651
652         WARN_ON(plane == other);
653
654         for (i = 0; i < 3; i++) {
655                 if (i == plane->index)
656                         continue;
657
658                 if (i == other->index)
659                         break;
660
661                 index++;
662         }
663
664         return index;
665 }
666
667 static void tegra_plane_update_transparency(struct tegra_plane *tegra,
668                                             struct tegra_plane_state *state)
669 {
670         struct drm_plane_state *new;
671         struct drm_plane *plane;
672         unsigned int i;
673
674         for_each_new_plane_in_state(state->base.state, plane, new, i) {
675                 struct tegra_plane *p = to_tegra_plane(plane);
676                 unsigned index;
677
678                 /* skip this plane and planes on different CRTCs */
679                 if (p == tegra || p->dc != tegra->dc)
680                         continue;
681
682                 index = tegra_plane_get_overlap_index(tegra, p);
683
684                 if (new->fb && __drm_format_has_alpha(new->fb->format->format))
685                         state->blending[index].alpha = true;
686                 else
687                         state->blending[index].alpha = false;
688
689                 if (new->normalized_zpos > state->base.normalized_zpos)
690                         state->blending[index].top = true;
691                 else
692                         state->blending[index].top = false;
693
694                 /*
695                  * Missing framebuffer means that plane is disabled, in this
696                  * case mark B / C window as top to be able to differentiate
697                  * windows indices order in regards to zPos for the middle
698                  * window X / Y registers programming.
699                  */
700                 if (!new->fb)
701                         state->blending[index].top = (index == 1);
702         }
703 }
704
/*
 * Recompute the blending state for all planes on this plane's CRTC if the
 * zpos or transparency of any of them changed.
 *
 * Returns 0 on success (including the nothing-changed case) or a negative
 * error code from pulling sibling planes into the commit.
 */
static int tegra_plane_setup_transparency(struct tegra_plane *tegra,
					  struct tegra_plane_state *state)
{
	struct tegra_plane_state *tegra_state;
	struct drm_plane_state *new;
	struct drm_plane *plane;
	int err;

	/*
	 * If planes zpos / transparency changed, sibling planes blending
	 * state may require adjustment and in this case they will be included
	 * into this atom commit, otherwise blending state is unchanged.
	 */
	err = tegra_plane_check_transparency(tegra, state);
	if (err <= 0)
		return err;

	/*
	 * All planes are now in the atomic state, walk them up and update
	 * transparency state for each plane.
	 */
	drm_for_each_plane(plane, tegra->base.dev) {
		struct tegra_plane *p = to_tegra_plane(plane);

		/* skip planes on different CRTCs */
		if (p->dc != tegra->dc)
			continue;

		new = drm_atomic_get_new_plane_state(state->base.state, plane);
		tegra_state = to_tegra_plane_state(new);

		/*
		 * There is no need to update blending state for the disabled
		 * plane.
		 */
		if (new->fb)
			tegra_plane_update_transparency(p, tegra_state);
	}

	return 0;
}
746
/*
 * Set up the legacy (Tegra20/30) blending emulation: opaque formats are
 * mapped to alpha formats first, then blending state is refreshed.
 */
int tegra_plane_setup_legacy_state(struct tegra_plane *tegra,
				   struct tegra_plane_state *state)
{
	int err;

	err = tegra_plane_setup_opacity(tegra, state);
	if (err < 0)
		return err;

	return tegra_plane_setup_transparency(tegra, state);
}
762
/*
 * Interconnect path names, indexed by plane->index: windows A/B/C in the
 * first three slots, the cursor in the last. NULL entries have no
 * dedicated memory client.
 */
static const char * const tegra_plane_icc_names[TEGRA_DC_LEGACY_PLANES_NUM] = {
	"wina", "winb", "winc", NULL, NULL, NULL, "cursor",
};
766
/*
 * Acquire the interconnect paths (memory clients) used by this plane so
 * that memory bandwidth requests can be issued for it later on.
 *
 * Returns 0 on success, -EINVAL for a plane without an ICC name, or a
 * negative error code from devm_of_icc_get() (logged via dev_err_probe).
 */
int tegra_plane_interconnect_init(struct tegra_plane *plane)
{
	const char *icc_name = tegra_plane_icc_names[plane->index];
	struct device *dev = plane->dc->dev;
	struct tegra_dc *dc = plane->dc;
	int err;

	if (WARN_ON(plane->index >= TEGRA_DC_LEGACY_PLANES_NUM) ||
	    WARN_ON(!tegra_plane_icc_names[plane->index]))
		return -EINVAL;

	plane->icc_mem = devm_of_icc_get(dev, icc_name);
	err = PTR_ERR_OR_ZERO(plane->icc_mem);
	if (err)
		return dev_err_probe(dev, err, "failed to get %s interconnect\n",
				     icc_name);

	/* plane B on T20/30 has a dedicated memory client for a 6-tap vertical filter */
	if (plane->index == 1 && dc->soc->has_win_b_vfilter_mem_client) {
		plane->icc_mem_vfilter = devm_of_icc_get(dev, "winb-vfilter");
		err = PTR_ERR_OR_ZERO(plane->icc_mem_vfilter);
		if (err)
			return dev_err_probe(dev, err, "failed to get %s interconnect\n",
					     "winb-vfilter");
	}

	return 0;
}
This page took 0.076976 seconds and 4 git commands to generate.