1 // SPDX-License-Identifier: GPL-2.0-only
3 * Copyright (C) 2017 NVIDIA CORPORATION. All rights reserved.
7 #include <linux/delay.h>
8 #include <linux/dma-mapping.h>
9 #include <linux/host1x.h>
10 #include <linux/module.h>
12 #include <linux/of_device.h>
13 #include <linux/of_graph.h>
14 #include <linux/platform_device.h>
15 #include <linux/pm_runtime.h>
16 #include <linux/reset.h>
18 #include <drm/drm_atomic.h>
19 #include <drm/drm_atomic_helper.h>
20 #include <drm/drm_blend.h>
21 #include <drm/drm_fourcc.h>
22 #include <drm/drm_framebuffer.h>
23 #include <drm/drm_probe_helper.h>
/* Pixel formats advertised for shared (window-group) planes.
 * NOTE(review): the table entries are not visible in this extract. */
31 static const u32 tegra_shared_plane_formats[] = {
/* Framebuffer modifiers accepted by shared planes, terminated by
 * DRM_FORMAT_MOD_INVALID. */
55 static const u64 tegra_shared_plane_modifiers[] = {
56 DRM_FORMAT_MOD_LINEAR,
/* 16Bx2 block-linear tiling variants 0-5. */
57 DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(0),
58 DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(1),
59 DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(2),
60 DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(3),
61 DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(4),
62 DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(5),
64 * The GPU sector layout is only supported on Tegra194, but these will
65 * be filtered out later on by ->format_mod_supported() on SoCs where
/* Same six block variants combined with the GPU sector-layout flag. */
68 DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(0) | DRM_FORMAT_MOD_NVIDIA_SECTOR_LAYOUT,
69 DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(1) | DRM_FORMAT_MOD_NVIDIA_SECTOR_LAYOUT,
70 DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(2) | DRM_FORMAT_MOD_NVIDIA_SECTOR_LAYOUT,
71 DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(3) | DRM_FORMAT_MOD_NVIDIA_SECTOR_LAYOUT,
72 DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(4) | DRM_FORMAT_MOD_NVIDIA_SECTOR_LAYOUT,
73 DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(5) | DRM_FORMAT_MOD_NVIDIA_SECTOR_LAYOUT,
75 DRM_FORMAT_MOD_INVALID
/*
 * Translate an architectural window register offset into this plane's
 * register aperture.  Three ranges are relocated to compact local offsets
 * before plane->offset is added; any offset outside those ranges triggers
 * dev_WARN() and is used relative to plane->offset unmodified.
 */
78 static inline unsigned int tegra_plane_offset(struct tegra_plane *plane,
/* 0x500..0x581 maps to local 0x000.. */
81 if (offset >= 0x500 && offset <= 0x581) {
82 offset = 0x000 + (offset - 0x500);
83 return plane->offset + offset;
/* 0x700..0x73c maps to local 0x180.. */
86 if (offset >= 0x700 && offset <= 0x73c) {
87 offset = 0x180 + (offset - 0x700);
88 return plane->offset + offset;
/* 0x800..0x83e maps to local 0x1c0.. */
91 if (offset >= 0x800 && offset <= 0x83e) {
92 offset = 0x1c0 + (offset - 0x800);
93 return plane->offset + offset;
96 dev_WARN(plane->dc->dev, "invalid offset: %x\n", offset);
98 return plane->offset + offset;
/* Read a plane register through the owning display controller, remapping
 * the offset via tegra_plane_offset(). */
101 static inline u32 tegra_plane_readl(struct tegra_plane *plane,
104 return tegra_dc_readl(plane->dc, tegra_plane_offset(plane, offset));
/* Write a plane register through the owning display controller, remapping
 * the offset via tegra_plane_offset(). */
107 static inline void tegra_plane_writel(struct tegra_plane *plane, u32 value,
110 tegra_dc_writel(plane->dc, value, tegra_plane_offset(plane, offset));
/*
 * Power up a window group on first use: resume the parent host1x client
 * and release the group's reset.  Serialized by wgrp->lock; the usecount
 * check makes this refcounted (increment not visible in this extract).
 */
113 static int tegra_windowgroup_enable(struct tegra_windowgroup *wgrp)
117 mutex_lock(&wgrp->lock);
119 if (wgrp->usecount == 0) {
120 err = host1x_client_resume(wgrp->parent);
/* NOTE(review): error path after this report is not visible here. */
122 dev_err(wgrp->parent->dev, "failed to resume: %d\n", err);
126 reset_control_deassert(wgrp->rst);
132 mutex_unlock(&wgrp->lock);
/*
 * Power down a window group when its last user goes away: assert the
 * group's reset and suspend the parent host1x client.  Serialized by
 * wgrp->lock (usecount decrement not visible in this extract).
 */
136 static void tegra_windowgroup_disable(struct tegra_windowgroup *wgrp)
140 mutex_lock(&wgrp->lock);
142 if (wgrp->usecount == 1) {
143 err = reset_control_assert(wgrp->rst);
145 pr_err("failed to assert reset for window group %u\n",
149 host1x_client_suspend(wgrp->parent);
153 mutex_unlock(&wgrp->lock);
/*
 * Enable every window group of the hub.  Done unconditionally because, as
 * the XXX note says, there is currently no good point at which individual
 * groups could be toggled.
 */
156 int tegra_display_hub_prepare(struct tegra_display_hub *hub)
161 * XXX Enabling/disabling windowgroups needs to happen when the owner
162 * display controller is disabled. There's currently no good point at
163 * which this could be executed, so unconditionally enable all window
166 for (i = 0; i < hub->soc->num_wgrps; i++) {
167 struct tegra_windowgroup *wgrp = &hub->wgrps[i];
169 /* Skip orphaned window group whose parent DC is disabled */
171 tegra_windowgroup_enable(wgrp);
/*
 * Counterpart of tegra_display_hub_prepare(): disable every window group
 * of the hub.
 */
177 void tegra_display_hub_cleanup(struct tegra_display_hub *hub)
182 * XXX Remove this once window groups can be more fine-grainedly
183 * enabled and disabled.
185 for (i = 0; i < hub->soc->num_wgrps; i++) {
186 struct tegra_windowgroup *wgrp = &hub->wgrps[i];
188 /* Skip orphaned window group whose parent DC is disabled */
190 tegra_windowgroup_disable(wgrp);
/*
 * Latch pending state for this shared plane: set COMMON_UPDATE plus the
 * window's update bit in DC_CMD_STATE_CONTROL, then poll for up to one
 * second until the hardware clears those bits.
 */
194 static void tegra_shared_plane_update(struct tegra_plane *plane)
196 struct tegra_dc *dc = plane->dc;
197 unsigned long timeout;
200 mask = COMMON_UPDATE | WIN_A_UPDATE << plane->base.index;
201 tegra_dc_writel(dc, mask, DC_CMD_STATE_CONTROL);
203 timeout = jiffies + msecs_to_jiffies(1000);
205 while (time_before(jiffies, timeout)) {
206 value = tegra_dc_readl(dc, DC_CMD_STATE_CONTROL);
207 if ((value & mask) == 0)
/* Hardware hasn't consumed the request yet; back off briefly. */
210 usleep_range(100, 400);
/*
 * Request activation of the latched state: set COMMON_ACTREQ plus the
 * window's activation-request bit, then poll for up to one second until
 * the hardware clears those bits.
 */
214 static void tegra_shared_plane_activate(struct tegra_plane *plane)
216 struct tegra_dc *dc = plane->dc;
217 unsigned long timeout;
220 mask = COMMON_ACTREQ | WIN_A_ACT_REQ << plane->base.index;
221 tegra_dc_writel(dc, mask, DC_CMD_STATE_CONTROL);
223 timeout = jiffies + msecs_to_jiffies(1000);
225 while (time_before(jiffies, timeout)) {
226 value = tegra_dc_readl(dc, DC_CMD_STATE_CONTROL);
227 if ((value & mask) == 0)
/* Request still pending; back off briefly before re-reading. */
230 usleep_range(100, 400);
/* Return the OWNER field of the plane's WINDOWGROUP_SET_CONTROL register,
 * read through the given display controller. */
235 tegra_shared_plane_get_owner(struct tegra_plane *plane, struct tegra_dc *dc)
237 unsigned int offset =
238 tegra_plane_offset(plane, DC_WIN_CORE_WINDOWGROUP_SET_CONTROL);
240 return tegra_dc_readl(dc, offset) & OWNER_MASK;
/*
 * Check whether the given head currently owns the shared plane according
 * to the hardware OWNER field.  Warns when hardware says "owned" but the
 * software attachment disagrees (the attachment check itself is on lines
 * not visible in this extract).
 */
243 static bool tegra_dc_owns_shared_plane(struct tegra_dc *dc,
244 struct tegra_plane *plane)
246 struct device *dev = dc->dev;
248 if (tegra_shared_plane_get_owner(plane, dc) == dc->pipe) {
252 dev_WARN(dev, "head %u owns window %u but is not attached\n",
253 dc->pipe, plane->index);
/*
 * Hand ownership of a shared plane to @new (or release it when @new is
 * NULL).  Refuses (with a warning) to steal a window already owned by a
 * different head; releasing from an unowned window is tolerated with a
 * debug message because it is harmless.
 */
259 static int tegra_shared_plane_set_owner(struct tegra_plane *plane,
260 struct tegra_dc *new)
262 unsigned int offset =
263 tegra_plane_offset(plane, DC_WIN_CORE_WINDOWGROUP_SET_CONTROL);
/* Access the register via whichever head is valid for this transition. */
264 struct tegra_dc *old = plane->dc, *dc = new ? new : old;
265 struct device *dev = new ? new->dev : old->dev;
266 unsigned int owner, index = plane->index;
269 value = tegra_dc_readl(dc, offset);
270 owner = value & OWNER_MASK;
/* OWNER_MASK in the owner field means "no owner". */
272 if (new && (owner != OWNER_MASK && owner != new->pipe)) {
273 dev_WARN(dev, "window %u owned by head %u\n", index, owner);
278 * This seems to happen whenever the head has been disabled with one
279 * or more windows being active. This is harmless because we'll just
280 * reassign the window to the new head anyway.
282 if (old && owner == OWNER_MASK)
283 dev_dbg(dev, "window %u not owned by head %u but %u\n", index,
286 value &= ~OWNER_MASK;
289 value |= OWNER(new->pipe);
293 tegra_dc_writel(dc, value, offset);
/*
 * Program the input-scaler filter coefficients for this plane.  The table
 * holds 192 words, written as 3 ratio sets x 16 rows x 4 columns; each
 * write pairs the table index with its data word via COEFF_INDEX() /
 * COEFF_DATA().  Coefficient values are opaque hardware filter taps.
 */
300 static void tegra_shared_plane_setup_scaler(struct tegra_plane *plane)
302 static const unsigned int coeffs[192] = {
303 0x00000000, 0x3c70e400, 0x3bb037e4, 0x0c51cc9c,
304 0x00100001, 0x3bf0dbfa, 0x3d00f406, 0x3fe003ff,
305 0x00300002, 0x3b80cbf5, 0x3da1040d, 0x3fb003fe,
306 0x00400002, 0x3b20bff1, 0x3e511015, 0x3f9003fc,
307 0x00500002, 0x3ad0b3ed, 0x3f21201d, 0x3f5003fb,
308 0x00500003, 0x3aa0a3e9, 0x3ff13026, 0x3f2007f9,
309 0x00500403, 0x3a7097e6, 0x00e1402f, 0x3ee007f7,
310 0x00500403, 0x3a608be4, 0x01d14c38, 0x3ea00bf6,
311 0x00500403, 0x3a507fe2, 0x02e15c42, 0x3e500ff4,
312 0x00500402, 0x3a6073e1, 0x03f16c4d, 0x3e000ff2,
313 0x00400402, 0x3a706be0, 0x05117858, 0x3db013f0,
314 0x00300402, 0x3a905fe0, 0x06318863, 0x3d6017ee,
315 0x00300402, 0x3ab057e0, 0x0771986e, 0x3d001beb,
316 0x00200001, 0x3af04fe1, 0x08a1a47a, 0x3cb023e9,
317 0x00100001, 0x3b2047e2, 0x09e1b485, 0x3c6027e7,
318 0x00100000, 0x3b703fe2, 0x0b11c091, 0x3c002fe6,
319 0x3f203800, 0x0391103f, 0x3ff0a014, 0x0811606c,
320 0x3f2037ff, 0x0351083c, 0x03e11842, 0x3f203c00,
321 0x3f302fff, 0x03010439, 0x04311c45, 0x3f104401,
322 0x3f302fff, 0x02c0fc35, 0x04812448, 0x3f104802,
323 0x3f4027ff, 0x0270f832, 0x04c1284b, 0x3f205003,
324 0x3f4023ff, 0x0230f030, 0x0511304e, 0x3f205403,
325 0x3f601fff, 0x01f0e82d, 0x05613451, 0x3f205c04,
326 0x3f701bfe, 0x01b0e02a, 0x05a13c54, 0x3f306006,
327 0x3f7017fe, 0x0170d827, 0x05f14057, 0x3f406807,
328 0x3f8017ff, 0x0140d424, 0x0641445a, 0x3f406c08,
329 0x3fa013ff, 0x0100cc22, 0x0681485d, 0x3f507409,
330 0x3fa00fff, 0x00d0c41f, 0x06d14c60, 0x3f607c0b,
331 0x3fc00fff, 0x0090bc1c, 0x07115063, 0x3f80840c,
332 0x3fd00bff, 0x0070b41a, 0x07515465, 0x3f908c0e,
333 0x3fe007ff, 0x0040b018, 0x07915868, 0x3fb0900f,
334 0x3ff00400, 0x0010a816, 0x07d15c6a, 0x3fd09811,
335 0x00a04c0e, 0x0460f442, 0x0240a827, 0x05c15859,
336 0x0090440d, 0x0440f040, 0x0480fc43, 0x00b05010,
337 0x0080400c, 0x0410ec3e, 0x04910044, 0x00d05411,
338 0x0070380b, 0x03f0e83d, 0x04b10846, 0x00e05812,
339 0x0060340a, 0x03d0e43b, 0x04d10c48, 0x00f06013,
340 0x00503009, 0x03b0e039, 0x04e11449, 0x01106415,
341 0x00402c08, 0x0390d838, 0x05011c4b, 0x01206c16,
342 0x00302807, 0x0370d436, 0x0511204c, 0x01407018,
343 0x00302406, 0x0340d034, 0x0531244e, 0x01507419,
344 0x00202005, 0x0320cc32, 0x05412c50, 0x01707c1b,
345 0x00101c04, 0x0300c431, 0x05613451, 0x0180801d,
346 0x00101803, 0x02e0c02f, 0x05713853, 0x01a0881e,
347 0x00101002, 0x02b0bc2d, 0x05814054, 0x01c08c20,
348 0x00000c02, 0x02a0b82c, 0x05914455, 0x01e09421,
349 0x00000801, 0x0280b02a, 0x05a14c57, 0x02009c23,
350 0x00000400, 0x0260ac28, 0x05b15458, 0x0220a025,
352 unsigned int ratio, row, column;
354 for (ratio = 0; ratio <= 2; ratio++) {
355 for (row = 0; row <= 15; row++) {
356 for (column = 0; column <= 3; column++) {
/* Hardware index: ratio in bits 6+, row in bits 2-5, column in 0-1. */
357 unsigned int index = (ratio << 6) + (row << 2) + column;
360 value = COEFF_INDEX(index) | COEFF_DATA(coeffs[index]);
361 tegra_plane_writel(plane, value,
362 DC_WIN_WINDOWGROUP_SET_INPUT_SCALER_COEFF);
/*
 * Attach a shared plane to the given head and apply the driver's default
 * window configuration: line-buffer mode, fetch metering, watermark and
 * pipe metering, mempool allocation, thread grouping and the scaler
 * coefficient table.  Finishes by latching and activating the new state.
 */
368 static void tegra_dc_assign_shared_plane(struct tegra_dc *dc,
369 struct tegra_plane *plane)
/* Take ownership only if this head doesn't already hold the window. */
374 if (!tegra_dc_owns_shared_plane(dc, plane)) {
375 err = tegra_shared_plane_set_owner(plane, dc);
380 value = tegra_plane_readl(plane, DC_WIN_CORE_IHUB_LINEBUF_CONFIG);
381 value |= MODE_FOUR_LINES;
382 tegra_plane_writel(plane, value, DC_WIN_CORE_IHUB_LINEBUF_CONFIG);
384 value = tegra_plane_readl(plane, DC_WIN_CORE_IHUB_WGRP_FETCH_METER);
386 tegra_plane_writel(plane, value, DC_WIN_CORE_IHUB_WGRP_FETCH_METER);
388 /* disable watermark */
389 value = tegra_plane_readl(plane, DC_WIN_CORE_IHUB_WGRP_LATENCY_CTLA);
390 value &= ~LATENCY_CTL_MODE_ENABLE;
391 tegra_plane_writel(plane, value, DC_WIN_CORE_IHUB_WGRP_LATENCY_CTLA);
393 value = tegra_plane_readl(plane, DC_WIN_CORE_IHUB_WGRP_LATENCY_CTLB);
394 value |= WATERMARK_MASK;
395 tegra_plane_writel(plane, value, DC_WIN_CORE_IHUB_WGRP_LATENCY_CTLB);
/* Zero the pipe meter (read value is discarded and fully replaced). */
398 value = tegra_plane_readl(plane, DC_WIN_CORE_PRECOMP_WGRP_PIPE_METER);
399 value = PIPE_METER_INT(0) | PIPE_METER_FRAC(0);
400 tegra_plane_writel(plane, value, DC_WIN_CORE_PRECOMP_WGRP_PIPE_METER);
402 /* mempool entries */
403 value = tegra_plane_readl(plane, DC_WIN_CORE_IHUB_WGRP_POOL_CONFIG);
404 value = MEMPOOL_ENTRIES(0x331);
405 tegra_plane_writel(plane, value, DC_WIN_CORE_IHUB_WGRP_POOL_CONFIG);
/* One thread group per window, keyed by the plane index. */
407 value = tegra_plane_readl(plane, DC_WIN_CORE_IHUB_THREAD_GROUP);
408 value &= ~THREAD_NUM_MASK;
409 value |= THREAD_NUM(plane->base.index);
410 value |= THREAD_GROUP_ENABLE;
411 tegra_plane_writel(plane, value, DC_WIN_CORE_IHUB_THREAD_GROUP);
413 tegra_shared_plane_setup_scaler(plane);
415 tegra_shared_plane_update(plane);
416 tegra_shared_plane_activate(plane);
/* Detach a shared plane from its head by clearing the hardware owner. */
419 static void tegra_dc_remove_shared_plane(struct tegra_dc *dc,
420 struct tegra_plane *plane)
422 tegra_shared_plane_set_owner(plane, NULL);
/*
 * Atomic ->atomic_check() for shared planes: validate pixel format,
 * tiling mode and sector layout against the target DC's capabilities,
 * reject mismatched U/V pitches, then register the plane state with the
 * owning CRTC state via tegra_plane_state_add().
 */
425 static int tegra_shared_plane_atomic_check(struct drm_plane *plane,
426 struct drm_atomic_state *state)
428 struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state,
430 struct tegra_plane_state *plane_state = to_tegra_plane_state(new_plane_state);
431 struct tegra_shared_plane *tegra = to_tegra_shared_plane(plane);
432 struct tegra_bo_tiling *tiling = &plane_state->tiling;
433 struct tegra_dc *dc = to_tegra_dc(new_plane_state->crtc);
436 /* no need for further checks if the plane is being disabled */
437 if (!new_plane_state->crtc || !new_plane_state->fb)
440 err = tegra_plane_format(new_plane_state->fb->format->format,
441 &plane_state->format,
446 err = tegra_fb_get_tiling(new_plane_state->fb, tiling);
450 if (tiling->mode == TEGRA_BO_TILING_MODE_BLOCK &&
451 !dc->soc->supports_block_linear) {
452 DRM_ERROR("hardware doesn't support block linear mode\n");
456 if (tiling->sector_layout == TEGRA_BO_SECTOR_LAYOUT_GPU &&
457 !dc->soc->supports_sector_layout) {
458 DRM_ERROR("hardware doesn't support GPU sector layout\n");
463 * Tegra doesn't support different strides for U and V planes so we
464 * error out if the user tries to display a framebuffer with such a
467 if (new_plane_state->fb->format->num_planes > 2) {
468 if (new_plane_state->fb->pitches[2] != new_plane_state->fb->pitches[1]) {
469 DRM_ERROR("unsupported UV-plane configuration\n");
474 /* XXX scaling is not yet supported, add a check here */
476 err = tegra_plane_state_add(&tegra->base, new_plane_state);
/*
 * Atomic ->atomic_disable(): clear WIN_ENABLE and release window
 * ownership for the head the plane was previously attached to.  The DC
 * is resumed around the register accesses via its host1x client.
 */
483 static void tegra_shared_plane_atomic_disable(struct drm_plane *plane,
484 struct drm_atomic_state *state)
486 struct drm_plane_state *old_state = drm_atomic_get_old_plane_state(state,
488 struct tegra_plane *p = to_tegra_plane(plane);
493 /* rien ne va plus */
494 if (!old_state || !old_state->crtc)
497 dc = to_tegra_dc(old_state->crtc);
499 err = host1x_client_resume(&dc->client);
501 dev_err(dc->dev, "failed to resume: %d\n", err);
506 * XXX Legacy helpers seem to sometimes call ->atomic_disable() even
507 * on planes that are already disabled. Make sure we fallback to the
508 * head for this particular state instead of crashing.
510 if (WARN_ON(p->dc == NULL))
513 value = tegra_plane_readl(p, DC_WIN_WIN_OPTIONS);
514 value &= ~WIN_ENABLE;
515 tegra_plane_writel(p, value, DC_WIN_WIN_OPTIONS);
517 tegra_dc_remove_shared_plane(dc, p);
519 host1x_client_suspend(&dc->client);
/*
 * Compute the fixed-point phase increment used when scaling @in source
 * pixels onto @out destination pixels.  The integer part of @in is
 * shifted by NFB fractional bits with rounding.
 * NOTE(review): the declaration/derivation of 'tmp2' and the division by
 * 'out' are on lines not visible in this extract — confirm before edits.
 */
522 static inline u32 compute_phase_incr(fixed20_12 in, unsigned int out)
526 tmp = (u64)dfixed_trunc(in);
528 tmp1 = (tmp << NFB) + (tmp2 >> 1);
531 return lower_32_bits(tmp1);
/*
 * Atomic ->atomic_update() for shared planes: acquire window ownership,
 * program blending, input scaling, framebuffer addresses/pitches (plus
 * U/V planes for multi-planar YUV), color/degamma parameters, cropping
 * and surface tiling, then suspend the DC's host1x client again.
 */
534 static void tegra_shared_plane_atomic_update(struct drm_plane *plane,
535 struct drm_atomic_state *state)
537 struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state,
539 struct tegra_plane_state *tegra_plane_state = to_tegra_plane_state(new_state);
540 struct tegra_dc *dc = to_tegra_dc(new_state->crtc);
541 unsigned int zpos = new_state->normalized_zpos;
542 struct drm_framebuffer *fb = new_state->fb;
543 struct tegra_plane *p = to_tegra_plane(plane);
544 u32 value, min_width, bypass = 0;
545 dma_addr_t base, addr_flag = 0;
546 unsigned int bpc, planes;
550 /* rien ne va plus */
551 if (!new_state->crtc || !new_state->fb)
/* An invisible plane is programmed as a disable. */
554 if (!new_state->visible) {
555 tegra_shared_plane_atomic_disable(plane, state);
559 err = host1x_client_resume(&dc->client);
561 dev_err(dc->dev, "failed to resume: %d\n", err);
565 yuv = tegra_plane_format_is_yuv(tegra_plane_state->format, &planes, &bpc);
567 tegra_dc_assign_shared_plane(dc, p);
569 tegra_plane_writel(p, VCOUNTER, DC_WIN_CORE_ACT_CONTROL);
/* Identical blend factors for matching and non-matching pixels. */
572 value = BLEND_FACTOR_DST_ALPHA_ZERO | BLEND_FACTOR_SRC_ALPHA_K2 |
573 BLEND_FACTOR_DST_COLOR_NEG_K1_TIMES_SRC |
574 BLEND_FACTOR_SRC_COLOR_K1_TIMES_SRC;
575 tegra_plane_writel(p, value, DC_WIN_BLEND_MATCH_SELECT);
577 value = BLEND_FACTOR_DST_ALPHA_ZERO | BLEND_FACTOR_SRC_ALPHA_K2 |
578 BLEND_FACTOR_DST_COLOR_NEG_K1_TIMES_SRC |
579 BLEND_FACTOR_SRC_COLOR_K1_TIMES_SRC;
580 tegra_plane_writel(p, value, DC_WIN_BLEND_NOMATCH_SELECT);
/* Higher zpos means closer to the viewer, so invert for layer depth. */
582 value = K2(255) | K1(255) | WINDOW_LAYER_DEPTH(255 - zpos);
583 tegra_plane_writel(p, value, DC_WIN_BLEND_LAYER_CONTROL);
/* Pick the widest tap count the pipe supports at this width. */
586 min_width = min(new_state->src_w >> 16, new_state->crtc_w);
588 value = tegra_plane_readl(p, DC_WINC_PRECOMP_WGRP_PIPE_CAPC);
590 if (min_width < MAX_PIXELS_5TAP444(value)) {
591 value = HORIZONTAL_TAPS_5 | VERTICAL_TAPS_5;
593 value = tegra_plane_readl(p, DC_WINC_PRECOMP_WGRP_PIPE_CAPE);
595 if (min_width < MAX_PIXELS_2TAP444(value))
596 value = HORIZONTAL_TAPS_2 | VERTICAL_TAPS_2;
598 dev_err(dc->dev, "invalid minimum width: %u\n", min_width);
601 value = HORIZONTAL_TAPS_5 | VERTICAL_TAPS_5;
602 tegra_plane_writel(p, value, DC_WIN_WINDOWGROUP_SET_CONTROL_INPUT_SCALER);
/* Horizontal scaling needed when src and dst widths differ (16.16). */
604 if (new_state->src_w != new_state->crtc_w << 16) {
605 fixed20_12 width = dfixed_init(new_state->src_w >> 16);
606 u32 incr = compute_phase_incr(width, new_state->crtc_w) & ~0x1;
607 u32 init = (1 << (NFB - 1)) + (incr >> 1);
609 tegra_plane_writel(p, incr, DC_WIN_SET_INPUT_SCALER_HPHASE_INCR);
610 tegra_plane_writel(p, init, DC_WIN_SET_INPUT_SCALER_H_START_PHASE);
612 bypass |= INPUT_SCALER_HBYPASS;
615 if (new_state->src_h != new_state->crtc_h << 16) {
616 fixed20_12 height = dfixed_init(new_state->src_h >> 16);
617 u32 incr = compute_phase_incr(height, new_state->crtc_h) & ~0x1;
618 u32 init = (1 << (NFB - 1)) + (incr >> 1);
620 tegra_plane_writel(p, incr, DC_WIN_SET_INPUT_SCALER_VPHASE_INCR);
621 tegra_plane_writel(p, init, DC_WIN_SET_INPUT_SCALER_V_START_PHASE);
623 bypass |= INPUT_SCALER_VBYPASS;
626 tegra_plane_writel(p, bypass, DC_WIN_WINDOWGROUP_SET_INPUT_SCALER_USAGE);
628 /* disable compression */
629 tegra_plane_writel(p, 0, DC_WINBUF_CDE_CONTROL);
631 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
633 * Physical address bit 39 in Tegra194 is used as a switch for special
634 * logic that swizzles the memory using either the legacy Tegra or the
635 * dGPU sector layout.
637 if (tegra_plane_state->tiling.sector_layout == TEGRA_BO_SECTOR_LAYOUT_GPU)
638 addr_flag = BIT_ULL(39);
/* NOTE(review): addr_flag is presumably OR'd into each base address on
 * lines not visible in this extract — confirm against the full file. */
641 base = tegra_plane_state->iova[0] + fb->offsets[0];
644 tegra_plane_writel(p, tegra_plane_state->format, DC_WIN_COLOR_DEPTH);
645 tegra_plane_writel(p, 0, DC_WIN_PRECOMP_WGRP_PARAMS);
647 value = V_POSITION(new_state->crtc_y) |
648 H_POSITION(new_state->crtc_x);
649 tegra_plane_writel(p, value, DC_WIN_POSITION);
651 value = V_SIZE(new_state->crtc_h) | H_SIZE(new_state->crtc_w);
652 tegra_plane_writel(p, value, DC_WIN_SIZE);
654 value = WIN_ENABLE | COLOR_EXPAND;
655 tegra_plane_writel(p, value, DC_WIN_WIN_OPTIONS);
657 value = V_SIZE(new_state->src_h >> 16) | H_SIZE(new_state->src_w >> 16);
658 tegra_plane_writel(p, value, DC_WIN_CROPPED_SIZE);
660 tegra_plane_writel(p, upper_32_bits(base), DC_WINBUF_START_ADDR_HI);
661 tegra_plane_writel(p, lower_32_bits(base), DC_WINBUF_START_ADDR);
663 value = PITCH(fb->pitches[0]);
664 tegra_plane_writel(p, value, DC_WIN_PLANAR_STORAGE);
/* Multi-planar YUV: program U/V base addresses and pitches as well. */
666 if (yuv && planes > 1) {
667 base = tegra_plane_state->iova[1] + fb->offsets[1];
670 tegra_plane_writel(p, upper_32_bits(base), DC_WINBUF_START_ADDR_HI_U);
671 tegra_plane_writel(p, lower_32_bits(base), DC_WINBUF_START_ADDR_U);
674 base = tegra_plane_state->iova[2] + fb->offsets[2];
677 tegra_plane_writel(p, upper_32_bits(base), DC_WINBUF_START_ADDR_HI_V);
678 tegra_plane_writel(p, lower_32_bits(base), DC_WINBUF_START_ADDR_V);
681 value = PITCH_U(fb->pitches[1]);
684 value |= PITCH_V(fb->pitches[2]);
686 tegra_plane_writel(p, value, DC_WIN_PLANAR_STORAGE_UV);
/* Otherwise make sure stale U/V programming is cleared. */
688 tegra_plane_writel(p, 0, DC_WINBUF_START_ADDR_U);
689 tegra_plane_writel(p, 0, DC_WINBUF_START_ADDR_HI_U);
690 tegra_plane_writel(p, 0, DC_WINBUF_START_ADDR_V);
691 tegra_plane_writel(p, 0, DC_WINBUF_START_ADDR_HI_V);
692 tegra_plane_writel(p, 0, DC_WIN_PLANAR_STORAGE_UV);
695 value = CLAMP_BEFORE_BLEND | INPUT_RANGE_FULL;
/* Degamma selection by bits-per-component for YUV formats. */
699 value |= DEGAMMA_YUV8_10;
701 value |= DEGAMMA_YUV12;
703 /* XXX parameterize */
704 value |= COLOR_SPACE_YUV_2020;
706 if (!tegra_plane_format_is_indexed(tegra_plane_state->format))
707 value |= DEGAMMA_SRGB;
710 tegra_plane_writel(p, value, DC_WIN_SET_PARAMS);
712 value = OFFSET_X(new_state->src_y >> 16) |
713 OFFSET_Y(new_state->src_x >> 16);
714 tegra_plane_writel(p, value, DC_WINBUF_CROPPED_POINT);
716 if (dc->soc->supports_block_linear) {
717 unsigned long height = tegra_plane_state->tiling.value;
720 switch (tegra_plane_state->tiling.mode) {
721 case TEGRA_BO_TILING_MODE_PITCH:
722 value = DC_WINBUF_SURFACE_KIND_BLOCK_HEIGHT(0) |
723 DC_WINBUF_SURFACE_KIND_PITCH;
726 /* XXX not supported on Tegra186 and later */
727 case TEGRA_BO_TILING_MODE_TILED:
728 value = DC_WINBUF_SURFACE_KIND_TILED;
731 case TEGRA_BO_TILING_MODE_BLOCK:
732 value = DC_WINBUF_SURFACE_KIND_BLOCK_HEIGHT(height) |
733 DC_WINBUF_SURFACE_KIND_BLOCK;
737 tegra_plane_writel(p, value, DC_WINBUF_SURFACE_KIND);
740 /* disable gamut CSC */
741 value = tegra_plane_readl(p, DC_WIN_WINDOW_SET_CONTROL);
742 value &= ~CONTROL_CSC_ENABLE;
743 tegra_plane_writel(p, value, DC_WIN_WINDOW_SET_CONTROL);
745 host1x_client_suspend(&dc->client);
/* Atomic plane helper callbacks shared-plane implementation. */
748 static const struct drm_plane_helper_funcs tegra_shared_plane_helper_funcs = {
749 .prepare_fb = tegra_plane_prepare_fb,
750 .cleanup_fb = tegra_plane_cleanup_fb,
751 .atomic_check = tegra_shared_plane_atomic_check,
752 .atomic_update = tegra_shared_plane_atomic_update,
753 .atomic_disable = tegra_shared_plane_atomic_disable,
/*
 * Allocate and register a shared overlay plane that can be assigned to
 * any CRTC.  The plane's register aperture is derived from its index
 * (0x0a00 + 0x0300 per window) and it is tied to one of the hub's window
 * groups.  Returns an ERR_PTR on allocation failure.
 */
756 struct drm_plane *tegra_shared_plane_create(struct drm_device *drm,
761 enum drm_plane_type type = DRM_PLANE_TYPE_OVERLAY;
762 struct tegra_drm *tegra = drm->dev_private;
763 struct tegra_display_hub *hub = tegra->hub;
764 struct tegra_shared_plane *plane;
765 unsigned int possible_crtcs;
766 unsigned int num_formats;
767 const u64 *modifiers;
772 plane = kzalloc(sizeof(*plane), GFP_KERNEL);
774 return ERR_PTR(-ENOMEM);
776 plane->base.offset = 0x0a00 + 0x0300 * index;
777 plane->base.index = index;
779 plane->wgrp = &hub->wgrps[wgrp];
780 plane->wgrp->parent = &dc->client;
782 p = &plane->base.base;
784 /* planes can be assigned to arbitrary CRTCs */
785 possible_crtcs = BIT(tegra->num_crtcs) - 1;
787 num_formats = ARRAY_SIZE(tegra_shared_plane_formats);
788 formats = tegra_shared_plane_formats;
789 modifiers = tegra_shared_plane_modifiers;
791 err = drm_universal_plane_init(drm, p, possible_crtcs,
792 &tegra_plane_funcs, formats,
793 num_formats, modifiers, type, NULL);
799 drm_plane_helper_add(p, &tegra_shared_plane_helper_funcs);
/* zpos range 0..255 matches WINDOW_LAYER_DEPTH programming above. */
800 drm_plane_create_zpos_property(p, 0, 0, 255);
/* Duplicate the hub's private atomic state by shallow copy. */
805 static struct drm_private_state *
806 tegra_display_hub_duplicate_state(struct drm_private_obj *obj)
808 struct tegra_display_hub_state *state;
810 state = kmemdup(obj->state, sizeof(*state), GFP_KERNEL);
814 __drm_atomic_helper_private_obj_duplicate_state(obj, &state->base);
/* Free a duplicated hub private state. */
819 static void tegra_display_hub_destroy_state(struct drm_private_obj *obj,
820 struct drm_private_state *state)
822 struct tegra_display_hub_state *hub_state =
823 to_tegra_display_hub_state(state);
/* Private-object state management callbacks for the display hub. */
828 static const struct drm_private_state_funcs tegra_display_hub_state_funcs = {
829 .atomic_duplicate_state = tegra_display_hub_duplicate_state,
830 .atomic_destroy_state = tegra_display_hub_destroy_state,
/* Get the hub's private state from an atomic transaction, propagating
 * errors from drm_atomic_get_private_obj_state() via ERR_CAST(). */
833 static struct tegra_display_hub_state *
834 tegra_display_hub_get_state(struct tegra_display_hub *hub,
835 struct drm_atomic_state *state)
837 struct drm_private_state *priv;
839 priv = drm_atomic_get_private_obj_state(state, &hub->base);
841 return ERR_CAST(priv);
843 return to_tegra_display_hub_state(priv);
/*
 * Atomic check for the display hub: scan all CRTC states and record, in
 * the hub's private state, the display clock with the highest pixel
 * clock so the hub can later be fed from it.
 */
846 int tegra_display_hub_atomic_check(struct drm_device *drm,
847 struct drm_atomic_state *state)
849 struct tegra_drm *tegra = drm->dev_private;
850 struct tegra_display_hub_state *hub_state;
851 struct drm_crtc_state *old, *new;
852 struct drm_crtc *crtc;
858 hub_state = tegra_display_hub_get_state(tegra->hub, state);
859 if (IS_ERR(hub_state))
860 return PTR_ERR(hub_state);
863 * The display hub display clock needs to be fed by the display clock
864 * with the highest frequency to ensure proper functioning of all the
867 * Note that this isn't used before Tegra186, but it doesn't hurt and
868 * conditionalizing it would make the code less clean.
870 for_each_oldnew_crtc_in_state(state, crtc, old, new, i) {
871 struct tegra_dc_state *dc = to_dc_state(new);
/* First candidate, or a faster pixel clock, wins. */
874 if (!hub_state->clk || dc->pclk > hub_state->rate) {
875 hub_state->dc = to_tegra_dc(dc->base.crtc);
876 hub_state->clk = hub_state->dc->clk;
877 hub_state->rate = dc->pclk;
/*
 * Push common-channel hub configuration through one display controller:
 * clear the latency event, program the common fetch meter, then issue
 * COMMON_UPDATE/COMMON_ACTREQ (each followed by a read-back to flush the
 * write) before suspending the client again.
 */
885 static void tegra_display_hub_update(struct tegra_dc *dc)
890 err = host1x_client_resume(&dc->client);
892 dev_err(dc->dev, "failed to resume: %d\n", err);
896 value = tegra_dc_readl(dc, DC_CMD_IHUB_COMMON_MISC_CTL);
897 value &= ~LATENCY_EVENT;
898 tegra_dc_writel(dc, value, DC_CMD_IHUB_COMMON_MISC_CTL);
900 value = tegra_dc_readl(dc, DC_DISP_IHUB_COMMON_DISPLAY_FETCH_METER);
901 value = CURS_SLOTS(1) | WGRP_SLOTS(1);
902 tegra_dc_writel(dc, value, DC_DISP_IHUB_COMMON_DISPLAY_FETCH_METER);
904 tegra_dc_writel(dc, COMMON_UPDATE, DC_CMD_STATE_CONTROL);
905 tegra_dc_readl(dc, DC_CMD_STATE_CONTROL);
906 tegra_dc_writel(dc, COMMON_ACTREQ, DC_CMD_STATE_CONTROL);
907 tegra_dc_readl(dc, DC_CMD_STATE_CONTROL);
909 host1x_client_suspend(&dc->client);
/*
 * Atomic commit for the hub: apply the clock selection computed in
 * tegra_display_hub_atomic_check() — set the chosen clock's rate and
 * reparent the hub display clock to it — then flush the common channel
 * through the selected DC.  Clock errors are logged but not fatal.
 */
912 void tegra_display_hub_atomic_commit(struct drm_device *drm,
913 struct drm_atomic_state *state)
915 struct tegra_drm *tegra = drm->dev_private;
916 struct tegra_display_hub *hub = tegra->hub;
917 struct tegra_display_hub_state *hub_state;
918 struct device *dev = hub->client.dev;
921 hub_state = to_tegra_display_hub_state(hub->base.state);
923 if (hub_state->clk) {
924 err = clk_set_rate(hub_state->clk, hub_state->rate);
926 dev_err(dev, "failed to set rate of %pC to %lu Hz\n",
927 hub_state->clk, hub_state->rate);
929 err = clk_set_parent(hub->clk_disp, hub_state->clk);
931 dev_err(dev, "failed to set parent of %pC to %pC: %d\n",
932 hub->clk_disp, hub_state->clk, err);
936 tegra_display_hub_update(hub_state->dc);
/* host1x client ->init(): allocate the hub's initial private atomic state
 * and register it as a DRM private object. */
939 static int tegra_display_hub_init(struct host1x_client *client)
941 struct tegra_display_hub *hub = to_tegra_display_hub(client);
942 struct drm_device *drm = dev_get_drvdata(client->host);
943 struct tegra_drm *tegra = drm->dev_private;
944 struct tegra_display_hub_state *state;
946 state = kzalloc(sizeof(*state), GFP_KERNEL);
950 drm_atomic_private_obj_init(drm, &hub->base, &state->base,
951 &tegra_display_hub_state_funcs);
/* host1x client ->exit(): tear down the hub's private-object state. */
958 static int tegra_display_hub_exit(struct host1x_client *client)
960 struct drm_device *drm = dev_get_drvdata(client->host);
961 struct tegra_drm *tegra = drm->dev_private;
963 drm_atomic_private_obj_fini(&tegra->hub->base);
/*
 * Runtime suspend: assert the hub reset, disable all per-head clocks
 * (i starts at num_heads and is walked down; the decrement is on a line
 * not visible in this extract), then the hub/dsc/disp clocks, and drop
 * the runtime PM reference.
 */
969 static int tegra_display_hub_runtime_suspend(struct host1x_client *client)
971 struct tegra_display_hub *hub = to_tegra_display_hub(client);
972 struct device *dev = client->dev;
973 unsigned int i = hub->num_heads;
976 err = reset_control_assert(hub->rst);
981 clk_disable_unprepare(hub->clk_heads[i]);
983 clk_disable_unprepare(hub->clk_hub);
984 clk_disable_unprepare(hub->clk_dsc);
985 clk_disable_unprepare(hub->clk_disp);
987 pm_runtime_put_sync(dev);
/*
 * Runtime resume: take a runtime PM reference, enable disp/dsc/hub and
 * all per-head clocks in order, then deassert the hub reset.  Failures
 * unwind in reverse through the labels at the bottom.
 */
992 static int tegra_display_hub_runtime_resume(struct host1x_client *client)
994 struct tegra_display_hub *hub = to_tegra_display_hub(client);
995 struct device *dev = client->dev;
999 err = pm_runtime_resume_and_get(dev);
1001 dev_err(dev, "failed to get runtime PM: %d\n", err);
1005 err = clk_prepare_enable(hub->clk_disp);
1009 err = clk_prepare_enable(hub->clk_dsc);
1013 err = clk_prepare_enable(hub->clk_hub);
1017 for (i = 0; i < hub->num_heads; i++) {
1018 err = clk_prepare_enable(hub->clk_heads[i]);
1023 err = reset_control_deassert(hub->rst);
/* Error unwind: disable whatever was enabled, in reverse order. */
1031 clk_disable_unprepare(hub->clk_heads[i]);
1033 clk_disable_unprepare(hub->clk_hub);
1035 clk_disable_unprepare(hub->clk_dsc);
1037 clk_disable_unprepare(hub->clk_disp);
1039 pm_runtime_put_sync(dev);
/* host1x client callbacks for the display hub. */
1043 static const struct host1x_client_ops tegra_display_hub_ops = {
1044 .init = tegra_display_hub_init,
1045 .exit = tegra_display_hub_exit,
1046 .suspend = tegra_display_hub_runtime_suspend,
1047 .resume = tegra_display_hub_runtime_resume,
/*
 * Platform probe: set the DMA mask, allocate the hub, look up clocks and
 * resets (per-SoC DSC clock is optional), initialize one windowgroup per
 * soc->num_wgrps (each held in reset), collect one "dc" clock per child
 * node/head, then register the host1x client and populate child devices.
 */
1050 static int tegra_display_hub_probe(struct platform_device *pdev)
1052 u64 dma_mask = dma_get_mask(pdev->dev.parent);
1053 struct device_node *child = NULL;
1054 struct tegra_display_hub *hub;
1059 err = dma_coerce_mask_and_coherent(&pdev->dev, dma_mask);
1061 dev_err(&pdev->dev, "failed to set DMA mask: %d\n", err);
1065 hub = devm_kzalloc(&pdev->dev, sizeof(*hub), GFP_KERNEL);
1069 hub->soc = of_device_get_match_data(&pdev->dev);
1071 hub->clk_disp = devm_clk_get(&pdev->dev, "disp");
1072 if (IS_ERR(hub->clk_disp)) {
1073 err = PTR_ERR(hub->clk_disp);
/* DSC clock only exists on SoCs that support DSC (Tegra186). */
1077 if (hub->soc->supports_dsc) {
1078 hub->clk_dsc = devm_clk_get(&pdev->dev, "dsc");
1079 if (IS_ERR(hub->clk_dsc)) {
1080 err = PTR_ERR(hub->clk_dsc);
1085 hub->clk_hub = devm_clk_get(&pdev->dev, "hub");
1086 if (IS_ERR(hub->clk_hub)) {
1087 err = PTR_ERR(hub->clk_hub);
1091 hub->rst = devm_reset_control_get(&pdev->dev, "misc");
1092 if (IS_ERR(hub->rst)) {
1093 err = PTR_ERR(hub->rst);
1097 hub->wgrps = devm_kcalloc(&pdev->dev, hub->soc->num_wgrps,
1098 sizeof(*hub->wgrps), GFP_KERNEL);
1102 for (i = 0; i < hub->soc->num_wgrps; i++) {
1103 struct tegra_windowgroup *wgrp = &hub->wgrps[i];
/* Per-group reset is named "wgrp<N>" in the device tree. */
1106 snprintf(id, sizeof(id), "wgrp%u", i);
1107 mutex_init(&wgrp->lock);
1111 wgrp->rst = devm_reset_control_get(&pdev->dev, id);
1112 if (IS_ERR(wgrp->rst))
1113 return PTR_ERR(wgrp->rst);
1115 err = reset_control_assert(wgrp->rst);
/* One head per child node of the hub. */
1120 hub->num_heads = of_get_child_count(pdev->dev.of_node);
1122 hub->clk_heads = devm_kcalloc(&pdev->dev, hub->num_heads, sizeof(clk),
1124 if (!hub->clk_heads)
1127 for (i = 0; i < hub->num_heads; i++) {
1128 child = of_get_next_child(pdev->dev.of_node, child);
1130 dev_err(&pdev->dev, "failed to find node for head %u\n",
1135 clk = devm_get_clk_from_child(&pdev->dev, child, "dc");
1137 dev_err(&pdev->dev, "failed to get clock for head %u\n",
1140 return PTR_ERR(clk);
1143 hub->clk_heads[i] = clk;
1148 /* XXX: enable clock across reset? */
1149 err = reset_control_assert(hub->rst);
1153 platform_set_drvdata(pdev, hub);
1154 pm_runtime_enable(&pdev->dev);
1156 INIT_LIST_HEAD(&hub->client.list);
1157 hub->client.ops = &tegra_display_hub_ops;
1158 hub->client.dev = &pdev->dev;
1160 err = host1x_client_register(&hub->client);
1162 dev_err(&pdev->dev, "failed to register host1x client: %d\n",
1165 err = devm_of_platform_populate(&pdev->dev);
/* Error path: undo client registration and runtime PM enable. */
1172 host1x_client_unregister(&hub->client);
1173 pm_runtime_disable(&pdev->dev);
/* Platform remove: unregister the host1x client, destroy per-windowgroup
 * locks and disable runtime PM. */
1177 static void tegra_display_hub_remove(struct platform_device *pdev)
1179 struct tegra_display_hub *hub = platform_get_drvdata(pdev);
1182 host1x_client_unregister(&hub->client);
1184 for (i = 0; i < hub->soc->num_wgrps; i++) {
1185 struct tegra_windowgroup *wgrp = &hub->wgrps[i];
1187 mutex_destroy(&wgrp->lock);
1190 pm_runtime_disable(&pdev->dev);
/* Per-SoC hub capabilities: Tegra186 supports DSC, Tegra194 does not. */
1193 static const struct tegra_display_hub_soc tegra186_display_hub = {
1195 .supports_dsc = true,
1198 static const struct tegra_display_hub_soc tegra194_display_hub = {
1200 .supports_dsc = false,
/* Device-tree match table binding compatibles to SoC data. */
1203 static const struct of_device_id tegra_display_hub_of_match[] = {
1205 .compatible = "nvidia,tegra194-display",
1206 .data = &tegra194_display_hub
1208 .compatible = "nvidia,tegra186-display",
1209 .data = &tegra186_display_hub
1214 MODULE_DEVICE_TABLE(of, tegra_display_hub_of_match);
1216 struct platform_driver tegra_display_hub_driver = {
1218 .name = "tegra-display-hub",
1219 .of_match_table = tegra_display_hub_of_match,
1221 .probe = tegra_display_hub_probe,
1222 .remove_new = tegra_display_hub_remove,