2 * Copyright (c) 2011-2013, NVIDIA Corporation.
3 * Copyright 2014 Google Inc.
5 * SPDX-License-Identifier: GPL-2.0
13 #include <video_bridge.h>
15 #include <asm/arch-tegra/dc.h>
19 #include "displayport.h"
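/*
 * When set, try the abbreviated "fast" link-training sequence first and fall
 * back to full link training if it fails (see tegra_dp_do_link_training()).
 */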
21 #define DO_FAST_LINK_TRAINING 1
23 struct tegra_dp_plat {
28 * struct tegra_dp_priv - private displayport driver info
30 * @dc_dev: Display controller device that is sending the video feed
32 struct tegra_dp_priv {
34 struct udevice *dc_dev;
35 struct dpaux_ctlr *regs;
40 struct tegra_dp_priv dp_data;
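/*
 * Note: the DPAUX register "offsets" passed to these accessors are word
 * indices, not byte offsets; the (u32 *) pointer arithmetic scales them by 4.
 */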
42 static inline u32 tegra_dpaux_readl(struct tegra_dp_priv *dp, u32 reg)
44 return readl((u32 *)dp->regs + reg);
47 static inline void tegra_dpaux_writel(struct tegra_dp_priv *dp, u32 reg,
50 writel(val, (u32 *)dp->regs + reg);
53 static inline u32 tegra_dc_dpaux_poll_register(struct tegra_dp_priv *dp,
54 u32 reg, u32 mask, u32 exp_val,
59 u32 temp = timeout_us;
62 udelay(poll_interval_us);
63 reg_val = tegra_dpaux_readl(dp, reg);
64 if (timeout_us > poll_interval_us)
65 timeout_us -= poll_interval_us;
68 } while ((reg_val & mask) != exp_val);
70 if ((reg_val & mask) == exp_val)
71 return 0; /* success */
72 debug("dpaux_poll_register 0x%x: timeout: (reg_val)0x%08x & (mask)0x%08x != (exp_val)0x%08x\n",
73 reg, reg_val, mask, exp_val);
77 static inline int tegra_dpaux_wait_transaction(struct tegra_dp_priv *dp)
79 /* According to DP spec, each aux transaction needs to finish
81 if (tegra_dc_dpaux_poll_register(dp, DPAUX_DP_AUXCTL,
82 DPAUX_DP_AUXCTL_TRANSACTREQ_MASK,
83 DPAUX_DP_AUXCTL_TRANSACTREQ_DONE,
84 100, DP_AUX_TIMEOUT_MS * 1000) != 0) {
85 debug("dp: DPAUX transaction timeout\n");
91 static int tegra_dc_dpaux_write_chunk(struct tegra_dp_priv *dp, u32 cmd,
92 u32 addr, u8 *data, u32 *size,
97 u32 timeout_retries = DP_AUX_TIMEOUT_MAX_TRIES;
98 u32 defer_retries = DP_AUX_DEFER_MAX_TRIES;
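/*
 * A single AUX transaction carries at most DP_AUX_MAX_BYTES (16 bytes per
 * the DP spec), so callers must split larger transfers into chunks.
 */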
101 if (*size > DP_AUX_MAX_BYTES)
102 return -EIO; /* only write one chunk of data */
104 /* Make sure the command is a write command */
106 case DPAUX_DP_AUXCTL_CMD_I2CWR:
107 case DPAUX_DP_AUXCTL_CMD_MOTWR:
108 case DPAUX_DP_AUXCTL_CMD_AUXWR:
111 debug("dp: aux write cmd 0x%x is invalid\n", cmd);
115 tegra_dpaux_writel(dp, DPAUX_DP_AUXADDR, addr);
116 for (i = 0; i < DP_AUX_MAX_BYTES / 4; ++i) {
117 memcpy(&temp_data, data, 4);
118 tegra_dpaux_writel(dp, DPAUX_DP_AUXDATA_WRITE_W(i), temp_data);
122 reg_val = tegra_dpaux_readl(dp, DPAUX_DP_AUXCTL);
123 reg_val &= ~DPAUX_DP_AUXCTL_CMD_MASK;
125 reg_val &= ~DPAUX_DP_AUXCTL_CMDLEN_FIELD;
126 reg_val |= ((*size - 1) << DPAUX_DP_AUXCTL_CMDLEN_SHIFT);
128 while ((timeout_retries > 0) && (defer_retries > 0)) {
129 if ((timeout_retries != DP_AUX_TIMEOUT_MAX_TRIES) ||
130 (defer_retries != DP_AUX_DEFER_MAX_TRIES))
133 reg_val |= DPAUX_DP_AUXCTL_TRANSACTREQ_PENDING;
134 tegra_dpaux_writel(dp, DPAUX_DP_AUXCTL, reg_val);
136 if (tegra_dpaux_wait_transaction(dp))
137 debug("dp: aux write transaction timeout\n");
139 *aux_stat = tegra_dpaux_readl(dp, DPAUX_DP_AUXSTAT);
141 if ((*aux_stat & DPAUX_DP_AUXSTAT_TIMEOUT_ERROR_PENDING) ||
142 (*aux_stat & DPAUX_DP_AUXSTAT_RX_ERROR_PENDING) ||
143 (*aux_stat & DPAUX_DP_AUXSTAT_SINKSTAT_ERROR_PENDING) ||
144 (*aux_stat & DPAUX_DP_AUXSTAT_NO_STOP_ERROR_PENDING)) {
145 if (timeout_retries-- > 0) {
146 debug("dp: aux write retry (0x%x) -- %d\n",
147 *aux_stat, timeout_retries);
148 /* clear the error bits */
149 tegra_dpaux_writel(dp, DPAUX_DP_AUXSTAT,
153 debug("dp: aux write got error (0x%x)\n",
159 if ((*aux_stat & DPAUX_DP_AUXSTAT_REPLYTYPE_I2CDEFER) ||
160 (*aux_stat & DPAUX_DP_AUXSTAT_REPLYTYPE_DEFER)) {
161 if (defer_retries-- > 0) {
162 debug("dp: aux write defer (0x%x) -- %d\n",
163 *aux_stat, defer_retries);
164 /* clear the error bits */
165 tegra_dpaux_writel(dp, DPAUX_DP_AUXSTAT,
169 debug("dp: aux write defer exceeds max retries (0x%x)\n",
175 if ((*aux_stat & DPAUX_DP_AUXSTAT_REPLYTYPE_MASK) ==
176 DPAUX_DP_AUXSTAT_REPLYTYPE_ACK) {
177 *size = ((*aux_stat) & DPAUX_DP_AUXSTAT_REPLY_M_MASK);
180 debug("dp: aux write failed (0x%x)\n", *aux_stat);
184 /* Should never get here */
188 static int tegra_dc_dpaux_read_chunk(struct tegra_dp_priv *dp, u32 cmd,
189 u32 addr, u8 *data, u32 *size,
193 u32 timeout_retries = DP_AUX_TIMEOUT_MAX_TRIES;
194 u32 defer_retries = DP_AUX_DEFER_MAX_TRIES;
196 if (*size > DP_AUX_MAX_BYTES) {
197 debug("only read one chunk\n");
198 return -EIO; /* only read one chunk */
201 /* Check that the command is a read command */
203 case DPAUX_DP_AUXCTL_CMD_I2CRD:
204 case DPAUX_DP_AUXCTL_CMD_I2CREQWSTAT:
205 case DPAUX_DP_AUXCTL_CMD_MOTRD:
206 case DPAUX_DP_AUXCTL_CMD_AUXRD:
209 debug("dp: aux read cmd 0x%x is invalid\n", cmd);
213 *aux_stat = tegra_dpaux_readl(dp, DPAUX_DP_AUXSTAT);
214 if (!(*aux_stat & DPAUX_DP_AUXSTAT_HPD_STATUS_PLUGGED)) {
215 debug("dp: HPD is not detected\n");
219 tegra_dpaux_writel(dp, DPAUX_DP_AUXADDR, addr);
221 reg_val = tegra_dpaux_readl(dp, DPAUX_DP_AUXCTL);
222 reg_val &= ~DPAUX_DP_AUXCTL_CMD_MASK;
224 reg_val &= ~DPAUX_DP_AUXCTL_CMDLEN_FIELD;
225 reg_val |= ((*size - 1) << DPAUX_DP_AUXCTL_CMDLEN_SHIFT);
226 while ((timeout_retries > 0) && (defer_retries > 0)) {
227 if ((timeout_retries != DP_AUX_TIMEOUT_MAX_TRIES) ||
228 (defer_retries != DP_AUX_DEFER_MAX_TRIES))
229 udelay(DP_DPCP_RETRY_SLEEP_NS * 2);
231 reg_val |= DPAUX_DP_AUXCTL_TRANSACTREQ_PENDING;
232 tegra_dpaux_writel(dp, DPAUX_DP_AUXCTL, reg_val);
234 if (tegra_dpaux_wait_transaction(dp))
235 debug("dp: aux read transaction timeout\n");
237 *aux_stat = tegra_dpaux_readl(dp, DPAUX_DP_AUXSTAT);
239 if ((*aux_stat & DPAUX_DP_AUXSTAT_TIMEOUT_ERROR_PENDING) ||
240 (*aux_stat & DPAUX_DP_AUXSTAT_RX_ERROR_PENDING) ||
241 (*aux_stat & DPAUX_DP_AUXSTAT_SINKSTAT_ERROR_PENDING) ||
242 (*aux_stat & DPAUX_DP_AUXSTAT_NO_STOP_ERROR_PENDING)) {
243 if (timeout_retries-- > 0) {
244 debug("dp: aux read retry (0x%x) -- %d\n",
245 *aux_stat, timeout_retries);
246 /* clear the error bits */
247 tegra_dpaux_writel(dp, DPAUX_DP_AUXSTAT,
249 continue; /* retry */
251 debug("dp: aux read got error (0x%x)\n",
257 if ((*aux_stat & DPAUX_DP_AUXSTAT_REPLYTYPE_I2CDEFER) ||
258 (*aux_stat & DPAUX_DP_AUXSTAT_REPLYTYPE_DEFER)) {
259 if (defer_retries-- > 0) {
260 debug("dp: aux read defer (0x%x) -- %d\n",
261 *aux_stat, defer_retries);
262 /* clear the error bits */
263 tegra_dpaux_writel(dp, DPAUX_DP_AUXSTAT,
267 debug("dp: aux read defer exceeds max retries (0x%x)\n",
273 if ((*aux_stat & DPAUX_DP_AUXSTAT_REPLYTYPE_MASK) ==
274 DPAUX_DP_AUXSTAT_REPLYTYPE_ACK) {
278 for (i = 0; i < DP_AUX_MAX_BYTES / 4; ++i)
279 temp_data[i] = tegra_dpaux_readl(dp,
280 DPAUX_DP_AUXDATA_READ_W(i));
282 *size = ((*aux_stat) & DPAUX_DP_AUXSTAT_REPLY_M_MASK);
283 memcpy(data, temp_data, *size);
287 debug("dp: aux read failed (0x%x)\n", *aux_stat);
291 /* Should never get here */
292 debug("%s: should not reach here\n", __func__);
297 static int tegra_dc_dpaux_read(struct tegra_dp_priv *dp, u32 cmd, u32 addr,
298 u8 *data, u32 *size, u32 *aux_stat)
305 cur_size = *size - finished;
306 if (cur_size > DP_AUX_MAX_BYTES)
307 cur_size = DP_AUX_MAX_BYTES;
309 ret = tegra_dc_dpaux_read_chunk(dp, cmd, addr,
310 data, &cur_size, aux_stat);
314 /* cur_size should be the real size returned */
317 finished += cur_size;
319 } while (*size > finished);
325 static int tegra_dc_dp_dpcd_read(struct tegra_dp_priv *dp, u32 cmd,
332 ret = tegra_dc_dpaux_read_chunk(dp, DPAUX_DP_AUXCTL_CMD_AUXRD,
333 cmd, data_ptr, &size, &status);
335 debug("dp: Failed to read DPCD data. CMD 0x%x, Status 0x%x\n",
342 static int tegra_dc_dp_dpcd_write(struct tegra_dp_priv *dp, u32 cmd,
349 ret = tegra_dc_dpaux_write_chunk(dp, DPAUX_DP_AUXCTL_CMD_AUXWR,
350 cmd, &data, &size, &status);
352 debug("dp: Failed to write DPCD data. CMD 0x%x, Status 0x%x\n",
359 static int tegra_dc_i2c_aux_read(struct tegra_dp_priv *dp, u32 i2c_addr,
360 u8 addr, u8 *data, u32 size, u32 *aux_stat)
366 u32 cur_size = min((u32)DP_AUX_MAX_BYTES, size - finished);
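/*
 * I2C-over-AUX: first send the register address in a write with the
 * Middle-Of-Transaction (MOT) bit set, then issue the I2C read for this
 * chunk.
 */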
369 ret = tegra_dc_dpaux_write_chunk(
370 dp, DPAUX_DP_AUXCTL_CMD_MOTWR, i2c_addr,
371 &addr, &len, aux_stat);
373 debug("%s: error sending address to read.\n",
378 ret = tegra_dc_dpaux_read_chunk(
379 dp, DPAUX_DP_AUXCTL_CMD_I2CRD, i2c_addr,
380 data, &cur_size, aux_stat);
382 debug("%s: error reading data.\n", __func__);
386 /* cur_size should be the real size returned */
389 finished += cur_size;
390 } while (size > finished);
395 static void tegra_dc_dpaux_enable(struct tegra_dp_priv *dp)
397 /* clear interrupt */
398 tegra_dpaux_writel(dp, DPAUX_INTR_AUX, 0xffffffff);
399 /* Do not enable interrupts for now; enable them once an ISR is in place */
400 tegra_dpaux_writel(dp, DPAUX_INTR_EN_AUX, 0x0);
402 tegra_dpaux_writel(dp, DPAUX_HYBRID_PADCTL,
403 DPAUX_HYBRID_PADCTL_AUX_DRVZ_OHM_50 |
404 DPAUX_HYBRID_PADCTL_AUX_CMH_V0_70 |
405 0x18 << DPAUX_HYBRID_PADCTL_AUX_DRVI_SHIFT |
406 DPAUX_HYBRID_PADCTL_AUX_INPUT_RCV_ENABLE);
408 tegra_dpaux_writel(dp, DPAUX_HYBRID_SPARE,
409 DPAUX_HYBRID_SPARE_PAD_PWR_POWERUP);
413 static void tegra_dc_dp_dump_link_cfg(struct tegra_dp_priv *dp,
414 const struct tegra_dp_link_config *link_cfg)
416 debug("DP config: cfg_name cfg_value\n");
417 debug(" Lane Count %d\n",
418 link_cfg->max_lane_count);
419 debug(" SupportEnhancedFraming %s\n",
420 link_cfg->support_enhanced_framing ? "Y" : "N");
421 debug(" Bandwidth %d\n",
422 link_cfg->max_link_bw);
424 link_cfg->bits_per_pixel);
425 debug(" EnhancedFraming %s\n",
426 link_cfg->enhanced_framing ? "Y" : "N");
427 debug(" Scramble_enabled %s\n",
428 link_cfg->scramble_ena ? "Y" : "N");
429 debug(" LinkBW %d\n",
431 debug(" lane_count %d\n",
432 link_cfg->lane_count);
433 debug(" activepolarity %d\n",
434 link_cfg->activepolarity);
435 debug(" active_count %d\n",
436 link_cfg->active_count);
437 debug(" tu_size %d\n",
439 debug(" active_frac %d\n",
440 link_cfg->active_frac);
441 debug(" watermark %d\n",
442 link_cfg->watermark);
443 debug(" hblank_sym %d\n",
444 link_cfg->hblank_sym);
445 debug(" vblank_sym %d\n",
446 link_cfg->vblank_sym);
450 static int _tegra_dp_lower_link_config(struct tegra_dp_priv *dp,
451 struct tegra_dp_link_config *cfg)
453 switch (cfg->link_bw) {
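/*
 * Trade link rate against lane count one step at a time: at RBR halve the
 * lane count (raising the rate again if a higher rate is allowed), at HBR
 * drop to RBR, and at HBR2 either drop to HBR with the full lane count
 * (once down to one lane) or halve the lane count.
 */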
454 case SOR_LINK_SPEED_G1_62:
455 if (cfg->max_link_bw > SOR_LINK_SPEED_G1_62)
456 cfg->link_bw = SOR_LINK_SPEED_G2_7;
457 cfg->lane_count /= 2;
459 case SOR_LINK_SPEED_G2_7:
460 cfg->link_bw = SOR_LINK_SPEED_G1_62;
462 case SOR_LINK_SPEED_G5_4:
463 if (cfg->lane_count == 1) {
464 cfg->link_bw = SOR_LINK_SPEED_G2_7;
465 cfg->lane_count = cfg->max_lane_count;
467 cfg->lane_count /= 2;
471 debug("dp: Error link rate %d\n", cfg->link_bw);
475 return (cfg->lane_count > 0) ? 0 : -ENOLINK;
479 * Calculate whether the given cfg can meet the mode request.
480 * Return 0 if mode is possible, -1 otherwise
482 static int tegra_dc_dp_calc_config(struct tegra_dp_priv *dp,
483 const struct display_timing *timing,
484 struct tegra_dp_link_config *link_cfg)
486 const u32 link_rate = 27 * link_cfg->link_bw * 1000 * 1000;
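/*
 * Each unit of link_bw is 0.27 Gb/s on the wire, i.e. a 27 MHz link symbol
 * clock after 8b/10b coding, so link_rate is the per-lane symbol clock in Hz
 * (e.g. 0x0a gives 270 MHz).
 */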
487 const u64 f = 100000; /* precision factor */
488 u32 num_linkclk_line; /* Number of link clocks per line */
489 u64 ratio_f; /* Ratio of incoming to outgoing data rate */
491 u64 activesym_f; /* Activesym per TU */
497 u64 accumulated_error_f = 0;
498 u32 lowest_neg_activecount = 0;
499 u32 lowest_neg_activepolarity = 0;
500 u32 lowest_neg_tusize = 64;
501 u32 num_symbols_per_line;
502 u64 lowest_neg_activefrac = 0;
503 u64 lowest_neg_error_f = 64 * f;
508 if (!link_rate || !link_cfg->lane_count || !timing->pixelclock.typ ||
509 !link_cfg->bits_per_pixel)
512 if ((u64)timing->pixelclock.typ * link_cfg->bits_per_pixel >=
513 (u64)link_rate * 8 * link_cfg->lane_count)
516 num_linkclk_line = (u32)(lldiv(link_rate * timing->hactive.typ,
517 timing->pixelclock.typ));
519 ratio_f = (u64)timing->pixelclock.typ * link_cfg->bits_per_pixel * f;
521 do_div(ratio_f, link_rate * link_cfg->lane_count);
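/*
 * Search transfer unit (TU) sizes from 64 symbols down to 32 for the
 * combination of TU size, active count, polarity and fraction that gives
 * the smallest accumulated rounding error per line.
 */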
523 for (i = 64; i >= 32; --i) {
524 activesym_f = ratio_f * i;
525 activecount_f = lldiv(activesym_f, (u32)f) * f;
526 frac_f = activesym_f - activecount_f;
527 activecount = (u32)(lldiv(activecount_f, (u32)f));
529 if (frac_f < (lldiv(f, 2))) /* fraction < 0.5 */
537 /* warning: frac_f should be 64-bit */
538 frac_f = lldiv(f * f, frac_f); /* 1 / fraction */
539 if (frac_f > (15 * f))
540 activefrac = activepolarity ? 1 : 15;
542 activefrac = activepolarity ?
543 (u32)lldiv(frac_f, (u32)f) + 1 :
544 (u32)lldiv(frac_f, (u32)f);
550 if (activepolarity == 1)
551 approx_value_f = activefrac ? lldiv(
552 (activecount_f + (activefrac * f - f) * f),
556 approx_value_f = activefrac ?
557 activecount_f + lldiv(f, activefrac) :
560 if (activesym_f < approx_value_f) {
561 accumulated_error_f = num_linkclk_line *
562 lldiv(approx_value_f - activesym_f, i);
565 accumulated_error_f = num_linkclk_line *
566 lldiv(activesym_f - approx_value_f, i);
570 if ((neg && (lowest_neg_error_f > accumulated_error_f)) ||
571 (accumulated_error_f == 0)) {
572 lowest_neg_error_f = accumulated_error_f;
573 lowest_neg_tusize = i;
574 lowest_neg_activecount = activecount;
575 lowest_neg_activepolarity = activepolarity;
576 lowest_neg_activefrac = activefrac;
578 if (accumulated_error_f == 0)
583 if (lowest_neg_activefrac == 0) {
584 link_cfg->activepolarity = 0;
585 link_cfg->active_count = lowest_neg_activepolarity ?
586 lowest_neg_activecount : lowest_neg_activecount - 1;
587 link_cfg->tu_size = lowest_neg_tusize;
588 link_cfg->active_frac = 1;
590 link_cfg->activepolarity = lowest_neg_activepolarity;
591 link_cfg->active_count = (u32)lowest_neg_activecount;
592 link_cfg->tu_size = lowest_neg_tusize;
593 link_cfg->active_frac = (u32)lowest_neg_activefrac;
596 watermark_f = lldiv(ratio_f * link_cfg->tu_size * (f - ratio_f), f);
597 link_cfg->watermark = (u32)(lldiv(watermark_f + lowest_neg_error_f,
598 f)) + link_cfg->bits_per_pixel / 4 - 1;
599 num_symbols_per_line = (timing->hactive.typ *
600 link_cfg->bits_per_pixel) /
601 (8 * link_cfg->lane_count);
603 if (link_cfg->watermark > 30) {
604 debug("dp: sor setting: unable to get a good tusize, force watermark to 30\n");
605 link_cfg->watermark = 30;
607 } else if (link_cfg->watermark > num_symbols_per_line) {
608 debug("dp: sor setting: force watermark to the number of symbols in the line\n");
609 link_cfg->watermark = num_symbols_per_line;
614 * Refer to dev_disp.ref for more information.
615 * # symbols/hblank = ((SetRasterBlankEnd.X + SetRasterSize.Width -
616 * SetRasterBlankStart.X - 7) * link_clk / pclk)
617 * - 3 * enhanced_framing - Y
618 * where Y = (# lanes == 4) ? 3 : (# lanes == 2) ? 6 : 12
620 link_cfg->hblank_sym = (int)lldiv(((uint64_t)timing->hback_porch.typ +
621 timing->hfront_porch.typ + timing->hsync_len.typ - 7) *
622 link_rate, timing->pixelclock.typ) -
623 3 * link_cfg->enhanced_framing -
624 (12 / link_cfg->lane_count);
626 if (link_cfg->hblank_sym < 0)
627 link_cfg->hblank_sym = 0;
631 * Refer to dev_disp.ref for more information.
632 * # symbols/vblank = ((SetRasterBlankStart.X -
633 * SetRasterBlankEnd.X - 25) * link_clk / pclk)
635 * where Y = (# lanes == 4) ? 12 : (# lanes == 2) ? 21 : 39
637 link_cfg->vblank_sym = (int)lldiv(((uint64_t)timing->hactive.typ - 25)
638 * link_rate, timing->pixelclock.typ) - (36 /
639 link_cfg->lane_count) - 4;
641 if (link_cfg->vblank_sym < 0)
642 link_cfg->vblank_sym = 0;
644 link_cfg->is_valid = 1;
646 tegra_dc_dp_dump_link_cfg(dp, link_cfg);
652 static int tegra_dc_dp_init_max_link_cfg(
653 const struct display_timing *timing,
654 struct tegra_dp_priv *dp,
655 struct tegra_dp_link_config *link_cfg)
657 const int drive_current = 0x40404040;
658 const int preemphasis = 0x0f0f0f0f;
659 const int postcursor = 0;
663 ret = tegra_dc_dp_dpcd_read(dp, DP_MAX_LANE_COUNT, &dpcd_data);
666 link_cfg->max_lane_count = dpcd_data & DP_MAX_LANE_COUNT_MASK;
667 link_cfg->tps3_supported = (dpcd_data &
668 DP_MAX_LANE_COUNT_TPS3_SUPPORTED_YES) ? 1 : 0;
670 link_cfg->support_enhanced_framing =
671 (dpcd_data & DP_MAX_LANE_COUNT_ENHANCED_FRAMING_YES) ?
674 ret = tegra_dc_dp_dpcd_read(dp, DP_MAX_DOWNSPREAD, &dpcd_data);
677 link_cfg->downspread = (dpcd_data & DP_MAX_DOWNSPREAD_VAL_0_5_PCT) ?
680 ret = tegra_dc_dp_dpcd_read(dp, NV_DPCD_TRAINING_AUX_RD_INTERVAL,
681 &link_cfg->aux_rd_interval);
684 ret = tegra_dc_dp_dpcd_read(dp, DP_MAX_LINK_RATE,
685 &link_cfg->max_link_bw);
690 * Set to a high value for link training and attach.
691 * Will be re-programmed when dp is enabled.
693 link_cfg->drive_current = drive_current;
694 link_cfg->preemphasis = preemphasis;
695 link_cfg->postcursor = postcursor;
697 ret = tegra_dc_dp_dpcd_read(dp, DP_EDP_CONFIGURATION_CAP, &dpcd_data);
701 link_cfg->alt_scramber_reset_cap =
702 (dpcd_data & DP_EDP_CONFIGURATION_CAP_ASC_RESET_YES) ?
704 link_cfg->only_enhanced_framing =
705 (dpcd_data & DP_EDP_CONFIGURATION_CAP_FRAMING_CHANGE_YES) ?
708 link_cfg->lane_count = link_cfg->max_lane_count;
709 link_cfg->link_bw = link_cfg->max_link_bw;
710 link_cfg->enhanced_framing = link_cfg->support_enhanced_framing;
711 link_cfg->frame_in_ms = (1000 / 60) + 1;
713 tegra_dc_dp_calc_config(dp, timing, link_cfg);
717 static int tegra_dc_dp_set_assr(struct tegra_dp_priv *priv,
718 struct udevice *sor, int ena)
723 DP_MAIN_LINK_CHANNEL_CODING_SET_ASC_RESET_ENABLE :
724 DP_MAIN_LINK_CHANNEL_CODING_SET_ASC_RESET_DISABLE;
726 ret = tegra_dc_dp_dpcd_write(priv, DP_EDP_CONFIGURATION_SET,
731 /* Also reset the scrambler to 0xfffe */
732 tegra_dc_sor_set_internal_panel(sor, ena);
736 static int tegra_dp_set_link_bandwidth(struct tegra_dp_priv *dp,
740 tegra_dc_sor_set_link_bandwidth(sor, link_bw);
743 return tegra_dc_dp_dpcd_write(dp, DP_LINK_BW_SET, link_bw);
746 static int tegra_dp_set_lane_count(struct tegra_dp_priv *dp,
747 const struct tegra_dp_link_config *link_cfg,
753 /* check if the panel supports enhanced framing */
754 dpcd_data = link_cfg->lane_count;
755 if (link_cfg->enhanced_framing)
756 dpcd_data |= DP_LANE_COUNT_SET_ENHANCEDFRAMING_T;
757 ret = tegra_dc_dp_dpcd_write(dp, DP_LANE_COUNT_SET, dpcd_data);
761 tegra_dc_sor_set_lane_count(sor, link_cfg->lane_count);
763 /* Also power down lanes that will not be used */
767 static int tegra_dc_dp_link_trained(struct tegra_dp_priv *dp,
768 const struct tegra_dp_link_config *cfg)
775 for (lane = 0; lane < cfg->lane_count; ++lane) {
776 ret = tegra_dc_dp_dpcd_read(dp, (lane / 2) ?
777 DP_LANE2_3_STATUS : DP_LANE0_1_STATUS,
782 NV_DPCD_STATUS_LANEXPLUS1_CR_DONE_YES |
783 NV_DPCD_STATUS_LANEXPLUS1_CHN_EQ_DONE_YES |
784 NV_DPCD_STATUS_LANEXPLUS1_SYMBOL_LOCKED_YES :
786 DP_LANE_CHANNEL_EQ_DONE |
787 DP_LANE_SYMBOL_LOCKED;
788 if ((data & mask) != mask)
794 static int tegra_dp_channel_eq_status(struct tegra_dp_priv *dp,
795 const struct tegra_dp_link_config *cfg)
798 u32 n_lanes = cfg->lane_count;
803 for (cnt = 0; cnt < n_lanes / 2; cnt++) {
804 ret = tegra_dc_dp_dpcd_read(dp, DP_LANE0_1_STATUS + cnt, &data);
809 ce_done = (data & (0x1 <<
810 NV_DPCD_STATUS_LANEX_CHN_EQ_DONE_SHIFT)) &&
812 NV_DPCD_STATUS_LANEX_SYMBOL_LOCKED_SHFIT));
814 } else if (!(data & (0x1 <<
815 NV_DPCD_STATUS_LANEX_CHN_EQ_DONE_SHIFT)) ||
817 NV_DPCD_STATUS_LANEX_SYMBOL_LOCKED_SHFIT)) ||
819 NV_DPCD_STATUS_LANEXPLUS1_CHN_EQ_DONE_SHIFT)) ||
821 NV_DPCD_STATUS_LANEXPLUS1_SYMBOL_LOCKED_SHIFT)))
826 ret = tegra_dc_dp_dpcd_read(dp,
827 DP_LANE_ALIGN_STATUS_UPDATED,
831 if (!(data & NV_DPCD_LANE_ALIGN_STATUS_UPDATED_DONE_YES))
835 return ce_done ? 0 : -EIO;
838 static int tegra_dp_clock_recovery_status(struct tegra_dp_priv *dp,
839 const struct tegra_dp_link_config *cfg)
842 u32 n_lanes = cfg->lane_count;
846 for (cnt = 0; cnt < n_lanes / 2; cnt++) {
847 ret = tegra_dc_dp_dpcd_read(dp, (DP_LANE0_1_STATUS + cnt),
853 return (data_ptr & NV_DPCD_STATUS_LANEX_CR_DONE_YES) ?
855 else if (!(data_ptr & NV_DPCD_STATUS_LANEX_CR_DONE_YES) ||
856 !(data_ptr & (NV_DPCD_STATUS_LANEXPLUS1_CR_DONE_YES)))
863 static int tegra_dp_lt_adjust(struct tegra_dp_priv *dp, u32 pe[4], u32 vs[4],
864 u32 pc[4], u8 pc_supported,
865 const struct tegra_dp_link_config *cfg)
869 u32 n_lanes = cfg->lane_count;
872 for (cnt = 0; cnt < n_lanes / 2; cnt++) {
873 ret = tegra_dc_dp_dpcd_read(dp, DP_ADJUST_REQUEST_LANE0_1 + cnt,
877 pe[2 * cnt] = (data_ptr & NV_DPCD_ADJUST_REQ_LANEX_PE_MASK) >>
878 NV_DPCD_ADJUST_REQ_LANEX_PE_SHIFT;
879 vs[2 * cnt] = (data_ptr & NV_DPCD_ADJUST_REQ_LANEX_DC_MASK) >>
880 NV_DPCD_ADJUST_REQ_LANEX_DC_SHIFT;
882 (data_ptr & NV_DPCD_ADJUST_REQ_LANEXPLUS1_PE_MASK) >>
883 NV_DPCD_ADJUST_REQ_LANEXPLUS1_PE_SHIFT;
885 (data_ptr & NV_DPCD_ADJUST_REQ_LANEXPLUS1_DC_MASK) >>
886 NV_DPCD_ADJUST_REQ_LANEXPLUS1_DC_SHIFT;
889 ret = tegra_dc_dp_dpcd_read(dp, NV_DPCD_ADJUST_REQ_POST_CURSOR2,
893 for (cnt = 0; cnt < n_lanes; cnt++) {
894 pc[cnt] = (data_ptr >>
895 NV_DPCD_ADJUST_REQ_POST_CURSOR2_LANE_SHIFT(cnt)) &
896 NV_DPCD_ADJUST_REQ_POST_CURSOR2_LANE_MASK;
903 static void tegra_dp_wait_aux_training(struct tegra_dp_priv *dp,
904 bool is_clk_recovery,
905 const struct tegra_dp_link_config *cfg)
/*
 * DPCD TRAINING_AUX_RD_INTERVAL is in units of 4 ms; a value of zero
 * means the spec-minimum waits apply (100 us for clock recovery, 400 us
 * for channel equalisation), so the fixed delays below include extra margin.
 */
907 if (!cfg->aux_rd_interval)
908 udelay(is_clk_recovery ? 200 : 500);
910 mdelay(cfg->aux_rd_interval * 4);
913 static void tegra_dp_tpg(struct tegra_dp_priv *dp, u32 tp, u32 n_lanes,
914 const struct tegra_dp_link_config *cfg)
916 u8 data = (tp == training_pattern_disabled)
917 ? (tp | NV_DPCD_TRAINING_PATTERN_SET_SC_DISABLED_F)
918 : (tp | NV_DPCD_TRAINING_PATTERN_SET_SC_DISABLED_T);
920 tegra_dc_sor_set_dp_linkctl(dp->sor, 1, tp, cfg);
921 tegra_dc_dp_dpcd_write(dp, DP_TRAINING_PATTERN_SET, data);
924 static int tegra_dp_link_config(struct tegra_dp_priv *dp,
925 const struct tegra_dp_link_config *link_cfg)
931 if (link_cfg->lane_count == 0) {
932 debug("dp: error: lane count is 0; cannot set link config\n");
936 /* Set the power state if it is not at the normal operating level */
937 ret = tegra_dc_dp_dpcd_read(dp, DP_SET_POWER, &dpcd_data);
941 if (dpcd_data == DP_SET_POWER_D3) {
942 dpcd_data = DP_SET_POWER_D0;
944 /* DP spec requires 3 retries */
945 for (retry = 3; retry > 0; --retry) {
946 ret = tegra_dc_dp_dpcd_write(dp, DP_SET_POWER,
951 debug("dp: Failed to set DP panel power\n");
957 /* Enable ASSR if possible */
958 if (link_cfg->alt_scramber_reset_cap) {
959 ret = tegra_dc_dp_set_assr(dp, dp->sor, 1);
964 ret = tegra_dp_set_link_bandwidth(dp, dp->sor, link_cfg->link_bw);
966 debug("dp: Failed to set link bandwidth\n");
969 ret = tegra_dp_set_lane_count(dp, link_cfg, dp->sor);
971 debug("dp: Failed to set lane count\n");
974 tegra_dc_sor_set_dp_linkctl(dp->sor, 1, training_pattern_none,
980 static int tegra_dp_lower_link_config(struct tegra_dp_priv *dp,
981 const struct display_timing *timing,
982 struct tegra_dp_link_config *cfg)
984 struct tegra_dp_link_config tmp_cfg;
990 ret = _tegra_dp_lower_link_config(dp, cfg);
992 ret = tegra_dc_dp_calc_config(dp, timing, cfg);
994 ret = tegra_dp_link_config(dp, cfg);
1002 tegra_dp_link_config(dp, &tmp_cfg);
1006 static int tegra_dp_lt_config(struct tegra_dp_priv *dp, u32 pe[4], u32 vs[4],
1007 u32 pc[4], const struct tegra_dp_link_config *cfg)
1009 struct udevice *sor = dp->sor;
1010 u32 n_lanes = cfg->lane_count;
1011 u8 pc_supported = cfg->tps3_supported;
1015 for (cnt = 0; cnt < n_lanes; cnt++) {
1017 u32 pe_reg, vs_reg, pc_reg;
1022 mask = PR_LANE2_DP_LANE0_MASK;
1023 shift = PR_LANE2_DP_LANE0_SHIFT;
1026 mask = PR_LANE1_DP_LANE1_MASK;
1027 shift = PR_LANE1_DP_LANE1_SHIFT;
1030 mask = PR_LANE0_DP_LANE2_MASK;
1031 shift = PR_LANE0_DP_LANE2_SHIFT;
1034 mask = PR_LANE3_DP_LANE3_MASK;
1035 shift = PR_LANE3_DP_LANE3_SHIFT;
1038 debug("dp: incorrect lane cnt\n");
1042 pe_reg = tegra_dp_pe_regs[pc[cnt]][vs[cnt]][pe[cnt]];
1043 vs_reg = tegra_dp_vs_regs[pc[cnt]][vs[cnt]][pe[cnt]];
1044 pc_reg = tegra_dp_pc_regs[pc[cnt]][vs[cnt]][pe[cnt]];
1046 tegra_dp_set_pe_vs_pc(sor, mask, pe_reg << shift,
1047 vs_reg << shift, pc_reg << shift,
1051 tegra_dp_disable_tx_pu(dp->sor);
1054 for (cnt = 0; cnt < n_lanes; cnt++) {
1055 u32 max_vs_flag = tegra_dp_is_max_vs(pe[cnt], vs[cnt]);
1056 u32 max_pe_flag = tegra_dp_is_max_pe(pe[cnt], vs[cnt]);
1058 val = (vs[cnt] << NV_DPCD_TRAINING_LANEX_SET_DC_SHIFT) |
1060 NV_DPCD_TRAINING_LANEX_SET_DC_MAX_REACHED_T :
1061 NV_DPCD_TRAINING_LANEX_SET_DC_MAX_REACHED_F) |
1062 (pe[cnt] << NV_DPCD_TRAINING_LANEX_SET_PE_SHIFT) |
1064 NV_DPCD_TRAINING_LANEX_SET_PE_MAX_REACHED_T :
1065 NV_DPCD_TRAINING_LANEX_SET_PE_MAX_REACHED_F);
1066 tegra_dc_dp_dpcd_write(dp, (DP_TRAINING_LANE0_SET + cnt), val);
1070 for (cnt = 0; cnt < n_lanes / 2; cnt++) {
1071 u32 max_pc_flag0 = tegra_dp_is_max_pc(pc[cnt]);
1072 u32 max_pc_flag1 = tegra_dp_is_max_pc(pc[cnt + 1]);
1073 val = (pc[cnt] << NV_DPCD_LANEX_SET2_PC2_SHIFT) |
1075 NV_DPCD_LANEX_SET2_PC2_MAX_REACHED_T :
1076 NV_DPCD_LANEX_SET2_PC2_MAX_REACHED_F) |
1078 NV_DPCD_LANEXPLUS1_SET2_PC2_SHIFT) |
1080 NV_DPCD_LANEXPLUS1_SET2_PC2_MAX_REACHED_T :
1081 NV_DPCD_LANEXPLUS1_SET2_PC2_MAX_REACHED_F);
1082 tegra_dc_dp_dpcd_write(dp,
1083 NV_DPCD_TRAINING_LANE0_1_SET2 +
1091 static int _tegra_dp_channel_eq(struct tegra_dp_priv *dp, u32 pe[4],
1092 u32 vs[4], u32 pc[4], u8 pc_supported,
1094 const struct tegra_dp_link_config *cfg)
1098 for (retry_cnt = 0; retry_cnt < 4; retry_cnt++) {
1102 ret = tegra_dp_lt_adjust(dp, pe, vs, pc, pc_supported,
1106 tegra_dp_lt_config(dp, pe, vs, pc, cfg);
1109 tegra_dp_wait_aux_training(dp, false, cfg);
1111 if (!tegra_dp_clock_recovery_status(dp, cfg)) {
1112 debug("dp: CR failed in channel EQ sequence!\n");
1116 if (!tegra_dp_channel_eq_status(dp, cfg))
1123 static int tegra_dp_channel_eq(struct tegra_dp_priv *dp, u32 pe[4], u32 vs[4],
1125 const struct tegra_dp_link_config *cfg)
1127 u32 n_lanes = cfg->lane_count;
1128 u8 pc_supported = cfg->tps3_supported;
1130 u32 tp_src = training_pattern_2;
1133 tp_src = training_pattern_3;
1135 tegra_dp_tpg(dp, tp_src, n_lanes, cfg);
1137 ret = _tegra_dp_channel_eq(dp, pe, vs, pc, pc_supported, n_lanes, cfg);
1139 tegra_dp_tpg(dp, training_pattern_disabled, n_lanes, cfg);
1144 static int _tegra_dp_clk_recovery(struct tegra_dp_priv *dp, u32 pe[4],
1145 u32 vs[4], u32 pc[4], u8 pc_supported,
1147 const struct tegra_dp_link_config *cfg)
1153 tegra_dp_lt_config(dp, pe, vs, pc, cfg);
1154 tegra_dp_wait_aux_training(dp, true, cfg);
1156 if (tegra_dp_clock_recovery_status(dp, cfg))
1159 memcpy(vs_temp, vs, sizeof(vs_temp));
1160 tegra_dp_lt_adjust(dp, pe, vs, pc, pc_supported, cfg);
1162 if (memcmp(vs_temp, vs, sizeof(vs_temp)))
1166 } while (retry_cnt < 5);
1171 static int tegra_dp_clk_recovery(struct tegra_dp_priv *dp, u32 pe[4],
1172 u32 vs[4], u32 pc[4],
1173 const struct tegra_dp_link_config *cfg)
1175 u32 n_lanes = cfg->lane_count;
1176 u8 pc_supported = cfg->tps3_supported;
1179 tegra_dp_tpg(dp, training_pattern_1, n_lanes, cfg);
1181 err = _tegra_dp_clk_recovery(dp, pe, vs, pc, pc_supported, n_lanes,
1184 tegra_dp_tpg(dp, training_pattern_disabled, n_lanes, cfg);
1189 static int tegra_dc_dp_full_link_training(struct tegra_dp_priv *dp,
1190 const struct display_timing *timing,
1191 struct tegra_dp_link_config *cfg)
1193 struct udevice *sor = dp->sor;
1195 u32 pe[4], vs[4], pc[4];
1197 tegra_sor_precharge_lanes(sor, cfg);
1200 memset(pe, PREEMPHASIS_DISABLED, sizeof(pe));
1201 memset(vs, DRIVECURRENT_LEVEL0, sizeof(vs));
1202 memset(pc, POSTCURSOR2_LEVEL0, sizeof(pc));
1204 err = tegra_dp_clk_recovery(dp, pe, vs, pc, cfg);
1206 if (!tegra_dp_lower_link_config(dp, timing, cfg))
1209 debug("dp: clk recovery failed\n");
1213 err = tegra_dp_channel_eq(dp, pe, vs, pc, cfg);
1215 if (!tegra_dp_lower_link_config(dp, timing, cfg))
1218 debug("dp: channel equalization failed\n");
1222 tegra_dc_dp_dump_link_cfg(dp, cfg);
1231 * All link training functions are ported from the kernel dc driver.
1232 * See drivers/video/tegra/dc/dp.c for more details.
1234 static int tegra_dc_dp_fast_link_training(struct tegra_dp_priv *dp,
1235 const struct tegra_dp_link_config *link_cfg,
1236 struct udevice *sor)
1245 u32 mask = 0xffff >> ((4 - link_cfg->lane_count) * 4);
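/*
 * DP_LANE0_1_STATUS and DP_LANE2_3_STATUS carry one 4-bit status nibble
 * per lane; the mask selects just the nibbles of the active lanes.
 */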
1247 tegra_dc_sor_set_lane_parm(sor, link_cfg);
1248 tegra_dc_dp_dpcd_write(dp, DP_MAIN_LINK_CHANNEL_CODING_SET,
1252 tegra_dc_sor_set_dp_linkctl(sor, 1, training_pattern_1, link_cfg);
1253 tegra_dc_dp_dpcd_write(dp, DP_TRAINING_PATTERN_SET,
1254 DP_TRAINING_PATTERN_1);
1256 for (j = 0; j < link_cfg->lane_count; ++j)
1257 tegra_dc_dp_dpcd_write(dp, DP_TRAINING_LANE0_SET + j, 0x24);
1260 size = sizeof(data16);
1261 tegra_dc_dpaux_read(dp, DPAUX_DP_AUXCTL_CMD_AUXRD,
1262 DP_LANE0_1_STATUS, (u8 *)&data16, &size, &status);
1263 status = mask & 0x1111;
1264 if ((data16 & status) != status) {
1265 debug("dp: Link training error for TP1 (%#x, status %#x)\n",
1271 tegra_dc_dp_set_assr(dp, sor, link_cfg->scramble_ena);
1272 tegra_dc_sor_set_dp_linkctl(sor, 1, training_pattern_3, link_cfg);
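/* HBR2 (link_bw 0x14 = 20) trains with TPS3 (0x23); lower rates use TPS2 (0x22) */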
1274 tegra_dc_dp_dpcd_write(dp, DP_TRAINING_PATTERN_SET,
1275 link_cfg->link_bw == 20 ? 0x23 : 0x22);
1276 for (j = 0; j < link_cfg->lane_count; ++j)
1277 tegra_dc_dp_dpcd_write(dp, DP_TRAINING_LANE0_SET + j, 0x24);
1280 size = sizeof(data32);
1281 tegra_dc_dpaux_read(dp, DPAUX_DP_AUXCTL_CMD_AUXRD, DP_LANE0_1_STATUS,
1282 (u8 *)&data32, &size, &status);
1283 if ((data32 & mask) != (0x7777 & mask)) {
1284 debug("dp: Link training error for TP2/3 (0x%x)\n", data32);
1288 tegra_dc_sor_set_dp_linkctl(sor, 1, training_pattern_disabled,
1290 tegra_dc_dp_dpcd_write(dp, DP_TRAINING_PATTERN_SET, 0);
1292 if (tegra_dc_dp_link_trained(dp, link_cfg)) {
1293 tegra_dc_sor_read_link_config(sor, &link_bw, &lane_count);
1294 debug("Fast link training failed, link bw %d, lane # %d\n",
1295 link_bw, lane_count);
1299 debug("Fast link training succeeded, link bw %d, lane %d\n",
1300 link_cfg->link_bw, link_cfg->lane_count);
1305 static int tegra_dp_do_link_training(struct tegra_dp_priv *dp,
1306 struct tegra_dp_link_config *link_cfg,
1307 const struct display_timing *timing,
1308 struct udevice *sor)
1314 if (DO_FAST_LINK_TRAINING) {
1315 ret = tegra_dc_dp_fast_link_training(dp, link_cfg, sor);
1317 debug("dp: fast link training failed\n");
1320 * Set a known-good drive setting if fast link training
1321 * succeeded; ignore any error.
1323 ret = tegra_dc_sor_set_voltage_swing(dp->sor, link_cfg);
1325 debug("Failed to set voltage swing\n");
1331 /* Fall back to full link training */
1332 ret = tegra_dc_dp_full_link_training(dp, timing, link_cfg);
1334 debug("dp: full link training failed\n");
1339 /* Everything is good; double check the link config */
1340 tegra_dc_sor_read_link_config(sor, &link_bw, &lane_count);
1342 if ((link_cfg->link_bw == link_bw) &&
1343 (link_cfg->lane_count == lane_count))
1349 static int tegra_dc_dp_explore_link_cfg(struct tegra_dp_priv *dp,
1350 struct tegra_dp_link_config *link_cfg,
1351 struct udevice *sor,
1352 const struct display_timing *timing)
1354 struct tegra_dp_link_config temp_cfg;
1356 if (!timing->pixelclock.typ || !timing->hactive.typ ||
1357 !timing->vactive.typ) {
1358 debug("dp: invalid mode configuration\n");
1361 if (!link_cfg->max_link_bw || !link_cfg->max_lane_count) {
1362 debug("dp: invalid link configuration\n");
1366 link_cfg->is_valid = 0;
1368 memcpy(&temp_cfg, link_cfg, sizeof(temp_cfg));
1370 temp_cfg.link_bw = temp_cfg.max_link_bw;
1371 temp_cfg.lane_count = temp_cfg.max_lane_count;
1374 * set to max link config
1376 if ((!tegra_dc_dp_calc_config(dp, timing, &temp_cfg)) &&
1377 (!tegra_dp_link_config(dp, &temp_cfg)) &&
1378 (!tegra_dp_do_link_training(dp, &temp_cfg, timing, sor)))
1379 /* the max link cfg is doable */
1380 memcpy(link_cfg, &temp_cfg, sizeof(temp_cfg));
1382 return link_cfg->is_valid ? 0 : -EFAULT;
1385 static int tegra_dp_hpd_plug(struct tegra_dp_priv *dp)
1387 const int vdd_to_hpd_delay_ms = 200;
1391 start = get_timer(0);
1393 val = tegra_dpaux_readl(dp, DPAUX_DP_AUXSTAT);
1394 if (val & DPAUX_DP_AUXSTAT_HPD_STATUS_PLUGGED)
1397 } while (get_timer(start) < vdd_to_hpd_delay_ms);
1402 static int tegra_dc_dp_sink_out_of_sync(struct tegra_dp_priv *dp, u32 delay_ms)
1408 debug("%s: delay=%d\n", __func__, delay_ms);
1410 ret = tegra_dc_dp_dpcd_read(dp, DP_SINK_STATUS, &dpcd_data);
1414 out_of_sync = !(dpcd_data & DP_SINK_STATUS_PORT0_IN_SYNC);
1416 debug("SINK receive port 0 out of sync, data=%x\n", dpcd_data);
1418 debug("SINK is in sync\n");
1423 static int tegra_dc_dp_check_sink(struct tegra_dp_priv *dp,
1424 struct tegra_dp_link_config *link_cfg,
1425 const struct display_timing *timing)
1427 const int max_retry = 5;
1432 * The DP TCON may skip some main-stream frames, so we need to wait
1433 * a while before reading the DPCD SINK STATUS register, starting
1438 retries = max_retry;
1442 if (!tegra_dc_dp_sink_out_of_sync(dp, link_cfg->frame_in_ms *
1446 debug("%s: retries left %d\n", __func__, retries);
1448 printf("DP: Out of sync after %d retries\n", max_retry);
1451 ret = tegra_dc_sor_detach(dp->dc_dev, dp->sor);
1454 if (tegra_dc_dp_explore_link_cfg(dp, link_cfg, dp->sor,
1456 debug("dp: %s: failed to configure link\n", __func__);
1460 tegra_dc_sor_set_power_state(dp->sor, 1);
1461 tegra_dc_sor_attach(dp->dc_dev, dp->sor, link_cfg, timing);
1463 /* Increase delay_frame for next try in case the sink is
1464 * skipping more frames */
1469 int tegra_dp_enable(struct udevice *dev, int panel_bpp,
1470 const struct display_timing *timing)
1472 struct tegra_dp_priv *priv = dev_get_priv(dev);
1473 struct tegra_dp_link_config slink_cfg, *link_cfg = &slink_cfg;
1474 struct udevice *sor;
1479 memset(link_cfg, '\0', sizeof(*link_cfg));
1480 link_cfg->is_valid = 0;
1481 link_cfg->scramble_ena = 1;
1483 tegra_dc_dpaux_enable(priv);
1485 if (tegra_dp_hpd_plug(priv) < 0) {
1486 debug("dp: hpd plug failed\n");
1490 link_cfg->bits_per_pixel = panel_bpp;
1491 if (tegra_dc_dp_init_max_link_cfg(timing, priv, link_cfg)) {
1492 debug("dp: failed to init link configuration\n");
1496 ret = uclass_first_device(UCLASS_VIDEO_BRIDGE, &sor);
1498 debug("dp: failed to find SOR device: ret=%d\n", ret);
1502 ret = tegra_dc_sor_enable_dp(sor, link_cfg);
1506 tegra_dc_sor_set_panel_power(sor, 1);
1508 /* Write power on to DPCD */
1509 data = DP_SET_POWER_D0;
1512 ret = tegra_dc_dp_dpcd_write(priv, DP_SET_POWER, data);
1513 } while ((retry++ < DP_POWER_ON_MAX_TRIES) && ret);
1515 if (ret || retry >= DP_POWER_ON_MAX_TRIES) {
1516 debug("dp: failed to power on panel (0x%x)\n", ret);
1517 return -ENETUNREACH;
1521 /* Confirm the DP hot-plug status */
1522 if (!(tegra_dpaux_readl(priv, DPAUX_DP_AUXSTAT) &
1523 DPAUX_DP_AUXSTAT_HPD_STATUS_PLUGGED)) {
1524 debug("dp: could not detect HPD\n");
1528 /* Check DP version */
1529 if (tegra_dc_dp_dpcd_read(priv, DP_DPCD_REV, &priv->revision)) {
1530 debug("dp: failed to read the revision number from sink\n");
1534 if (tegra_dc_dp_explore_link_cfg(priv, link_cfg, sor, timing)) {
1535 debug("dp: error configuring link\n");
1539 tegra_dc_sor_set_power_state(sor, 1);
1540 ret = tegra_dc_sor_attach(priv->dc_dev, sor, link_cfg, timing);
1541 if (ret && ret != -EEXIST)
1545 * This takes a long time, but can apparently resolve a failure to
1546 * bring up the display correctly.
1549 ret = tegra_dc_dp_check_sink(priv, link_cfg, timing);
1554 /* Power down the unused lanes to save power - a few hundred mW */
1555 tegra_dc_sor_power_down_unused_lanes(sor, link_cfg);
1557 ret = video_bridge_set_backlight(sor, 80);
1559 debug("dp: failed to set backlight\n");
1563 priv->enabled = true;
1568 static int tegra_dp_ofdata_to_platdata(struct udevice *dev)
1570 struct tegra_dp_plat *plat = dev_get_platdata(dev);
1572 plat->base = dev_read_addr(dev);
1577 static int tegra_dp_read_edid(struct udevice *dev, u8 *buf, int buf_size)
1579 struct tegra_dp_priv *priv = dev_get_priv(dev);
1580 const int tegra_edid_i2c_address = 0x50;
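/* The EDID is exposed at the standard DDC I2C address 0x50 */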
1583 tegra_dc_dpaux_enable(priv);
1585 return tegra_dc_i2c_aux_read(priv, tegra_edid_i2c_address, 0, buf,
1586 buf_size, &aux_stat);
1589 static const struct dm_display_ops dp_tegra_ops = {
1590 .read_edid = tegra_dp_read_edid,
1591 .enable = tegra_dp_enable,
1594 static int dp_tegra_probe(struct udevice *dev)
1596 struct tegra_dp_plat *plat = dev_get_platdata(dev);
1597 struct tegra_dp_priv *priv = dev_get_priv(dev);
1598 struct display_plat *disp_uc_plat = dev_get_uclass_platdata(dev);
1600 priv->regs = (struct dpaux_ctlr *)plat->base;
1601 priv->enabled = false;
1603 /* Remember the display controller that is sending us video */
1604 priv->dc_dev = disp_uc_plat->src_dev;
1609 static const struct udevice_id tegra_dp_ids[] = {
1610 { .compatible = "nvidia,tegra124-dpaux" },
1614 U_BOOT_DRIVER(dp_tegra) = {
1615 .name = "dpaux_tegra",
1616 .id = UCLASS_DISPLAY,
1617 .of_match = tegra_dp_ids,
1618 .ofdata_to_platdata = tegra_dp_ofdata_to_platdata,
1619 .probe = dp_tegra_probe,
1620 .ops = &dp_tegra_ops,
1621 .priv_auto_alloc_size = sizeof(struct tegra_dp_priv),
1622 .platdata_auto_alloc_size = sizeof(struct tegra_dp_plat),
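/*
 * Typical consumer flow (a sketch only; the display-uclass helpers and
 * EDID_SIZE are assumed to be available to the caller, e.g. the Tegra DC
 * driver, and are not defined in this file):
 *
 *	struct udevice *dp;
 *	struct display_timing timing;
 *	u8 edid[EDID_SIZE];
 *
 *	uclass_first_device_err(UCLASS_DISPLAY, &dp);
 *	display_read_edid(dp, edid, sizeof(edid));
 *	...parse the EDID into &timing, e.g. with edid_get_timing()...
 *	display_enable(dp, 24, &timing);
 */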