// SPDX-License-Identifier: MIT
/*
 * Copyright © 2020 Intel Corporation
 */

#include <linux/string_helpers.h>

#include "i915_reg.h"
#include "intel_atomic.h"
#include "intel_crtc.h"
#include "intel_ddi.h"
#include "intel_de.h"
#include "intel_display_types.h"
#include "intel_fdi.h"
#include "intel_fdi_regs.h"

struct intel_fdi_funcs {
	void (*fdi_link_train)(struct intel_crtc *crtc,
			       const struct intel_crtc_state *crtc_state);
};
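
/*
 * The fdi_link_train() hook above is selected per platform in
 * intel_fdi_init_hook() at the bottom of this file.
 */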

static void assert_fdi_tx(struct drm_i915_private *dev_priv,
			  enum pipe pipe, bool state)
{
	bool cur_state;

	if (HAS_DDI(dev_priv)) {
		/*
		 * DDI does not have a specific FDI_TX register.
		 *
		 * FDI is never fed from EDP transcoder
		 * so pipe->transcoder cast is fine here.
		 */
		enum transcoder cpu_transcoder = (enum transcoder)pipe;
		cur_state = intel_de_read(dev_priv, TRANS_DDI_FUNC_CTL(cpu_transcoder)) & TRANS_DDI_FUNC_ENABLE;
	} else {
		cur_state = intel_de_read(dev_priv, FDI_TX_CTL(pipe)) & FDI_TX_ENABLE;
	}
	I915_STATE_WARN(dev_priv, cur_state != state,
			"FDI TX state assertion failure (expected %s, current %s)\n",
			str_on_off(state), str_on_off(cur_state));
}

void assert_fdi_tx_enabled(struct drm_i915_private *i915, enum pipe pipe)
{
	assert_fdi_tx(i915, pipe, true);
}

void assert_fdi_tx_disabled(struct drm_i915_private *i915, enum pipe pipe)
{
	assert_fdi_tx(i915, pipe, false);
}

static void assert_fdi_rx(struct drm_i915_private *dev_priv,
			  enum pipe pipe, bool state)
{
	bool cur_state;

	cur_state = intel_de_read(dev_priv, FDI_RX_CTL(pipe)) & FDI_RX_ENABLE;
	I915_STATE_WARN(dev_priv, cur_state != state,
			"FDI RX state assertion failure (expected %s, current %s)\n",
			str_on_off(state), str_on_off(cur_state));
}

void assert_fdi_rx_enabled(struct drm_i915_private *i915, enum pipe pipe)
{
	assert_fdi_rx(i915, pipe, true);
}

void assert_fdi_rx_disabled(struct drm_i915_private *i915, enum pipe pipe)
{
	assert_fdi_rx(i915, pipe, false);
}

void assert_fdi_tx_pll_enabled(struct drm_i915_private *i915,
			       enum pipe pipe)
{
	bool cur_state;

	/* ILK FDI PLL is always enabled */
	if (IS_IRONLAKE(i915))
		return;

	/* On Haswell, DDI ports are responsible for the FDI PLL setup */
	if (HAS_DDI(i915))
		return;

	cur_state = intel_de_read(i915, FDI_TX_CTL(pipe)) & FDI_TX_PLL_ENABLE;
	I915_STATE_WARN(i915, !cur_state,
			"FDI TX PLL assertion failure, should be active but is disabled\n");
}

static void assert_fdi_rx_pll(struct drm_i915_private *i915,
			      enum pipe pipe, bool state)
{
	bool cur_state;

	cur_state = intel_de_read(i915, FDI_RX_CTL(pipe)) & FDI_RX_PLL_ENABLE;
	I915_STATE_WARN(i915, cur_state != state,
			"FDI RX PLL assertion failure (expected %s, current %s)\n",
			str_on_off(state), str_on_off(cur_state));
}

void assert_fdi_rx_pll_enabled(struct drm_i915_private *i915, enum pipe pipe)
{
	assert_fdi_rx_pll(i915, pipe, true);
}

void assert_fdi_rx_pll_disabled(struct drm_i915_private *i915, enum pipe pipe)
{
	assert_fdi_rx_pll(i915, pipe, false);
}

void intel_fdi_link_train(struct intel_crtc *crtc,
			  const struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

	dev_priv->display.funcs.fdi->fdi_link_train(crtc, crtc_state);
}

/* units of 100MHz */
static int pipe_required_fdi_lanes(struct intel_crtc_state *crtc_state)
{
	if (crtc_state->hw.enable && crtc_state->has_pch_encoder)
		return crtc_state->fdi_lanes;

	return 0;
}

static int ilk_check_fdi_lanes(struct drm_device *dev, enum pipe pipe,
			       struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_atomic_state *state = pipe_config->uapi.state;
	struct intel_crtc *other_crtc;
	struct intel_crtc_state *other_crtc_state;

	drm_dbg_kms(&dev_priv->drm,
		    "checking fdi config on pipe %c, lanes %i\n",
		    pipe_name(pipe), pipe_config->fdi_lanes);
	if (pipe_config->fdi_lanes > 4) {
		drm_dbg_kms(&dev_priv->drm,
			    "invalid fdi lane config on pipe %c: %i lanes\n",
			    pipe_name(pipe), pipe_config->fdi_lanes);
		return -EINVAL;
	}

	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
		if (pipe_config->fdi_lanes > 2) {
			drm_dbg_kms(&dev_priv->drm,
				    "only 2 lanes on haswell, required: %i lanes\n",
				    pipe_config->fdi_lanes);
			return -EINVAL;
		} else {
			return 0;
		}
	}

	if (INTEL_NUM_PIPES(dev_priv) == 2)
		return 0;

	/* Ivybridge 3 pipe is really complicated */
	switch (pipe) {
	case PIPE_A:
		return 0;
	case PIPE_B:
		if (pipe_config->fdi_lanes <= 2)
			return 0;

		other_crtc = intel_crtc_for_pipe(dev_priv, PIPE_C);
		other_crtc_state =
			intel_atomic_get_crtc_state(state, other_crtc);
		if (IS_ERR(other_crtc_state))
			return PTR_ERR(other_crtc_state);

		if (pipe_required_fdi_lanes(other_crtc_state) > 0) {
			drm_dbg_kms(&dev_priv->drm,
				    "invalid shared fdi lane config on pipe %c: %i lanes\n",
				    pipe_name(pipe), pipe_config->fdi_lanes);
			return -EINVAL;
		}
		return 0;
	case PIPE_C:
		if (pipe_config->fdi_lanes > 2) {
			drm_dbg_kms(&dev_priv->drm,
				    "only 2 lanes on pipe %c: required %i lanes\n",
				    pipe_name(pipe), pipe_config->fdi_lanes);
			return -EINVAL;
		}

		other_crtc = intel_crtc_for_pipe(dev_priv, PIPE_B);
		other_crtc_state =
			intel_atomic_get_crtc_state(state, other_crtc);
		if (IS_ERR(other_crtc_state))
			return PTR_ERR(other_crtc_state);

		if (pipe_required_fdi_lanes(other_crtc_state) > 2) {
			drm_dbg_kms(&dev_priv->drm,
				    "fdi link B uses too many lanes to enable link C\n");
			return -EINVAL;
		}
		return 0;
	default:
		MISSING_CASE(pipe);
		return 0;
	}
}

void intel_fdi_pll_freq_update(struct drm_i915_private *i915)
{
	if (IS_IRONLAKE(i915)) {
		u32 fdi_pll_clk =
			intel_de_read(i915, FDI_PLL_BIOS_0) & FDI_PLL_FB_CLOCK_MASK;

		i915->display.fdi.pll_freq = (fdi_pll_clk + 2) * 10000;
	} else if (IS_SANDYBRIDGE(i915) || IS_IVYBRIDGE(i915)) {
		i915->display.fdi.pll_freq = 270000;
	} else {
		return;
	}

	drm_dbg(&i915->drm, "FDI PLL freq=%d\n", i915->display.fdi.pll_freq);
}

int intel_fdi_link_freq(struct drm_i915_private *i915,
			const struct intel_crtc_state *pipe_config)
{
	if (HAS_DDI(i915))
		return pipe_config->port_clock; /* SPLL */
	else
		return i915->display.fdi.pll_freq;
}

int ilk_fdi_compute_config(struct intel_crtc *crtc,
			   struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *i915 = to_i915(dev);
	const struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode;
	int lane, link_bw, fdi_dotclock, ret;
	bool needs_recompute = false;

retry:
	/* FDI is a binary signal running at ~2.7GHz, encoding
	 * each output octet as 10 bits. The actual frequency
	 * is stored as a divider into a 100MHz clock, and the
	 * mode pixel clock is stored in units of 1KHz.
	 * Hence the bw of each lane in terms of the mode signal
	 * is:
	 */
	link_bw = intel_fdi_link_freq(i915, pipe_config);

	fdi_dotclock = adjusted_mode->crtc_clock;

	lane = ilk_get_lanes_required(fdi_dotclock, link_bw,
				      pipe_config->pipe_bpp);
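
	/*
	 * Rough worked example (illustrative numbers, not from bspec):
	 * a 1920x1080@60 mode has a crtc_clock of ~148500 (kHz); at 24 bpp
	 * that is ~3.56 Gbit/s of pixel data, while one FDI lane carries
	 * roughly link_bw * 8 bit/s (270000 * 8 ~= 2.16 Gbit/s), so two
	 * lanes are needed (ignoring any extra link margin applied by
	 * ilk_get_lanes_required()).
	 */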

	pipe_config->fdi_lanes = lane;

	intel_link_compute_m_n(pipe_config->pipe_bpp, lane, fdi_dotclock,
			       link_bw, &pipe_config->fdi_m_n, false);

	ret = ilk_check_fdi_lanes(dev, crtc->pipe, pipe_config);
	if (ret == -EDEADLK)
		return ret;

	if (ret == -EINVAL && pipe_config->pipe_bpp > 6*3) {
		pipe_config->pipe_bpp -= 2*3;
		drm_dbg_kms(&i915->drm,
			    "fdi link bw constraint, reducing pipe bpp to %i\n",
			    pipe_config->pipe_bpp);
		needs_recompute = true;
		pipe_config->bw_constrained = true;

		goto retry;
	}

	if (needs_recompute)
		return -EAGAIN;

	return ret;
}

static void cpt_set_fdi_bc_bifurcation(struct drm_i915_private *dev_priv, bool enable)
{
	u32 temp;

	temp = intel_de_read(dev_priv, SOUTH_CHICKEN1);
	if (!!(temp & FDI_BC_BIFURCATION_SELECT) == enable)
		return;

	drm_WARN_ON(&dev_priv->drm,
		    intel_de_read(dev_priv, FDI_RX_CTL(PIPE_B)) &
		    FDI_RX_ENABLE);
	drm_WARN_ON(&dev_priv->drm,
		    intel_de_read(dev_priv, FDI_RX_CTL(PIPE_C)) &
		    FDI_RX_ENABLE);

	temp &= ~FDI_BC_BIFURCATION_SELECT;
	if (enable)
		temp |= FDI_BC_BIFURCATION_SELECT;

	drm_dbg_kms(&dev_priv->drm, "%sabling fdi C rx\n",
		    enable ? "en" : "dis");
	intel_de_write(dev_priv, SOUTH_CHICKEN1, temp);
	intel_de_posting_read(dev_priv, SOUTH_CHICKEN1);
}
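
/*
 * On IVB the FDI B and FDI C links share four lanes: with bifurcation
 * enabled two of them are routed to FDI C (a 2+2 split), with it disabled
 * pipe B may use all four lanes but FDI C cannot be used.
 */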

static void ivb_update_fdi_bc_bifurcation(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

	switch (crtc->pipe) {
	case PIPE_A:
		break;
	case PIPE_B:
		if (crtc_state->fdi_lanes > 2)
			cpt_set_fdi_bc_bifurcation(dev_priv, false);
		else
			cpt_set_fdi_bc_bifurcation(dev_priv, true);

		break;
	case PIPE_C:
		cpt_set_fdi_bc_bifurcation(dev_priv, true);

		break;
	default:
		MISSING_CASE(crtc->pipe);
	}
}

void intel_fdi_normal_train(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum pipe pipe = crtc->pipe;
	i915_reg_t reg;
	u32 temp;

	/* enable normal train */
	reg = FDI_TX_CTL(pipe);
	temp = intel_de_read(dev_priv, reg);
	if (IS_IVYBRIDGE(dev_priv)) {
		temp &= ~FDI_LINK_TRAIN_NONE_IVB;
		temp |= FDI_LINK_TRAIN_NONE_IVB | FDI_TX_ENHANCE_FRAME_ENABLE;
	} else {
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_NONE | FDI_TX_ENHANCE_FRAME_ENABLE;
	}
	intel_de_write(dev_priv, reg, temp);

	reg = FDI_RX_CTL(pipe);
	temp = intel_de_read(dev_priv, reg);
	if (HAS_PCH_CPT(dev_priv)) {
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp |= FDI_LINK_TRAIN_NORMAL_CPT;
	} else {
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_NONE;
	}
	intel_de_write(dev_priv, reg, temp | FDI_RX_ENHANCE_FRAME_ENABLE);

	/* wait one idle pattern time */
	intel_de_posting_read(dev_priv, reg);
	udelay(1000);

	/* IVB wants error correction enabled */
	if (IS_IVYBRIDGE(dev_priv))
		intel_de_rmw(dev_priv, reg, 0, FDI_FS_ERRC_ENABLE | FDI_FE_ERRC_ENABLE);
}

/* The FDI link training functions for ILK/Ibexpeak. */
static void ilk_fdi_link_train(struct intel_crtc *crtc,
			       const struct intel_crtc_state *crtc_state)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum pipe pipe = crtc->pipe;
	i915_reg_t reg;
	u32 temp, tries;

	/*
	 * Write the TU size bits before fdi link training, so that error
	 * detection works as expected.
	 */
	intel_de_write(dev_priv, FDI_RX_TUSIZE1(pipe),
		       intel_de_read(dev_priv, PIPE_DATA_M1(pipe)) & TU_SIZE_MASK);

	/* FDI needs bits from pipe first */
	assert_transcoder_enabled(dev_priv, crtc_state->cpu_transcoder);

	/* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bit
	   for train 2 */
	reg = FDI_RX_IMR(pipe);
	temp = intel_de_read(dev_priv, reg);
	temp &= ~FDI_RX_SYMBOL_LOCK;
	temp &= ~FDI_RX_BIT_LOCK;
	intel_de_write(dev_priv, reg, temp);
	intel_de_read(dev_priv, reg);
	udelay(150);

	/* enable CPU FDI TX and PCH FDI RX */
	reg = FDI_TX_CTL(pipe);
	temp = intel_de_read(dev_priv, reg);
	temp &= ~FDI_DP_PORT_WIDTH_MASK;
	temp |= FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_1;
	intel_de_write(dev_priv, reg, temp | FDI_TX_ENABLE);

	reg = FDI_RX_CTL(pipe);
	temp = intel_de_read(dev_priv, reg);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_1;
	intel_de_write(dev_priv, reg, temp | FDI_RX_ENABLE);

	intel_de_posting_read(dev_priv, reg);
	udelay(150);

	/* Ironlake workaround, enable clock pointer after FDI enable */
	intel_de_write(dev_priv, FDI_RX_CHICKEN(pipe),
		       FDI_RX_PHASE_SYNC_POINTER_OVR);
	intel_de_write(dev_priv, FDI_RX_CHICKEN(pipe),
		       FDI_RX_PHASE_SYNC_POINTER_OVR | FDI_RX_PHASE_SYNC_POINTER_EN);

	reg = FDI_RX_IIR(pipe);
	for (tries = 0; tries < 5; tries++) {
		temp = intel_de_read(dev_priv, reg);
		drm_dbg_kms(&dev_priv->drm, "FDI_RX_IIR 0x%x\n", temp);

		if ((temp & FDI_RX_BIT_LOCK)) {
			drm_dbg_kms(&dev_priv->drm, "FDI train 1 done.\n");
			intel_de_write(dev_priv, reg, temp | FDI_RX_BIT_LOCK);
			break;
		}
	}
	if (tries == 5)
		drm_err(&dev_priv->drm, "FDI train 1 fail!\n");

	/* Train 2 */
	intel_de_rmw(dev_priv, FDI_TX_CTL(pipe),
		     FDI_LINK_TRAIN_NONE, FDI_LINK_TRAIN_PATTERN_2);
	intel_de_rmw(dev_priv, FDI_RX_CTL(pipe),
		     FDI_LINK_TRAIN_NONE, FDI_LINK_TRAIN_PATTERN_2);
	intel_de_posting_read(dev_priv, FDI_RX_CTL(pipe));
	udelay(150);

	reg = FDI_RX_IIR(pipe);
	for (tries = 0; tries < 5; tries++) {
		temp = intel_de_read(dev_priv, reg);
		drm_dbg_kms(&dev_priv->drm, "FDI_RX_IIR 0x%x\n", temp);

		if (temp & FDI_RX_SYMBOL_LOCK) {
			intel_de_write(dev_priv, reg,
				       temp | FDI_RX_SYMBOL_LOCK);
			drm_dbg_kms(&dev_priv->drm, "FDI train 2 done.\n");
			break;
		}
	}
	if (tries == 5)
		drm_err(&dev_priv->drm, "FDI train 2 fail!\n");

	drm_dbg_kms(&dev_priv->drm, "FDI train done\n");
}
static const int snb_b_fdi_train_param[] = {
	FDI_LINK_TRAIN_400MV_0DB_SNB_B,
	FDI_LINK_TRAIN_400MV_6DB_SNB_B,
	FDI_LINK_TRAIN_600MV_3_5DB_SNB_B,
	FDI_LINK_TRAIN_800MV_0DB_SNB_B,
};

/* The FDI link training functions for SNB/Cougarpoint. */
static void gen6_fdi_link_train(struct intel_crtc *crtc,
				const struct intel_crtc_state *crtc_state)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum pipe pipe = crtc->pipe;
	i915_reg_t reg;
	u32 temp, i, retry;

	/*
	 * Write the TU size bits before fdi link training, so that error
	 * detection works as expected.
	 */
	intel_de_write(dev_priv, FDI_RX_TUSIZE1(pipe),
		       intel_de_read(dev_priv, PIPE_DATA_M1(pipe)) & TU_SIZE_MASK);

	/* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bit
	   for train 2 */
	reg = FDI_RX_IMR(pipe);
	temp = intel_de_read(dev_priv, reg);
	temp &= ~FDI_RX_SYMBOL_LOCK;
	temp &= ~FDI_RX_BIT_LOCK;
	intel_de_write(dev_priv, reg, temp);

	intel_de_posting_read(dev_priv, reg);
	udelay(150);

	/* enable CPU FDI TX and PCH FDI RX */
	reg = FDI_TX_CTL(pipe);
	temp = intel_de_read(dev_priv, reg);
	temp &= ~FDI_DP_PORT_WIDTH_MASK;
	temp |= FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_1;
	temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
	/* SNB-B */
	temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
	intel_de_write(dev_priv, reg, temp | FDI_TX_ENABLE);

	intel_de_write(dev_priv, FDI_RX_MISC(pipe),
		       FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90);

	reg = FDI_RX_CTL(pipe);
	temp = intel_de_read(dev_priv, reg);
	if (HAS_PCH_CPT(dev_priv)) {
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
	} else {
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_PATTERN_1;
	}
	intel_de_write(dev_priv, reg, temp | FDI_RX_ENABLE);

	intel_de_posting_read(dev_priv, reg);
	udelay(150);

	for (i = 0; i < 4; i++) {
		intel_de_rmw(dev_priv, FDI_TX_CTL(pipe),
			     FDI_LINK_TRAIN_VOL_EMP_MASK, snb_b_fdi_train_param[i]);
		intel_de_posting_read(dev_priv, FDI_TX_CTL(pipe));
		udelay(500);

		for (retry = 0; retry < 5; retry++) {
			reg = FDI_RX_IIR(pipe);
			temp = intel_de_read(dev_priv, reg);
			drm_dbg_kms(&dev_priv->drm, "FDI_RX_IIR 0x%x\n", temp);
			if (temp & FDI_RX_BIT_LOCK) {
				intel_de_write(dev_priv, reg,
					       temp | FDI_RX_BIT_LOCK);
				drm_dbg_kms(&dev_priv->drm,
					    "FDI train 1 done.\n");
				break;
			}
			udelay(50);
		}
		if (retry < 5)
			break;
	}
	if (i == 4)
		drm_err(&dev_priv->drm, "FDI train 1 fail!\n");

	/* Train 2 */
	reg = FDI_TX_CTL(pipe);
	temp = intel_de_read(dev_priv, reg);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_2;
	if (IS_SANDYBRIDGE(dev_priv)) {
		temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
		/* SNB-B */
		temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
	}
	intel_de_write(dev_priv, reg, temp);

	reg = FDI_RX_CTL(pipe);
	temp = intel_de_read(dev_priv, reg);
	if (HAS_PCH_CPT(dev_priv)) {
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp |= FDI_LINK_TRAIN_PATTERN_2_CPT;
	} else {
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_PATTERN_2;
	}
	intel_de_write(dev_priv, reg, temp);

	intel_de_posting_read(dev_priv, reg);
	udelay(150);

	for (i = 0; i < 4; i++) {
		intel_de_rmw(dev_priv, FDI_TX_CTL(pipe),
			     FDI_LINK_TRAIN_VOL_EMP_MASK, snb_b_fdi_train_param[i]);
		intel_de_posting_read(dev_priv, FDI_TX_CTL(pipe));
		udelay(500);

		for (retry = 0; retry < 5; retry++) {
			reg = FDI_RX_IIR(pipe);
			temp = intel_de_read(dev_priv, reg);
			drm_dbg_kms(&dev_priv->drm, "FDI_RX_IIR 0x%x\n", temp);
			if (temp & FDI_RX_SYMBOL_LOCK) {
				intel_de_write(dev_priv, reg,
					       temp | FDI_RX_SYMBOL_LOCK);
				drm_dbg_kms(&dev_priv->drm,
					    "FDI train 2 done.\n");
				break;
			}
			udelay(50);
		}
		if (retry < 5)
			break;
	}
	if (i == 4)
		drm_err(&dev_priv->drm, "FDI train 2 fail!\n");

	drm_dbg_kms(&dev_priv->drm, "FDI train done.\n");
}

/* Manual link training for Ivy Bridge A0 parts */
static void ivb_manual_fdi_link_train(struct intel_crtc *crtc,
				      const struct intel_crtc_state *crtc_state)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum pipe pipe = crtc->pipe;
	i915_reg_t reg;
	u32 temp, i, j;

	ivb_update_fdi_bc_bifurcation(crtc_state);

	/*
	 * Write the TU size bits before fdi link training, so that error
	 * detection works as expected.
	 */
	intel_de_write(dev_priv, FDI_RX_TUSIZE1(pipe),
		       intel_de_read(dev_priv, PIPE_DATA_M1(pipe)) & TU_SIZE_MASK);

	/* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bit
	   for train 2 */
	reg = FDI_RX_IMR(pipe);
	temp = intel_de_read(dev_priv, reg);
	temp &= ~FDI_RX_SYMBOL_LOCK;
	temp &= ~FDI_RX_BIT_LOCK;
	intel_de_write(dev_priv, reg, temp);

	intel_de_posting_read(dev_priv, reg);
	udelay(150);

	drm_dbg_kms(&dev_priv->drm, "FDI_RX_IIR before link train 0x%x\n",
		    intel_de_read(dev_priv, FDI_RX_IIR(pipe)));

	/* Try each vswing and preemphasis setting twice before moving on */
	for (j = 0; j < ARRAY_SIZE(snb_b_fdi_train_param) * 2; j++) {
		/* disable first in case we need to retry */
		reg = FDI_TX_CTL(pipe);
		temp = intel_de_read(dev_priv, reg);
		temp &= ~(FDI_LINK_TRAIN_AUTO | FDI_LINK_TRAIN_NONE_IVB);
		temp &= ~FDI_TX_ENABLE;
		intel_de_write(dev_priv, reg, temp);

		reg = FDI_RX_CTL(pipe);
		temp = intel_de_read(dev_priv, reg);
		temp &= ~FDI_LINK_TRAIN_AUTO;
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp &= ~FDI_RX_ENABLE;
		intel_de_write(dev_priv, reg, temp);

		/* enable CPU FDI TX and PCH FDI RX */
		reg = FDI_TX_CTL(pipe);
		temp = intel_de_read(dev_priv, reg);
		temp &= ~FDI_DP_PORT_WIDTH_MASK;
		temp |= FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes);
		temp |= FDI_LINK_TRAIN_PATTERN_1_IVB;
		temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
		temp |= snb_b_fdi_train_param[j / 2];
		temp |= FDI_COMPOSITE_SYNC;
		intel_de_write(dev_priv, reg, temp | FDI_TX_ENABLE);

		intel_de_write(dev_priv, FDI_RX_MISC(pipe),
			       FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90);

		reg = FDI_RX_CTL(pipe);
		temp = intel_de_read(dev_priv, reg);
		temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
		temp |= FDI_COMPOSITE_SYNC;
		intel_de_write(dev_priv, reg, temp | FDI_RX_ENABLE);

		intel_de_posting_read(dev_priv, reg);
		udelay(1); /* should be 0.5us */

		for (i = 0; i < 4; i++) {
			reg = FDI_RX_IIR(pipe);
			temp = intel_de_read(dev_priv, reg);
			drm_dbg_kms(&dev_priv->drm, "FDI_RX_IIR 0x%x\n", temp);

			if (temp & FDI_RX_BIT_LOCK ||
			    (intel_de_read(dev_priv, reg) & FDI_RX_BIT_LOCK)) {
				intel_de_write(dev_priv, reg,
					       temp | FDI_RX_BIT_LOCK);
				drm_dbg_kms(&dev_priv->drm,
					    "FDI train 1 done, level %i.\n",
					    i);
				break;
			}
			udelay(1); /* should be 0.5us */
		}
		if (i == 4) {
			drm_dbg_kms(&dev_priv->drm,
				    "FDI train 1 fail on vswing %d\n", j / 2);
			continue;
		}

		/* Train 2 */
		intel_de_rmw(dev_priv, FDI_TX_CTL(pipe),
			     FDI_LINK_TRAIN_NONE_IVB,
			     FDI_LINK_TRAIN_PATTERN_2_IVB);
		intel_de_rmw(dev_priv, FDI_RX_CTL(pipe),
			     FDI_LINK_TRAIN_PATTERN_MASK_CPT,
			     FDI_LINK_TRAIN_PATTERN_2_CPT);
		intel_de_posting_read(dev_priv, FDI_RX_CTL(pipe));
		udelay(2); /* should be 1.5us */

		for (i = 0; i < 4; i++) {
			reg = FDI_RX_IIR(pipe);
			temp = intel_de_read(dev_priv, reg);
			drm_dbg_kms(&dev_priv->drm, "FDI_RX_IIR 0x%x\n", temp);

			if (temp & FDI_RX_SYMBOL_LOCK ||
			    (intel_de_read(dev_priv, reg) & FDI_RX_SYMBOL_LOCK)) {
				intel_de_write(dev_priv, reg,
					       temp | FDI_RX_SYMBOL_LOCK);
				drm_dbg_kms(&dev_priv->drm,
					    "FDI train 2 done, level %i.\n",
					    i);
				goto train_done;
			}
			udelay(2); /* should be 1.5us */
		}
		if (i == 4)
			drm_dbg_kms(&dev_priv->drm,
				    "FDI train 2 fail on vswing %d\n", j / 2);
	}

train_done:
	drm_dbg_kms(&dev_priv->drm, "FDI train done.\n");
}

/* Starting with Haswell, different DDI ports can work in FDI mode for
 * connection to the PCH-located connectors. For this, it is necessary to train
 * both the DDI port and PCH receiver for the desired DDI buffer settings.
 *
 * The recommended port to work in FDI mode is DDI E, which we use here. Also,
 * please note that when FDI mode is active on DDI E, it shares 2 lines with
 * DDI A (which is used for eDP)
 */
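/*
 * In the training loop below the hardware does the heavy lifting: DP_TP_CTL
 * is put into FDI auto-train mode, DDI_BUF_CTL steps through the platform
 * buffer translation entries (each tried twice), and completion is detected
 * via DP_TP_STATUS_AUTOTRAIN_DONE.
 */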
void hsw_fdi_link_train(struct intel_encoder *encoder,
			const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	u32 temp, i, rx_ctl_val;
	int n_entries;

	encoder->get_buf_trans(encoder, crtc_state, &n_entries);

	hsw_prepare_dp_ddi_buffers(encoder, crtc_state);

	/* Set the FDI_RX_MISC pwrdn lanes and the 2 workarounds listed at the
	 * mode set "sequence for CRT port" document:
	 * - TP1 to TP2 time with the default value
	 * - FDI delay to 90h
	 *
	 * WaFDIAutoLinkSetTimingOverrride:hsw
	 */
	intel_de_write(dev_priv, FDI_RX_MISC(PIPE_A),
		       FDI_RX_PWRDN_LANE1_VAL(2) | FDI_RX_PWRDN_LANE0_VAL(2) | FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90);

	/* Enable the PCH Receiver FDI PLL */
	rx_ctl_val = dev_priv->display.fdi.rx_config | FDI_RX_ENHANCE_FRAME_ENABLE |
		     FDI_RX_PLL_ENABLE |
		     FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes);
	intel_de_write(dev_priv, FDI_RX_CTL(PIPE_A), rx_ctl_val);
	intel_de_posting_read(dev_priv, FDI_RX_CTL(PIPE_A));
	udelay(220);

	/* Switch from Rawclk to PCDclk */
	rx_ctl_val |= FDI_PCDCLK;
	intel_de_write(dev_priv, FDI_RX_CTL(PIPE_A), rx_ctl_val);

	/* Configure Port Clock Select */
	drm_WARN_ON(&dev_priv->drm, crtc_state->shared_dpll->info->id != DPLL_ID_SPLL);
	intel_ddi_enable_clock(encoder, crtc_state);

	/* Start the training iterating through available voltages and emphasis,
	 * testing each value twice. */
	for (i = 0; i < n_entries * 2; i++) {
		/* Configure DP_TP_CTL with auto-training */
		intel_de_write(dev_priv, DP_TP_CTL(PORT_E),
			       DP_TP_CTL_FDI_AUTOTRAIN |
			       DP_TP_CTL_ENHANCED_FRAME_ENABLE |
			       DP_TP_CTL_LINK_TRAIN_PAT1 |
			       DP_TP_CTL_ENABLE);

		/* Configure and enable DDI_BUF_CTL for DDI E with next voltage.
		 * DDI E does not support port reversal, the functionality is
		 * achieved on the PCH side in FDI_RX_CTL, so no need to set the
		 * port reversal bit */
		intel_de_write(dev_priv, DDI_BUF_CTL(PORT_E),
			       DDI_BUF_CTL_ENABLE | ((crtc_state->fdi_lanes - 1) << 1) | DDI_BUF_TRANS_SELECT(i / 2));
		intel_de_posting_read(dev_priv, DDI_BUF_CTL(PORT_E));

		udelay(600);

		/* Program PCH FDI Receiver TU */
		intel_de_write(dev_priv, FDI_RX_TUSIZE1(PIPE_A), TU_SIZE(64));

		/* Enable PCH FDI Receiver with auto-training */
		rx_ctl_val |= FDI_RX_ENABLE | FDI_LINK_TRAIN_AUTO;
		intel_de_write(dev_priv, FDI_RX_CTL(PIPE_A), rx_ctl_val);
		intel_de_posting_read(dev_priv, FDI_RX_CTL(PIPE_A));

		/* Wait for FDI receiver lane calibration */
		udelay(30);

		/* Unset FDI_RX_MISC pwrdn lanes */
		intel_de_rmw(dev_priv, FDI_RX_MISC(PIPE_A),
			     FDI_RX_PWRDN_LANE1_MASK | FDI_RX_PWRDN_LANE0_MASK, 0);
		intel_de_posting_read(dev_priv, FDI_RX_MISC(PIPE_A));

		/* Wait for FDI auto training time */
		udelay(5);

		temp = intel_de_read(dev_priv, DP_TP_STATUS(PORT_E));
		if (temp & DP_TP_STATUS_AUTOTRAIN_DONE) {
			drm_dbg_kms(&dev_priv->drm,
				    "FDI link training done on step %d\n", i);
			break;
		}

		/*
		 * Leave things enabled even if we failed to train FDI.
		 * Results in less fireworks from the state checker.
		 */
		if (i == n_entries * 2 - 1) {
			drm_err(&dev_priv->drm, "FDI link training failed!\n");
			break;
		}

		rx_ctl_val &= ~FDI_RX_ENABLE;
		intel_de_write(dev_priv, FDI_RX_CTL(PIPE_A), rx_ctl_val);
		intel_de_posting_read(dev_priv, FDI_RX_CTL(PIPE_A));

		intel_de_rmw(dev_priv, DDI_BUF_CTL(PORT_E), DDI_BUF_CTL_ENABLE, 0);
		intel_de_posting_read(dev_priv, DDI_BUF_CTL(PORT_E));

		/* Disable DP_TP_CTL and FDI_RX_CTL and retry */
		intel_de_rmw(dev_priv, DP_TP_CTL(PORT_E), DP_TP_CTL_ENABLE, 0);
		intel_de_posting_read(dev_priv, DP_TP_CTL(PORT_E));

		intel_wait_ddi_buf_idle(dev_priv, PORT_E);

		/* Reset FDI_RX_MISC pwrdn lanes */
		intel_de_rmw(dev_priv, FDI_RX_MISC(PIPE_A),
			     FDI_RX_PWRDN_LANE1_MASK | FDI_RX_PWRDN_LANE0_MASK,
			     FDI_RX_PWRDN_LANE1_VAL(2) | FDI_RX_PWRDN_LANE0_VAL(2));
		intel_de_posting_read(dev_priv, FDI_RX_MISC(PIPE_A));
	}

	/* Enable normal pixel sending for FDI */
	intel_de_write(dev_priv, DP_TP_CTL(PORT_E),
		       DP_TP_CTL_FDI_AUTOTRAIN |
		       DP_TP_CTL_LINK_TRAIN_NORMAL |
		       DP_TP_CTL_ENHANCED_FRAME_ENABLE |
		       DP_TP_CTL_ENABLE);
}

void hsw_fdi_disable(struct intel_encoder *encoder)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);

	/*
	 * Bspec lists this as both step 13 (before DDI_BUF_CTL disable)
	 * and step 18 (after clearing PORT_CLK_SEL). Based on a BUN,
	 * step 13 is the correct place for it. Step 18 is where it was
	 * originally before the BUN.
	 */
	intel_de_rmw(dev_priv, FDI_RX_CTL(PIPE_A), FDI_RX_ENABLE, 0);
	intel_de_rmw(dev_priv, DDI_BUF_CTL(PORT_E), DDI_BUF_CTL_ENABLE, 0);
	intel_wait_ddi_buf_idle(dev_priv, PORT_E);
	intel_ddi_disable_clock(encoder);
	intel_de_rmw(dev_priv, FDI_RX_MISC(PIPE_A),
		     FDI_RX_PWRDN_LANE1_MASK | FDI_RX_PWRDN_LANE0_MASK,
		     FDI_RX_PWRDN_LANE1_VAL(2) | FDI_RX_PWRDN_LANE0_VAL(2));
	intel_de_rmw(dev_priv, FDI_RX_CTL(PIPE_A), FDI_PCDCLK, 0);
	intel_de_rmw(dev_priv, FDI_RX_CTL(PIPE_A), FDI_RX_PLL_ENABLE, 0);
}

void ilk_fdi_pll_enable(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;
	i915_reg_t reg;
	u32 temp;

	/* enable PCH FDI RX PLL, wait warmup plus DMI latency */
	reg = FDI_RX_CTL(pipe);
	temp = intel_de_read(dev_priv, reg);
	temp &= ~(FDI_DP_PORT_WIDTH_MASK | (0x7 << 16));
	temp |= FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes);
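	/*
	 * The << 11 shift moves the TRANSCONF BPC field into the FDI_RX_CTL
	 * BPC field, keeping the two consistent (see the matching comment in
	 * ilk_fdi_disable()).
	 */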
	temp |= (intel_de_read(dev_priv, TRANSCONF(pipe)) & TRANSCONF_BPC_MASK) << 11;
	intel_de_write(dev_priv, reg, temp | FDI_RX_PLL_ENABLE);

	intel_de_posting_read(dev_priv, reg);
	udelay(200);

	/* Switch from Rawclk to PCDclk */
	intel_de_rmw(dev_priv, reg, 0, FDI_PCDCLK);
	intel_de_posting_read(dev_priv, reg);
	udelay(200);

	/* Enable CPU FDI TX PLL, always on for Ironlake */
	reg = FDI_TX_CTL(pipe);
	temp = intel_de_read(dev_priv, reg);
	if ((temp & FDI_TX_PLL_ENABLE) == 0) {
		intel_de_write(dev_priv, reg, temp | FDI_TX_PLL_ENABLE);

		intel_de_posting_read(dev_priv, reg);
		udelay(100);
	}
}

void ilk_fdi_pll_disable(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum pipe pipe = crtc->pipe;

	/* Switch from PCDclk to Rawclk */
	intel_de_rmw(dev_priv, FDI_RX_CTL(pipe), FDI_PCDCLK, 0);

	/* Disable CPU FDI TX PLL */
	intel_de_rmw(dev_priv, FDI_TX_CTL(pipe), FDI_TX_PLL_ENABLE, 0);
	intel_de_posting_read(dev_priv, FDI_TX_CTL(pipe));
	udelay(100);

	/* Wait for the clocks to turn off. */
	intel_de_rmw(dev_priv, FDI_RX_CTL(pipe), FDI_RX_PLL_ENABLE, 0);
	intel_de_posting_read(dev_priv, FDI_RX_CTL(pipe));
	udelay(100);
}

void ilk_fdi_disable(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;
	i915_reg_t reg;
	u32 temp;

	/* disable CPU FDI tx and PCH FDI rx */
	intel_de_rmw(dev_priv, FDI_TX_CTL(pipe), FDI_TX_ENABLE, 0);
	intel_de_posting_read(dev_priv, FDI_TX_CTL(pipe));

	reg = FDI_RX_CTL(pipe);
	temp = intel_de_read(dev_priv, reg);
	temp &= ~(0x7 << 16);
	temp |= (intel_de_read(dev_priv, TRANSCONF(pipe)) & TRANSCONF_BPC_MASK) << 11;
	intel_de_write(dev_priv, reg, temp & ~FDI_RX_ENABLE);

	intel_de_posting_read(dev_priv, reg);
	udelay(100);

	/* Ironlake workaround, disable clock pointer after downing FDI */
	if (HAS_PCH_IBX(dev_priv))
		intel_de_write(dev_priv, FDI_RX_CHICKEN(pipe),
			       FDI_RX_PHASE_SYNC_POINTER_OVR);

	/* still set train pattern 1 */
	intel_de_rmw(dev_priv, FDI_TX_CTL(pipe),
		     FDI_LINK_TRAIN_NONE, FDI_LINK_TRAIN_PATTERN_1);

	reg = FDI_RX_CTL(pipe);
	temp = intel_de_read(dev_priv, reg);
	if (HAS_PCH_CPT(dev_priv)) {
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
	} else {
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_PATTERN_1;
	}
	/* BPC in FDI rx is consistent with that in TRANSCONF */
	temp &= ~(0x07 << 16);
	temp |= (intel_de_read(dev_priv, TRANSCONF(pipe)) & TRANSCONF_BPC_MASK) << 11;
	intel_de_write(dev_priv, reg, temp);

	intel_de_posting_read(dev_priv, reg);
	udelay(100);
}

static const struct intel_fdi_funcs ilk_funcs = {
	.fdi_link_train = ilk_fdi_link_train,
};

static const struct intel_fdi_funcs gen6_funcs = {
	.fdi_link_train = gen6_fdi_link_train,
};

static const struct intel_fdi_funcs ivb_funcs = {
	.fdi_link_train = ivb_manual_fdi_link_train,
};

void
intel_fdi_init_hook(struct drm_i915_private *dev_priv)
{
	if (IS_IRONLAKE(dev_priv)) {
		dev_priv->display.funcs.fdi = &ilk_funcs;
	} else if (IS_SANDYBRIDGE(dev_priv)) {
		dev_priv->display.funcs.fdi = &gen6_funcs;
	} else if (IS_IVYBRIDGE(dev_priv)) {
		/* FIXME: detect B0+ stepping and use auto training */
		dev_priv->display.funcs.fdi = &ivb_funcs;
	}
}