// SPDX-License-Identifier: MIT
/*
 * Copyright © 2020 Intel Corporation
 */

#include <linux/string_helpers.h>

#include "i915_reg.h"
#include "intel_atomic.h"
#include "intel_crtc.h"
#include "intel_ddi.h"
#include "intel_de.h"
#include "intel_display_types.h"
#include "intel_fdi.h"
#include "intel_fdi_regs.h"

struct intel_fdi_funcs {
	void (*fdi_link_train)(struct intel_crtc *crtc,
			       const struct intel_crtc_state *crtc_state);
};
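
/*
 * Note: the per-platform hook is selected in intel_fdi_init_hook() at the
 * bottom of this file and dispatched through intel_fdi_link_train(), so
 * callers never invoke the ILK/SNB/IVB training routines directly.
 */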

static void assert_fdi_tx(struct drm_i915_private *dev_priv,
			  enum pipe pipe, bool state)
{
	bool cur_state;

	if (HAS_DDI(dev_priv)) {
		/*
		 * DDI does not have a specific FDI_TX register.
		 *
		 * FDI is never fed from EDP transcoder
		 * so pipe->transcoder cast is fine here.
		 */
		enum transcoder cpu_transcoder = (enum transcoder)pipe;
		cur_state = intel_de_read(dev_priv, TRANS_DDI_FUNC_CTL(cpu_transcoder)) & TRANS_DDI_FUNC_ENABLE;
	} else {
		cur_state = intel_de_read(dev_priv, FDI_TX_CTL(pipe)) & FDI_TX_ENABLE;
	}
	I915_STATE_WARN(cur_state != state,
			"FDI TX state assertion failure (expected %s, current %s)\n",
			str_on_off(state), str_on_off(cur_state));
}

void assert_fdi_tx_enabled(struct drm_i915_private *i915, enum pipe pipe)
{
	assert_fdi_tx(i915, pipe, true);
}

void assert_fdi_tx_disabled(struct drm_i915_private *i915, enum pipe pipe)
{
	assert_fdi_tx(i915, pipe, false);
}

static void assert_fdi_rx(struct drm_i915_private *dev_priv,
			  enum pipe pipe, bool state)
{
	bool cur_state;

	cur_state = intel_de_read(dev_priv, FDI_RX_CTL(pipe)) & FDI_RX_ENABLE;
	I915_STATE_WARN(cur_state != state,
			"FDI RX state assertion failure (expected %s, current %s)\n",
			str_on_off(state), str_on_off(cur_state));
}

void assert_fdi_rx_enabled(struct drm_i915_private *i915, enum pipe pipe)
{
	assert_fdi_rx(i915, pipe, true);
}

void assert_fdi_rx_disabled(struct drm_i915_private *i915, enum pipe pipe)
{
	assert_fdi_rx(i915, pipe, false);
}

void assert_fdi_tx_pll_enabled(struct drm_i915_private *i915,
			       enum pipe pipe)
{
	bool cur_state;

	/* ILK FDI PLL is always enabled */
	if (IS_IRONLAKE(i915))
		return;

	/* On Haswell, DDI ports are responsible for the FDI PLL setup */
	if (HAS_DDI(i915))
		return;

	cur_state = intel_de_read(i915, FDI_TX_CTL(pipe)) & FDI_TX_PLL_ENABLE;
	I915_STATE_WARN(!cur_state, "FDI TX PLL assertion failure, should be active but is disabled\n");
}

static void assert_fdi_rx_pll(struct drm_i915_private *i915,
			      enum pipe pipe, bool state)
{
	bool cur_state;

	cur_state = intel_de_read(i915, FDI_RX_CTL(pipe)) & FDI_RX_PLL_ENABLE;
	I915_STATE_WARN(cur_state != state,
			"FDI RX PLL assertion failure (expected %s, current %s)\n",
			str_on_off(state), str_on_off(cur_state));
}

void assert_fdi_rx_pll_enabled(struct drm_i915_private *i915, enum pipe pipe)
{
	assert_fdi_rx_pll(i915, pipe, true);
}

void assert_fdi_rx_pll_disabled(struct drm_i915_private *i915, enum pipe pipe)
{
	assert_fdi_rx_pll(i915, pipe, false);
}

void intel_fdi_link_train(struct intel_crtc *crtc,
			  const struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

	dev_priv->display.funcs.fdi->fdi_link_train(crtc, crtc_state);
}

/* units of 100MHz */
static int pipe_required_fdi_lanes(struct intel_crtc_state *crtc_state)
{
	if (crtc_state->hw.enable && crtc_state->has_pch_encoder)
		return crtc_state->fdi_lanes;

	return 0;
}

static int ilk_check_fdi_lanes(struct drm_device *dev, enum pipe pipe,
			       struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_atomic_state *state = pipe_config->uapi.state;
	struct intel_crtc *other_crtc;
	struct intel_crtc_state *other_crtc_state;

	drm_dbg_kms(&dev_priv->drm,
		    "checking fdi config on pipe %c, lanes %i\n",
		    pipe_name(pipe), pipe_config->fdi_lanes);
	if (pipe_config->fdi_lanes > 4) {
		drm_dbg_kms(&dev_priv->drm,
			    "invalid fdi lane config on pipe %c: %i lanes\n",
			    pipe_name(pipe), pipe_config->fdi_lanes);
		return -EINVAL;
	}

	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
		if (pipe_config->fdi_lanes > 2) {
			drm_dbg_kms(&dev_priv->drm,
				    "only 2 lanes on haswell, required: %i lanes\n",
				    pipe_config->fdi_lanes);
			return -EINVAL;
		} else {
			return 0;
		}
	}

	if (INTEL_NUM_PIPES(dev_priv) == 2)
		return 0;

	/* Ivybridge 3 pipe is really complicated */
	switch (pipe) {
	case PIPE_A:
		return 0;
	case PIPE_B:
		if (pipe_config->fdi_lanes <= 2)
			return 0;

		other_crtc = intel_crtc_for_pipe(dev_priv, PIPE_C);
		other_crtc_state =
			intel_atomic_get_crtc_state(state, other_crtc);
		if (IS_ERR(other_crtc_state))
			return PTR_ERR(other_crtc_state);

		if (pipe_required_fdi_lanes(other_crtc_state) > 0) {
			drm_dbg_kms(&dev_priv->drm,
				    "invalid shared fdi lane config on pipe %c: %i lanes\n",
				    pipe_name(pipe), pipe_config->fdi_lanes);
			return -EINVAL;
		}
		return 0;
	case PIPE_C:
		if (pipe_config->fdi_lanes > 2) {
			drm_dbg_kms(&dev_priv->drm,
				    "only 2 lanes on pipe %c: required %i lanes\n",
				    pipe_name(pipe), pipe_config->fdi_lanes);
			return -EINVAL;
		}

		other_crtc = intel_crtc_for_pipe(dev_priv, PIPE_B);
		other_crtc_state =
			intel_atomic_get_crtc_state(state, other_crtc);
		if (IS_ERR(other_crtc_state))
			return PTR_ERR(other_crtc_state);

		if (pipe_required_fdi_lanes(other_crtc_state) > 2) {
			drm_dbg_kms(&dev_priv->drm,
				    "fdi link B uses too many lanes to enable link C\n");
			return -EINVAL;
		}
		return 0;
	default:
		MISSING_CASE(pipe);
		return 0;
	}
}
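
/*
 * Example (a sketch of the arithmetic, not extra hardware behaviour): on ILK
 * the FDI PLL feedback divider is read back from FDI_PLL_BIOS_0 and the bit
 * rate is (divider + 2) * 100 MHz, stored below in units of 10 kHz. A divider
 * field of 25 therefore yields (25 + 2) * 10000 = 270000, i.e. the usual
 * 2.7 GHz FDI bit rate, which is also the fixed value used for SNB/IVB.
 */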
void intel_fdi_pll_freq_update(struct drm_i915_private *i915)
{
	if (IS_IRONLAKE(i915)) {
		u32 fdi_pll_clk =
			intel_de_read(i915, FDI_PLL_BIOS_0) & FDI_PLL_FB_CLOCK_MASK;

		i915->display.fdi.pll_freq = (fdi_pll_clk + 2) * 10000;
	} else if (IS_SANDYBRIDGE(i915) || IS_IVYBRIDGE(i915)) {
		i915->display.fdi.pll_freq = 270000;
	} else {
		return;
	}

	drm_dbg(&i915->drm, "FDI PLL freq=%d\n", i915->display.fdi.pll_freq);
}

int intel_fdi_link_freq(struct drm_i915_private *i915,
			const struct intel_crtc_state *pipe_config)
{
	if (HAS_DDI(i915))
		return pipe_config->port_clock; /* SPLL */
	else
		return i915->display.fdi.pll_freq;
}

int ilk_fdi_compute_config(struct intel_crtc *crtc,
			   struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *i915 = to_i915(dev);
	const struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode;
	int lane, link_bw, fdi_dotclock, ret;
	bool needs_recompute = false;

retry:
	/* FDI is a binary signal running at ~2.7GHz, encoding
	 * each output octet as 10 bits. The actual frequency
	 * is stored as a divider into a 100MHz clock, and the
	 * mode pixel clock is stored in units of 1KHz.
	 * Hence the bw of each lane in terms of the mode signal
	 * is:
	 */
	link_bw = intel_fdi_link_freq(i915, pipe_config);

	fdi_dotclock = adjusted_mode->crtc_clock;

	lane = ilk_get_lanes_required(fdi_dotclock, link_bw,
				      pipe_config->pipe_bpp);
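
	/*
	 * Worked example (rough numbers, ignoring the small safety margin
	 * that ilk_get_lanes_required() adds for spread spectrum): with
	 * 8b/10b encoding each lane carries link_bw * 8 kbit/s of payload,
	 * i.e. 270000 * 8 = 2.16 Gbit/s. A 1920x1080@60 mode with a
	 * ~148500 kHz dot clock at 24 bpp needs ~3.56 Gbit/s, so two lanes.
	 */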
	pipe_config->fdi_lanes = lane;

	intel_link_compute_m_n(pipe_config->pipe_bpp, lane, fdi_dotclock,
			       link_bw, &pipe_config->fdi_m_n, false);

	ret = ilk_check_fdi_lanes(dev, crtc->pipe, pipe_config);
	if (ret == -EDEADLK)
		return ret;

	if (ret == -EINVAL && pipe_config->pipe_bpp > 6*3) {
		pipe_config->pipe_bpp -= 2*3;
		drm_dbg_kms(&i915->drm,
			    "fdi link bw constraint, reducing pipe bpp to %i\n",
			    pipe_config->pipe_bpp);
		needs_recompute = true;
		pipe_config->bw_constrained = true;

		goto retry;
	}

	if (needs_recompute)
		return -EAGAIN;

	return ret;
}

static void cpt_set_fdi_bc_bifurcation(struct drm_i915_private *dev_priv, bool enable)
{
	u32 temp;

	temp = intel_de_read(dev_priv, SOUTH_CHICKEN1);
	if (!!(temp & FDI_BC_BIFURCATION_SELECT) == enable)
		return;

	drm_WARN_ON(&dev_priv->drm,
		    intel_de_read(dev_priv, FDI_RX_CTL(PIPE_B)) &
		    FDI_RX_ENABLE);
	drm_WARN_ON(&dev_priv->drm,
		    intel_de_read(dev_priv, FDI_RX_CTL(PIPE_C)) &
		    FDI_RX_ENABLE);

	temp &= ~FDI_BC_BIFURCATION_SELECT;
	if (enable)
		temp |= FDI_BC_BIFURCATION_SELECT;

	drm_dbg_kms(&dev_priv->drm, "%sabling fdi C rx\n",
		    enable ? "en" : "dis");
	intel_de_write(dev_priv, SOUTH_CHICKEN1, temp);
	intel_de_posting_read(dev_priv, SOUTH_CHICKEN1);
}
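
/*
 * FDI B and C share a pool of lanes on IVB: setting FDI_BC_BIFURCATION_SELECT
 * hands two of FDI B's lanes over to FDI C. The choice below follows directly
 * from that: pipe B may only keep bifurcation off while it needs more than
 * two lanes, and pipe C (or a 1/2-lane pipe B) always runs bifurcated.
 */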
static void ivb_update_fdi_bc_bifurcation(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

	switch (crtc->pipe) {
	case PIPE_A:
		break;
	case PIPE_B:
		if (crtc_state->fdi_lanes > 2)
			cpt_set_fdi_bc_bifurcation(dev_priv, false);
		else
			cpt_set_fdi_bc_bifurcation(dev_priv, true);

		break;
	case PIPE_C:
		cpt_set_fdi_bc_bifurcation(dev_priv, true);

		break;
	default:
		MISSING_CASE(crtc->pipe);
	}
}

void intel_fdi_normal_train(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum pipe pipe = crtc->pipe;
	i915_reg_t reg;
	u32 temp;

	/* enable normal train */
	reg = FDI_TX_CTL(pipe);
	temp = intel_de_read(dev_priv, reg);
	if (IS_IVYBRIDGE(dev_priv)) {
		temp &= ~FDI_LINK_TRAIN_NONE_IVB;
		temp |= FDI_LINK_TRAIN_NONE_IVB | FDI_TX_ENHANCE_FRAME_ENABLE;
	} else {
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_NONE | FDI_TX_ENHANCE_FRAME_ENABLE;
	}
	intel_de_write(dev_priv, reg, temp);

	reg = FDI_RX_CTL(pipe);
	temp = intel_de_read(dev_priv, reg);
	if (HAS_PCH_CPT(dev_priv)) {
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp |= FDI_LINK_TRAIN_NORMAL_CPT;
	} else {
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_NONE;
	}
	intel_de_write(dev_priv, reg, temp | FDI_RX_ENHANCE_FRAME_ENABLE);

	/* wait one idle pattern time */
	intel_de_posting_read(dev_priv, reg);
	udelay(1000);

	/* IVB wants error correction enabled */
	if (IS_IVYBRIDGE(dev_priv))
		intel_de_rmw(dev_priv, reg, 0, FDI_FS_ERRC_ENABLE | FDI_FE_ERRC_ENABLE);
}

/* The FDI link training functions for ILK/Ibexpeak. */
static void ilk_fdi_link_train(struct intel_crtc *crtc,
			       const struct intel_crtc_state *crtc_state)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum pipe pipe = crtc->pipe;
	i915_reg_t reg;
	u32 temp, tries;

	/*
	 * Write the TU size bits before fdi link training, so that error
	 * detection works.
	 */
	intel_de_write(dev_priv, FDI_RX_TUSIZE1(pipe),
		       intel_de_read(dev_priv, PIPE_DATA_M1(pipe)) & TU_SIZE_MASK);

	/* FDI needs bits from pipe first */
	assert_transcoder_enabled(dev_priv, crtc_state->cpu_transcoder);

	/* Train 1: umask FDI RX Interrupt symbol_lock and bit_lock bit
	   for train result */
	reg = FDI_RX_IMR(pipe);
	temp = intel_de_read(dev_priv, reg);
	temp &= ~FDI_RX_SYMBOL_LOCK;
	temp &= ~FDI_RX_BIT_LOCK;
	intel_de_write(dev_priv, reg, temp);
	intel_de_read(dev_priv, reg);
	udelay(150);

	/* enable CPU FDI TX and PCH FDI RX */
	reg = FDI_TX_CTL(pipe);
	temp = intel_de_read(dev_priv, reg);
	temp &= ~FDI_DP_PORT_WIDTH_MASK;
	temp |= FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_1;
	intel_de_write(dev_priv, reg, temp | FDI_TX_ENABLE);

	reg = FDI_RX_CTL(pipe);
	temp = intel_de_read(dev_priv, reg);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_1;
	intel_de_write(dev_priv, reg, temp | FDI_RX_ENABLE);

	intel_de_posting_read(dev_priv, reg);
	udelay(150);

	/* Ironlake workaround, enable clock pointer after FDI enable*/
	intel_de_write(dev_priv, FDI_RX_CHICKEN(pipe),
		       FDI_RX_PHASE_SYNC_POINTER_OVR);
	intel_de_write(dev_priv, FDI_RX_CHICKEN(pipe),
		       FDI_RX_PHASE_SYNC_POINTER_OVR | FDI_RX_PHASE_SYNC_POINTER_EN);

	reg = FDI_RX_IIR(pipe);
	for (tries = 0; tries < 5; tries++) {
		temp = intel_de_read(dev_priv, reg);
		drm_dbg_kms(&dev_priv->drm, "FDI_RX_IIR 0x%x\n", temp);

		if ((temp & FDI_RX_BIT_LOCK)) {
			drm_dbg_kms(&dev_priv->drm, "FDI train 1 done.\n");
			intel_de_write(dev_priv, reg, temp | FDI_RX_BIT_LOCK);
			break;
		}
	}
	if (tries == 5)
		drm_err(&dev_priv->drm, "FDI train 1 fail!\n");

	/* Train 2 */
	intel_de_rmw(dev_priv, FDI_TX_CTL(pipe),
		     FDI_LINK_TRAIN_NONE, FDI_LINK_TRAIN_PATTERN_2);
	intel_de_rmw(dev_priv, FDI_RX_CTL(pipe),
		     FDI_LINK_TRAIN_NONE, FDI_LINK_TRAIN_PATTERN_2);
	intel_de_posting_read(dev_priv, FDI_RX_CTL(pipe));
	udelay(150);

	reg = FDI_RX_IIR(pipe);
	for (tries = 0; tries < 5; tries++) {
		temp = intel_de_read(dev_priv, reg);
		drm_dbg_kms(&dev_priv->drm, "FDI_RX_IIR 0x%x\n", temp);

		if (temp & FDI_RX_SYMBOL_LOCK) {
			intel_de_write(dev_priv, reg,
				       temp | FDI_RX_SYMBOL_LOCK);
			drm_dbg_kms(&dev_priv->drm, "FDI train 2 done.\n");
			break;
		}
	}
	if (tries == 5)
		drm_err(&dev_priv->drm, "FDI train 2 fail!\n");

	drm_dbg_kms(&dev_priv->drm, "FDI train done\n");
}
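
/*
 * Voltage swing / pre-emphasis combinations tried, in order, by the SNB and
 * IVB training loops below (gen6 steps through them with index i, the IVB
 * manual trainer reuses each entry twice via snb_b_fdi_train_param[j / 2]).
 */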
static const int snb_b_fdi_train_param[] = {
	FDI_LINK_TRAIN_400MV_0DB_SNB_B,
	FDI_LINK_TRAIN_400MV_6DB_SNB_B,
	FDI_LINK_TRAIN_600MV_3_5DB_SNB_B,
	FDI_LINK_TRAIN_800MV_0DB_SNB_B,
};

/* The FDI link training functions for SNB/Cougarpoint. */
static void gen6_fdi_link_train(struct intel_crtc *crtc,
				const struct intel_crtc_state *crtc_state)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum pipe pipe = crtc->pipe;
	i915_reg_t reg;
	u32 temp, i, retry;

	/*
	 * Write the TU size bits before fdi link training, so that error
	 * detection works.
	 */
	intel_de_write(dev_priv, FDI_RX_TUSIZE1(pipe),
		       intel_de_read(dev_priv, PIPE_DATA_M1(pipe)) & TU_SIZE_MASK);

	/* Train 1: umask FDI RX Interrupt symbol_lock and bit_lock bit
	   for train result */
	reg = FDI_RX_IMR(pipe);
	temp = intel_de_read(dev_priv, reg);
	temp &= ~FDI_RX_SYMBOL_LOCK;
	temp &= ~FDI_RX_BIT_LOCK;
	intel_de_write(dev_priv, reg, temp);

	intel_de_posting_read(dev_priv, reg);
	udelay(150);

	/* enable CPU FDI TX and PCH FDI RX */
	reg = FDI_TX_CTL(pipe);
	temp = intel_de_read(dev_priv, reg);
	temp &= ~FDI_DP_PORT_WIDTH_MASK;
	temp |= FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_1;
	temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
	/* SNB-B */
	temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
	intel_de_write(dev_priv, reg, temp | FDI_TX_ENABLE);

	intel_de_write(dev_priv, FDI_RX_MISC(pipe),
		       FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90);

	reg = FDI_RX_CTL(pipe);
	temp = intel_de_read(dev_priv, reg);
	if (HAS_PCH_CPT(dev_priv)) {
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
	} else {
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_PATTERN_1;
	}
	intel_de_write(dev_priv, reg, temp | FDI_RX_ENABLE);

	intel_de_posting_read(dev_priv, reg);
	udelay(150);

	for (i = 0; i < 4; i++) {
		intel_de_rmw(dev_priv, FDI_TX_CTL(pipe),
			     FDI_LINK_TRAIN_VOL_EMP_MASK, snb_b_fdi_train_param[i]);
		intel_de_posting_read(dev_priv, FDI_TX_CTL(pipe));
		udelay(500);

		for (retry = 0; retry < 5; retry++) {
			reg = FDI_RX_IIR(pipe);
			temp = intel_de_read(dev_priv, reg);
			drm_dbg_kms(&dev_priv->drm, "FDI_RX_IIR 0x%x\n", temp);
			if (temp & FDI_RX_BIT_LOCK) {
				intel_de_write(dev_priv, reg,
					       temp | FDI_RX_BIT_LOCK);
				drm_dbg_kms(&dev_priv->drm,
					    "FDI train 1 done.\n");
				break;
			}
			udelay(50);
		}
		if (retry < 5)
			break;
	}
	if (i == 4)
		drm_err(&dev_priv->drm, "FDI train 1 fail!\n");

	/* Train 2 */
	reg = FDI_TX_CTL(pipe);
	temp = intel_de_read(dev_priv, reg);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_2;
	if (IS_SANDYBRIDGE(dev_priv)) {
		temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
		/* SNB-B */
		temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
	}
	intel_de_write(dev_priv, reg, temp);

	reg = FDI_RX_CTL(pipe);
	temp = intel_de_read(dev_priv, reg);
	if (HAS_PCH_CPT(dev_priv)) {
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp |= FDI_LINK_TRAIN_PATTERN_2_CPT;
	} else {
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_PATTERN_2;
	}
	intel_de_write(dev_priv, reg, temp);

	intel_de_posting_read(dev_priv, reg);
	udelay(150);

	for (i = 0; i < 4; i++) {
		intel_de_rmw(dev_priv, FDI_TX_CTL(pipe),
			     FDI_LINK_TRAIN_VOL_EMP_MASK, snb_b_fdi_train_param[i]);
		intel_de_posting_read(dev_priv, FDI_TX_CTL(pipe));
		udelay(500);

		for (retry = 0; retry < 5; retry++) {
			reg = FDI_RX_IIR(pipe);
			temp = intel_de_read(dev_priv, reg);
			drm_dbg_kms(&dev_priv->drm, "FDI_RX_IIR 0x%x\n", temp);
			if (temp & FDI_RX_SYMBOL_LOCK) {
				intel_de_write(dev_priv, reg,
					       temp | FDI_RX_SYMBOL_LOCK);
				drm_dbg_kms(&dev_priv->drm,
					    "FDI train 2 done.\n");
				break;
			}
			udelay(50);
		}
		if (retry < 5)
			break;
	}
	if (i == 4)
		drm_err(&dev_priv->drm, "FDI train 2 fail!\n");

	drm_dbg_kms(&dev_priv->drm, "FDI train done.\n");
}

/* Manual link training for Ivy Bridge A0 parts */
static void ivb_manual_fdi_link_train(struct intel_crtc *crtc,
				      const struct intel_crtc_state *crtc_state)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum pipe pipe = crtc->pipe;
	i915_reg_t reg;
	u32 temp, i, j;

	ivb_update_fdi_bc_bifurcation(crtc_state);

	/*
	 * Write the TU size bits before fdi link training, so that error
	 * detection works.
	 */
	intel_de_write(dev_priv, FDI_RX_TUSIZE1(pipe),
		       intel_de_read(dev_priv, PIPE_DATA_M1(pipe)) & TU_SIZE_MASK);

	/* Train 1: umask FDI RX Interrupt symbol_lock and bit_lock bit
	   for train result */
	reg = FDI_RX_IMR(pipe);
	temp = intel_de_read(dev_priv, reg);
	temp &= ~FDI_RX_SYMBOL_LOCK;
	temp &= ~FDI_RX_BIT_LOCK;
	intel_de_write(dev_priv, reg, temp);

	intel_de_posting_read(dev_priv, reg);
	udelay(150);

	drm_dbg_kms(&dev_priv->drm, "FDI_RX_IIR before link train 0x%x\n",
		    intel_de_read(dev_priv, FDI_RX_IIR(pipe)));

	/* Try each vswing and preemphasis setting twice before moving on */
	for (j = 0; j < ARRAY_SIZE(snb_b_fdi_train_param) * 2; j++) {
		/* disable first in case we need to retry */
		reg = FDI_TX_CTL(pipe);
		temp = intel_de_read(dev_priv, reg);
		temp &= ~(FDI_LINK_TRAIN_AUTO | FDI_LINK_TRAIN_NONE_IVB);
		temp &= ~FDI_TX_ENABLE;
		intel_de_write(dev_priv, reg, temp);

		reg = FDI_RX_CTL(pipe);
		temp = intel_de_read(dev_priv, reg);
		temp &= ~FDI_LINK_TRAIN_AUTO;
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp &= ~FDI_RX_ENABLE;
		intel_de_write(dev_priv, reg, temp);

		/* enable CPU FDI TX and PCH FDI RX */
		reg = FDI_TX_CTL(pipe);
		temp = intel_de_read(dev_priv, reg);
		temp &= ~FDI_DP_PORT_WIDTH_MASK;
		temp |= FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes);
		temp |= FDI_LINK_TRAIN_PATTERN_1_IVB;
		temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
		temp |= snb_b_fdi_train_param[j/2];
		temp |= FDI_COMPOSITE_SYNC;
		intel_de_write(dev_priv, reg, temp | FDI_TX_ENABLE);

		intel_de_write(dev_priv, FDI_RX_MISC(pipe),
			       FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90);

		reg = FDI_RX_CTL(pipe);
		temp = intel_de_read(dev_priv, reg);
		temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
		temp |= FDI_COMPOSITE_SYNC;
		intel_de_write(dev_priv, reg, temp | FDI_RX_ENABLE);

		intel_de_posting_read(dev_priv, reg);
		udelay(1); /* should be 0.5us */

		for (i = 0; i < 4; i++) {
			reg = FDI_RX_IIR(pipe);
			temp = intel_de_read(dev_priv, reg);
			drm_dbg_kms(&dev_priv->drm, "FDI_RX_IIR 0x%x\n", temp);

			if (temp & FDI_RX_BIT_LOCK ||
			    (intel_de_read(dev_priv, reg) & FDI_RX_BIT_LOCK)) {
				intel_de_write(dev_priv, reg,
					       temp | FDI_RX_BIT_LOCK);
				drm_dbg_kms(&dev_priv->drm,
					    "FDI train 1 done, level %i.\n",
					    i);
				break;
			}
			udelay(1); /* should be 0.5us */
		}
		if (i == 4) {
			drm_dbg_kms(&dev_priv->drm,
				    "FDI train 1 fail on vswing %d\n", j / 2);
			continue;
		}

		/* Train 2 */
		intel_de_rmw(dev_priv, FDI_TX_CTL(pipe),
			     FDI_LINK_TRAIN_NONE_IVB,
			     FDI_LINK_TRAIN_PATTERN_2_IVB);
		intel_de_rmw(dev_priv, FDI_RX_CTL(pipe),
			     FDI_LINK_TRAIN_PATTERN_MASK_CPT,
			     FDI_LINK_TRAIN_PATTERN_2_CPT);
		intel_de_posting_read(dev_priv, FDI_RX_CTL(pipe));
		udelay(2); /* should be 1.5us */

		for (i = 0; i < 4; i++) {
			reg = FDI_RX_IIR(pipe);
			temp = intel_de_read(dev_priv, reg);
			drm_dbg_kms(&dev_priv->drm, "FDI_RX_IIR 0x%x\n", temp);

			if (temp & FDI_RX_SYMBOL_LOCK ||
			    (intel_de_read(dev_priv, reg) & FDI_RX_SYMBOL_LOCK)) {
				intel_de_write(dev_priv, reg,
					       temp | FDI_RX_SYMBOL_LOCK);
				drm_dbg_kms(&dev_priv->drm,
					    "FDI train 2 done, level %i.\n",
					    i);
				goto train_done;
			}
			udelay(2); /* should be 1.5us */
		}
		if (i == 4)
			drm_dbg_kms(&dev_priv->drm,
				    "FDI train 2 fail on vswing %d\n", j / 2);
	}

train_done:
	drm_dbg_kms(&dev_priv->drm, "FDI train done.\n");
}

/* Starting with Haswell, different DDI ports can work in FDI mode for
 * connection to the PCH-located connectors. For this, it is necessary to train
 * both the DDI port and PCH receiver for the desired DDI buffer settings.
 *
 * The recommended port to work in FDI mode is DDI E, which we use here. Also,
 * please note that when FDI mode is active on DDI E, it shares 2 lines with
 * DDI A (which is used for eDP)
 */
void hsw_fdi_link_train(struct intel_encoder *encoder,
			const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	u32 temp, i, rx_ctl_val;
	int n_entries;

	encoder->get_buf_trans(encoder, crtc_state, &n_entries);

	hsw_prepare_dp_ddi_buffers(encoder, crtc_state);

	/* Set the FDI_RX_MISC pwrdn lanes and the 2 workarounds listed at the
	 * mode set "sequence for CRT port" document:
	 * - TP1 to TP2 time with the default value
	 * - FDI delay to 90h
	 *
	 * WaFDIAutoLinkSetTimingOverrride:hsw
	 */
	intel_de_write(dev_priv, FDI_RX_MISC(PIPE_A),
		       FDI_RX_PWRDN_LANE1_VAL(2) | FDI_RX_PWRDN_LANE0_VAL(2) | FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90);

	/* Enable the PCH Receiver FDI PLL */
	rx_ctl_val = dev_priv->display.fdi.rx_config | FDI_RX_ENHANCE_FRAME_ENABLE |
		     FDI_RX_PLL_ENABLE |
		     FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes);
	intel_de_write(dev_priv, FDI_RX_CTL(PIPE_A), rx_ctl_val);
	intel_de_posting_read(dev_priv, FDI_RX_CTL(PIPE_A));
	udelay(220);

	/* Switch from Rawclk to PCDclk */
	rx_ctl_val |= FDI_PCDCLK;
	intel_de_write(dev_priv, FDI_RX_CTL(PIPE_A), rx_ctl_val);

	/* Configure Port Clock Select */
	drm_WARN_ON(&dev_priv->drm, crtc_state->shared_dpll->info->id != DPLL_ID_SPLL);
	intel_ddi_enable_clock(encoder, crtc_state);

	/* Start the training iterating through available voltages and emphasis,
	 * testing each value twice. */
	for (i = 0; i < n_entries * 2; i++) {
		/* Configure DP_TP_CTL with auto-training */
		intel_de_write(dev_priv, DP_TP_CTL(PORT_E),
			       DP_TP_CTL_FDI_AUTOTRAIN |
			       DP_TP_CTL_ENHANCED_FRAME_ENABLE |
			       DP_TP_CTL_LINK_TRAIN_PAT1 |
			       DP_TP_CTL_ENABLE);

		/* Configure and enable DDI_BUF_CTL for DDI E with next voltage.
		 * DDI E does not support port reversal, the functionality is
		 * achieved on the PCH side in FDI_RX_CTL, so no need to set the
		 * port reversal bit */
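		/*
		 * The value written below packs the enable bit, the port
		 * width (fdi_lanes - 1) and the buffer translation entry;
		 * i / 2 because, per the loop above, each translation entry
		 * is tried twice before moving on.
		 */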
		intel_de_write(dev_priv, DDI_BUF_CTL(PORT_E),
			       DDI_BUF_CTL_ENABLE | ((crtc_state->fdi_lanes - 1) << 1) | DDI_BUF_TRANS_SELECT(i / 2));
		intel_de_posting_read(dev_priv, DDI_BUF_CTL(PORT_E));

		udelay(600);

		/* Program PCH FDI Receiver TU */
		intel_de_write(dev_priv, FDI_RX_TUSIZE1(PIPE_A), TU_SIZE(64));

		/* Enable PCH FDI Receiver with auto-training */
		rx_ctl_val |= FDI_RX_ENABLE | FDI_LINK_TRAIN_AUTO;
		intel_de_write(dev_priv, FDI_RX_CTL(PIPE_A), rx_ctl_val);
		intel_de_posting_read(dev_priv, FDI_RX_CTL(PIPE_A));

		/* Wait for FDI receiver lane calibration */
		udelay(30);

		/* Unset FDI_RX_MISC pwrdn lanes */
		intel_de_rmw(dev_priv, FDI_RX_MISC(PIPE_A),
			     FDI_RX_PWRDN_LANE1_MASK | FDI_RX_PWRDN_LANE0_MASK, 0);
		intel_de_posting_read(dev_priv, FDI_RX_MISC(PIPE_A));

		/* Wait for FDI auto training time */
		udelay(5);

		temp = intel_de_read(dev_priv, DP_TP_STATUS(PORT_E));
		if (temp & DP_TP_STATUS_AUTOTRAIN_DONE) {
			drm_dbg_kms(&dev_priv->drm,
				    "FDI link training done on step %d\n", i);
			break;
		}

		/*
		 * Leave things enabled even if we failed to train FDI.
		 * Results in less fireworks from the state checker.
		 */
		if (i == n_entries * 2 - 1) {
			drm_err(&dev_priv->drm, "FDI link training failed!\n");
			break;
		}

		rx_ctl_val &= ~FDI_RX_ENABLE;
		intel_de_write(dev_priv, FDI_RX_CTL(PIPE_A), rx_ctl_val);
		intel_de_posting_read(dev_priv, FDI_RX_CTL(PIPE_A));

		intel_de_rmw(dev_priv, DDI_BUF_CTL(PORT_E), DDI_BUF_CTL_ENABLE, 0);
		intel_de_posting_read(dev_priv, DDI_BUF_CTL(PORT_E));

		/* Disable DP_TP_CTL and FDI_RX_CTL and retry */
		intel_de_rmw(dev_priv, DP_TP_CTL(PORT_E), DP_TP_CTL_ENABLE, 0);
		intel_de_posting_read(dev_priv, DP_TP_CTL(PORT_E));

		intel_wait_ddi_buf_idle(dev_priv, PORT_E);

		/* Reset FDI_RX_MISC pwrdn lanes */
		intel_de_rmw(dev_priv, FDI_RX_MISC(PIPE_A),
			     FDI_RX_PWRDN_LANE1_MASK | FDI_RX_PWRDN_LANE0_MASK,
			     FDI_RX_PWRDN_LANE1_VAL(2) | FDI_RX_PWRDN_LANE0_VAL(2));
		intel_de_posting_read(dev_priv, FDI_RX_MISC(PIPE_A));
	}

	/* Enable normal pixel sending for FDI */
	intel_de_write(dev_priv, DP_TP_CTL(PORT_E),
		       DP_TP_CTL_FDI_AUTOTRAIN |
		       DP_TP_CTL_LINK_TRAIN_NORMAL |
		       DP_TP_CTL_ENHANCED_FRAME_ENABLE |
		       DP_TP_CTL_ENABLE);
}

void hsw_fdi_disable(struct intel_encoder *encoder)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);

	/*
	 * Bspec lists this as both step 13 (before DDI_BUF_CTL disable)
	 * and step 18 (after clearing PORT_CLK_SEL). Based on a BUN,
	 * step 13 is the correct place for it. Step 18 is where it was
	 * originally before the BUN.
	 */
	intel_de_rmw(dev_priv, FDI_RX_CTL(PIPE_A), FDI_RX_ENABLE, 0);
	intel_de_rmw(dev_priv, DDI_BUF_CTL(PORT_E), DDI_BUF_CTL_ENABLE, 0);
	intel_wait_ddi_buf_idle(dev_priv, PORT_E);
	intel_ddi_disable_clock(encoder);
	intel_de_rmw(dev_priv, FDI_RX_MISC(PIPE_A),
		     FDI_RX_PWRDN_LANE1_MASK | FDI_RX_PWRDN_LANE0_MASK,
		     FDI_RX_PWRDN_LANE1_VAL(2) | FDI_RX_PWRDN_LANE0_VAL(2));
	intel_de_rmw(dev_priv, FDI_RX_CTL(PIPE_A), FDI_PCDCLK, 0);
	intel_de_rmw(dev_priv, FDI_RX_CTL(PIPE_A), FDI_RX_PLL_ENABLE, 0);
}

void ilk_fdi_pll_enable(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;
	i915_reg_t reg;
	u32 temp;

	/* enable PCH FDI RX PLL, wait warmup plus DMI latency */
	reg = FDI_RX_CTL(pipe);
	temp = intel_de_read(dev_priv, reg);
	temp &= ~(FDI_DP_PORT_WIDTH_MASK | (0x7 << 16));
	temp |= FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes);
	temp |= (intel_de_read(dev_priv, TRANSCONF(pipe)) & TRANSCONF_BPC_MASK) << 11;
	intel_de_write(dev_priv, reg, temp | FDI_RX_PLL_ENABLE);

	intel_de_posting_read(dev_priv, reg);
	udelay(200);

	/* Switch from Rawclk to PCDclk */
	intel_de_rmw(dev_priv, reg, 0, FDI_PCDCLK);
	intel_de_posting_read(dev_priv, reg);
	udelay(200);

	/* Enable CPU FDI TX PLL, always on for Ironlake */
	reg = FDI_TX_CTL(pipe);
	temp = intel_de_read(dev_priv, reg);
	if ((temp & FDI_TX_PLL_ENABLE) == 0) {
		intel_de_write(dev_priv, reg, temp | FDI_TX_PLL_ENABLE);

		intel_de_posting_read(dev_priv, reg);
		udelay(100);
	}
}

void ilk_fdi_pll_disable(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum pipe pipe = crtc->pipe;

	/* Switch from PCDclk to Rawclk */
	intel_de_rmw(dev_priv, FDI_RX_CTL(pipe), FDI_PCDCLK, 0);

	/* Disable CPU FDI TX PLL */
	intel_de_rmw(dev_priv, FDI_TX_CTL(pipe), FDI_TX_PLL_ENABLE, 0);
	intel_de_posting_read(dev_priv, FDI_TX_CTL(pipe));
	udelay(100);

	/* Wait for the clocks to turn off. */
	intel_de_rmw(dev_priv, FDI_RX_CTL(pipe), FDI_RX_PLL_ENABLE, 0);
	intel_de_posting_read(dev_priv, FDI_RX_CTL(pipe));
	udelay(100);
}

void ilk_fdi_disable(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;
	i915_reg_t reg;
	u32 temp;

	/* disable CPU FDI tx and PCH FDI rx */
	intel_de_rmw(dev_priv, FDI_TX_CTL(pipe), FDI_TX_ENABLE, 0);
	intel_de_posting_read(dev_priv, FDI_TX_CTL(pipe));

	reg = FDI_RX_CTL(pipe);
	temp = intel_de_read(dev_priv, reg);
	temp &= ~(0x7 << 16);
	temp |= (intel_de_read(dev_priv, TRANSCONF(pipe)) & TRANSCONF_BPC_MASK) << 11;
	intel_de_write(dev_priv, reg, temp & ~FDI_RX_ENABLE);

	intel_de_posting_read(dev_priv, reg);
	udelay(100);

	/* Ironlake workaround, disable clock pointer after downing FDI */
	if (HAS_PCH_IBX(dev_priv))
		intel_de_write(dev_priv, FDI_RX_CHICKEN(pipe),
			       FDI_RX_PHASE_SYNC_POINTER_OVR);

	/* still set train pattern 1 */
	intel_de_rmw(dev_priv, FDI_TX_CTL(pipe),
		     FDI_LINK_TRAIN_NONE, FDI_LINK_TRAIN_PATTERN_1);

	reg = FDI_RX_CTL(pipe);
	temp = intel_de_read(dev_priv, reg);
	if (HAS_PCH_CPT(dev_priv)) {
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
	} else {
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_PATTERN_1;
	}
	/* BPC in FDI rx is consistent with that in TRANSCONF */
	temp &= ~(0x07 << 16);
	temp |= (intel_de_read(dev_priv, TRANSCONF(pipe)) & TRANSCONF_BPC_MASK) << 11;
	intel_de_write(dev_priv, reg, temp);

	intel_de_posting_read(dev_priv, reg);
	udelay(100);
}

static const struct intel_fdi_funcs ilk_funcs = {
	.fdi_link_train = ilk_fdi_link_train,
};

static const struct intel_fdi_funcs gen6_funcs = {
	.fdi_link_train = gen6_fdi_link_train,
};

static const struct intel_fdi_funcs ivb_funcs = {
	.fdi_link_train = ivb_manual_fdi_link_train,
};

void
intel_fdi_init_hook(struct drm_i915_private *dev_priv)
{
	if (IS_IRONLAKE(dev_priv)) {
		dev_priv->display.funcs.fdi = &ilk_funcs;
	} else if (IS_SANDYBRIDGE(dev_priv)) {
		dev_priv->display.funcs.fdi = &gen6_funcs;
	} else if (IS_IVYBRIDGE(dev_priv)) {
		/* FIXME: detect B0+ stepping and use auto training */
		dev_priv->display.funcs.fdi = &ivb_funcs;
	}
}