1 // SPDX-License-Identifier: MIT
3 * Copyright © 2020 Intel Corporation
6 #include <linux/string_helpers.h>
9 #include "intel_atomic.h"
10 #include "intel_crtc.h"
11 #include "intel_ddi.h"
13 #include "intel_display_types.h"
14 #include "intel_fdi.h"
15 #include "intel_fdi_regs.h"
16 #include "intel_link_bw.h"
/* Per-platform FDI function table; selected in intel_fdi_init_hook(). */
struct intel_fdi_funcs {
	/* Train the FDI link of @crtc for the mode described by @crtc_state. */
	void (*fdi_link_train)(struct intel_crtc *crtc,
			       const struct intel_crtc_state *crtc_state);
/*
 * Warn via I915_STATE_WARN() unless the FDI TX side of @pipe is in the
 * expected @state (true = enabled, false = disabled).
 */
static void assert_fdi_tx(struct drm_i915_private *dev_priv,
			  enum pipe pipe, bool state)
	if (HAS_DDI(dev_priv)) {
		/*
		 * DDI does not have a specific FDI_TX register.
		 *
		 * FDI is never fed from EDP transcoder
		 * so pipe->transcoder cast is fine here.
		 */
		enum transcoder cpu_transcoder = (enum transcoder)pipe;
		cur_state = intel_de_read(dev_priv, TRANS_DDI_FUNC_CTL(cpu_transcoder)) & TRANS_DDI_FUNC_ENABLE;
		/* Pre-DDI: the dedicated FDI TX control register exists. */
		cur_state = intel_de_read(dev_priv, FDI_TX_CTL(pipe)) & FDI_TX_ENABLE;
	I915_STATE_WARN(dev_priv, cur_state != state,
			"FDI TX state assertion failure (expected %s, current %s)\n",
			str_on_off(state), str_on_off(cur_state));
/* Warn unless the FDI TX for @pipe is enabled. */
void assert_fdi_tx_enabled(struct drm_i915_private *i915, enum pipe pipe)
	assert_fdi_tx(i915, pipe, true);
/* Warn unless the FDI TX for @pipe is disabled. */
void assert_fdi_tx_disabled(struct drm_i915_private *i915, enum pipe pipe)
	assert_fdi_tx(i915, pipe, false);
/*
 * Warn via I915_STATE_WARN() unless the PCH FDI RX side of @pipe is in the
 * expected @state (true = enabled, false = disabled).
 */
static void assert_fdi_rx(struct drm_i915_private *dev_priv,
			  enum pipe pipe, bool state)
	cur_state = intel_de_read(dev_priv, FDI_RX_CTL(pipe)) & FDI_RX_ENABLE;
	I915_STATE_WARN(dev_priv, cur_state != state,
			"FDI RX state assertion failure (expected %s, current %s)\n",
			str_on_off(state), str_on_off(cur_state));
/* Warn unless the FDI RX for @pipe is enabled. */
void assert_fdi_rx_enabled(struct drm_i915_private *i915, enum pipe pipe)
	assert_fdi_rx(i915, pipe, true);
/* Warn unless the FDI RX for @pipe is disabled. */
void assert_fdi_rx_disabled(struct drm_i915_private *i915, enum pipe pipe)
	assert_fdi_rx(i915, pipe, false);
/*
 * Warn unless the FDI TX PLL for @pipe is enabled. Skipped on platforms
 * where the driver does not control the TX PLL directly (see below).
 */
void assert_fdi_tx_pll_enabled(struct drm_i915_private *i915,
	/* ILK FDI PLL is always enabled */
	if (IS_IRONLAKE(i915))
	/* On Haswell, DDI ports are responsible for the FDI PLL setup */
	cur_state = intel_de_read(i915, FDI_TX_CTL(pipe)) & FDI_TX_PLL_ENABLE;
	I915_STATE_WARN(i915, !cur_state,
			"FDI TX PLL assertion failure, should be active but is disabled\n");
/* Warn unless the FDI RX PLL for @pipe matches the expected @state. */
static void assert_fdi_rx_pll(struct drm_i915_private *i915,
			      enum pipe pipe, bool state)
	cur_state = intel_de_read(i915, FDI_RX_CTL(pipe)) & FDI_RX_PLL_ENABLE;
	I915_STATE_WARN(i915, cur_state != state,
			"FDI RX PLL assertion failure (expected %s, current %s)\n",
			str_on_off(state), str_on_off(cur_state));
/* Warn unless the FDI RX PLL for @pipe is enabled. */
void assert_fdi_rx_pll_enabled(struct drm_i915_private *i915, enum pipe pipe)
	assert_fdi_rx_pll(i915, pipe, true);
/* Warn unless the FDI RX PLL for @pipe is disabled. */
void assert_fdi_rx_pll_disabled(struct drm_i915_private *i915, enum pipe pipe)
	assert_fdi_rx_pll(i915, pipe, false);
/* Dispatch to the platform-specific FDI link training implementation. */
void intel_fdi_link_train(struct intel_crtc *crtc,
			  const struct intel_crtc_state *crtc_state)
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

	/* Vtable is set up in intel_fdi_init_hook() for FDI-capable platforms. */
	dev_priv->display.funcs.fdi->fdi_link_train(crtc, crtc_state);
/**
 * intel_fdi_add_affected_crtcs - add CRTCs on FDI affected by other modeset CRTCs
 * @state: intel atomic state
 *
 * Add a CRTC using FDI to @state if changing another CRTC's FDI BW usage is
 * known to affect the available FDI BW for the former CRTC. In practice this
 * means adding CRTC B on IVYBRIDGE if its use of FDI lanes is limited (by
 * CRTC C) and CRTC C is getting disabled.
 *
 * Returns 0 in case of success, or a negative error code otherwise.
 */
int intel_fdi_add_affected_crtcs(struct intel_atomic_state *state)
	struct drm_i915_private *i915 = to_i915(state->base.dev);
	const struct intel_crtc_state *old_crtc_state;
	const struct intel_crtc_state *new_crtc_state;
	struct intel_crtc *crtc;

	/* Only the 3-pipe IVB configuration shares FDI lanes between B and C. */
	if (!IS_IVYBRIDGE(i915) || INTEL_NUM_PIPES(i915) != 3)

	crtc = intel_crtc_for_pipe(i915, PIPE_C);
	new_crtc_state = intel_atomic_get_new_crtc_state(state, crtc);

	if (!intel_crtc_needs_modeset(new_crtc_state))

	/* If pipe C wasn't consuming FDI lanes, pipe B's budget is unchanged. */
	old_crtc_state = intel_atomic_get_old_crtc_state(state, crtc);
	if (!old_crtc_state->fdi_lanes)

	/* Pull pipe B into the state so its config can be recomputed. */
	crtc = intel_crtc_for_pipe(i915, PIPE_B);
	new_crtc_state = intel_atomic_get_crtc_state(&state->base, crtc);
	if (IS_ERR(new_crtc_state))
		return PTR_ERR(new_crtc_state);

	old_crtc_state = intel_atomic_get_old_crtc_state(state, crtc);
	if (!old_crtc_state->fdi_lanes)

	return intel_modeset_pipes_in_mask_early(state,
						 "FDI link BW decrease on pipe C",
/* units of 100MHz */
/* NOTE(review): comment above looks stale - this returns a lane count, not a frequency; confirm. */
static int pipe_required_fdi_lanes(struct intel_crtc_state *crtc_state)
	/* Only an enabled pipe driving a PCH encoder consumes FDI lanes. */
	if (crtc_state->hw.enable && crtc_state->has_pch_encoder)
		return crtc_state->fdi_lanes;
/*
 * Validate @pipe_config->fdi_lanes against the platform's FDI lane limits,
 * including IVB's pipe B/C lane sharing. When the config is invalid and
 * reducing BW on some pipe could make it valid, *pipe_to_reduce names that
 * pipe (defaults to @pipe itself).
 */
static int ilk_check_fdi_lanes(struct drm_device *dev, enum pipe pipe,
			       struct intel_crtc_state *pipe_config,
			       enum pipe *pipe_to_reduce)
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_atomic_state *state = pipe_config->uapi.state;
	struct intel_crtc *other_crtc;
	struct intel_crtc_state *other_crtc_state;

	*pipe_to_reduce = pipe;

	drm_dbg_kms(&dev_priv->drm,
		    "checking fdi config on pipe %c, lanes %i\n",
		    pipe_name(pipe), pipe_config->fdi_lanes);
	/* 4 lanes is the absolute FDI maximum on any platform here. */
	if (pipe_config->fdi_lanes > 4) {
		drm_dbg_kms(&dev_priv->drm,
			    "invalid fdi lane config on pipe %c: %i lanes\n",
			    pipe_name(pipe), pipe_config->fdi_lanes);

	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
		if (pipe_config->fdi_lanes > 2) {
			drm_dbg_kms(&dev_priv->drm,
				    "only 2 lanes on haswell, required: %i lanes\n",
				    pipe_config->fdi_lanes);

	/* Two-pipe parts have no lane sharing to worry about. */
	if (INTEL_NUM_PIPES(dev_priv) == 2)

	/* Ivybridge 3 pipe is really complicated */

	/* Pipe B using <= 2 lanes never conflicts with pipe C. */
	if (pipe_config->fdi_lanes <= 2)

	/* Pipe B wants > 2 lanes: only OK if pipe C uses no FDI lanes. */
	other_crtc = intel_crtc_for_pipe(dev_priv, PIPE_C);
		intel_atomic_get_crtc_state(state, other_crtc);
	if (IS_ERR(other_crtc_state))
		return PTR_ERR(other_crtc_state);

	if (pipe_required_fdi_lanes(other_crtc_state) > 0) {
		drm_dbg_kms(&dev_priv->drm,
			    "invalid shared fdi lane config on pipe %c: %i lanes\n",
			    pipe_name(pipe), pipe_config->fdi_lanes);

	/* Pipe C itself is capped at 2 lanes. */
	if (pipe_config->fdi_lanes > 2) {
		drm_dbg_kms(&dev_priv->drm,
			    "only 2 lanes on pipe %c: required %i lanes\n",
			    pipe_name(pipe), pipe_config->fdi_lanes);

	/* Pipe C can only be enabled if pipe B leaves it at most 2 lanes. */
	other_crtc = intel_crtc_for_pipe(dev_priv, PIPE_B);
		intel_atomic_get_crtc_state(state, other_crtc);
	if (IS_ERR(other_crtc_state))
		return PTR_ERR(other_crtc_state);

	if (pipe_required_fdi_lanes(other_crtc_state) > 2) {
		drm_dbg_kms(&dev_priv->drm,
			    "fdi link B uses too many lanes to enable link C\n");

		/* Freeing lanes on pipe B could make this config valid. */
		*pipe_to_reduce = PIPE_B;
/*
 * Determine the FDI PLL frequency and cache it in i915->display.fdi.pll_freq.
 * NOTE(review): presumably in 10 kHz units (270000 == 2.7 GHz) - confirm
 * against users of pll_freq.
 */
void intel_fdi_pll_freq_update(struct drm_i915_private *i915)
	if (IS_IRONLAKE(i915)) {
		/* ILK: read the feedback clock divider programmed by the BIOS. */
			intel_de_read(i915, FDI_PLL_BIOS_0) & FDI_PLL_FB_CLOCK_MASK;

		i915->display.fdi.pll_freq = (fdi_pll_clk + 2) * 10000;
	} else if (IS_SANDYBRIDGE(i915) || IS_IVYBRIDGE(i915)) {
		/* SNB/IVB: fixed FDI PLL frequency. */
		i915->display.fdi.pll_freq = 270000;

	drm_dbg(&i915->drm, "FDI PLL freq=%d\n", i915->display.fdi.pll_freq);
/*
 * Return the FDI link frequency for @pipe_config: the port clock where the
 * SPLL drives FDI, otherwise the cached FDI PLL frequency.
 */
int intel_fdi_link_freq(struct drm_i915_private *i915,
			const struct intel_crtc_state *pipe_config)
		return pipe_config->port_clock; /* SPLL */
		return i915->display.fdi.pll_freq;
/**
 * intel_fdi_compute_pipe_bpp - compute pipe bpp limited by max link bpp
 * @crtc_state: the crtc state
 *
 * Compute the pipe bpp limited by the CRTC's maximum link bpp. Encoders can
 * call this function during state computation in the simple case where the
 * link bpp will always match the pipe bpp. This is the case for all non-DP
 * encoders, while DP encoders will use a link bpp lower than pipe bpp in case
 * of DSC compression.
 *
 * Returns %true in case of success, %false if pipe bpp would need to be
 * reduced below its valid range.
 */
bool intel_fdi_compute_pipe_bpp(struct intel_crtc_state *crtc_state)
	int pipe_bpp = min(crtc_state->pipe_bpp,
			   to_bpp_int(crtc_state->max_link_bpp_x16));

	/* Round down to a multiple of 6 (2 bits per color * 3 components). */
	pipe_bpp = rounddown(pipe_bpp, 2 * 3);

	/* 6 bpc (18 bpp) is the minimum valid pipe bpp. */
	if (pipe_bpp < 6 * 3)

	crtc_state->pipe_bpp = pipe_bpp;
/*
 * Compute the FDI lane count and M/N link parameters for @pipe_config
 * based on the adjusted mode's dot clock and the FDI link frequency.
 */
int ilk_fdi_compute_config(struct intel_crtc *crtc,
			   struct intel_crtc_state *pipe_config)
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *i915 = to_i915(dev);
	const struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode;
	int lane, link_bw, fdi_dotclock;

	/* FDI is a binary signal running at ~2.7GHz, encoding
	 * each output octet as 10 bits. The actual frequency
	 * is stored as a divider into a 100MHz clock, and the
	 * mode pixel clock is stored in units of 1KHz.
	 * Hence the bw of each lane in terms of the mode signal
	 * is:
	 */
	link_bw = intel_fdi_link_freq(i915, pipe_config);

	fdi_dotclock = adjusted_mode->crtc_clock;

	lane = ilk_get_lanes_required(fdi_dotclock, link_bw,
				      pipe_config->pipe_bpp);

	pipe_config->fdi_lanes = lane;

	/* Derive the data/link M/N values used to program the FDI link. */
	intel_link_compute_m_n(pipe_config->pipe_bpp, lane, fdi_dotclock,
			       link_bw, &pipe_config->fdi_m_n, false);
/*
 * Check the FDI lane config of @crtc; when it is invalid try to reduce the
 * link bpp on the offending pipe via @limits and report -EAGAIN so the
 * whole state can be recomputed.
 */
static int intel_fdi_atomic_check_bw(struct intel_atomic_state *state,
				     struct intel_crtc *crtc,
				     struct intel_crtc_state *pipe_config,
				     struct intel_link_bw_limits *limits)
	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
	enum pipe pipe_to_reduce;

	ret = ilk_check_fdi_lanes(&i915->drm, crtc->pipe, pipe_config,

	ret = intel_link_bw_reduce_bpp(state, limits,

	/* Reduction failed -> propagate the error; otherwise ask for a recompute. */
	return ret ? : -EAGAIN;
/**
 * intel_fdi_atomic_check_link - check all modeset FDI link configuration
 * @state: intel atomic state
 * @limits: link BW limits
 *
 * Check the link configuration for all modeset FDI outputs. If the
 * configuration is invalid @limits will be updated if possible to
 * reduce the total BW, after which the configuration for all CRTCs in
 * @state must be recomputed with the updated @limits.
 *
 * Returns:
 *   - 0 if the confugration is valid
 *   - %-EAGAIN, if the configuration is invalid and @limits got updated
 *     with fallback values with which the configuration of all CRTCs
 *     in @state must be recomputed
 *   - Other negative error, if the configuration is invalid without a
 *     fallback possibility, or the check failed for another reason
 */
int intel_fdi_atomic_check_link(struct intel_atomic_state *state,
				struct intel_link_bw_limits *limits)
	struct intel_crtc *crtc;
	struct intel_crtc_state *crtc_state;

	for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
		/* Only enabled, modeset PCH-encoder pipes use FDI BW. */
		if (!crtc_state->has_pch_encoder ||
		    !intel_crtc_needs_modeset(crtc_state) ||
		    !crtc_state->hw.enable)

		ret = intel_fdi_atomic_check_bw(state, crtc, crtc_state, limits);
/*
 * Set or clear the SOUTH_CHICKEN1 FDI B/C bifurcation bit, which splits
 * FDI B's 4 lanes so 2 can feed FDI C. Must only be toggled while both
 * FDI B and FDI C receivers are disabled (hence the WARNs).
 */
static void cpt_set_fdi_bc_bifurcation(struct drm_i915_private *dev_priv, bool enable)
	temp = intel_de_read(dev_priv, SOUTH_CHICKEN1);
	/* Nothing to do if the bit already matches the requested state. */
	if (!!(temp & FDI_BC_BIFURCATION_SELECT) == enable)

	drm_WARN_ON(&dev_priv->drm,
		    intel_de_read(dev_priv, FDI_RX_CTL(PIPE_B)) &
	drm_WARN_ON(&dev_priv->drm,
		    intel_de_read(dev_priv, FDI_RX_CTL(PIPE_C)) &

	temp &= ~FDI_BC_BIFURCATION_SELECT;
		temp |= FDI_BC_BIFURCATION_SELECT;

	drm_dbg_kms(&dev_priv->drm, "%sabling fdi C rx\n",
		    enable ? "en" : "dis");
	intel_de_write(dev_priv, SOUTH_CHICKEN1, temp);
	intel_de_posting_read(dev_priv, SOUTH_CHICKEN1);
/*
 * Program FDI B/C bifurcation for the CRTC's pipe: pipe B needs it only
 * when using more than 2 lanes is NOT required, pipe C always needs it.
 */
static void ivb_update_fdi_bc_bifurcation(const struct intel_crtc_state *crtc_state)
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

	switch (crtc->pipe) {
		/* Pipe B: >2 lanes means it owns all 4, so no bifurcation. */
		if (crtc_state->fdi_lanes > 2)
			cpt_set_fdi_bc_bifurcation(dev_priv, false);
			cpt_set_fdi_bc_bifurcation(dev_priv, true);

		/* Pipe C can only get lanes via bifurcation. */
		cpt_set_fdi_bc_bifurcation(dev_priv, true);

		MISSING_CASE(crtc->pipe);
/*
 * Switch the FDI TX and RX of @crtc from training patterns to normal
 * operation after link training has completed, enabling enhanced framing.
 */
void intel_fdi_normal_train(struct intel_crtc *crtc)
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum pipe pipe = crtc->pipe;

	/* enable normal train */
	reg = FDI_TX_CTL(pipe);
	temp = intel_de_read(dev_priv, reg);
	if (IS_IVYBRIDGE(dev_priv)) {
		/* IVB uses its own train-none field. */
		temp &= ~FDI_LINK_TRAIN_NONE_IVB;
		temp |= FDI_LINK_TRAIN_NONE_IVB | FDI_TX_ENHANCE_FRAME_ENABLE;
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_NONE | FDI_TX_ENHANCE_FRAME_ENABLE;
	intel_de_write(dev_priv, reg, temp);

	reg = FDI_RX_CTL(pipe);
	temp = intel_de_read(dev_priv, reg);
	if (HAS_PCH_CPT(dev_priv)) {
		/* CPT PCH uses a dedicated pattern field on the RX side. */
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp |= FDI_LINK_TRAIN_NORMAL_CPT;
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_NONE;
	intel_de_write(dev_priv, reg, temp | FDI_RX_ENHANCE_FRAME_ENABLE);

	/* wait one idle pattern time */
	intel_de_posting_read(dev_priv, reg);

	/* IVB wants error correction enabled */
	if (IS_IVYBRIDGE(dev_priv))
		intel_de_rmw(dev_priv, reg, 0, FDI_FS_ERRC_ENABLE | FDI_FE_ERRC_ENABLE);
/* The FDI link training functions for ILK/Ibexpeak. */
static void ilk_fdi_link_train(struct intel_crtc *crtc,
			       const struct intel_crtc_state *crtc_state)
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum pipe pipe = crtc->pipe;

	/*
	 * Write the TU size bits before fdi link training, so that error
	 * detection works (presumably - TODO confirm against bspec).
	 */
	intel_de_write(dev_priv, FDI_RX_TUSIZE1(pipe),
		       intel_de_read(dev_priv, PIPE_DATA_M1(pipe)) & TU_SIZE_MASK);

	/* FDI needs bits from pipe first */
	assert_transcoder_enabled(dev_priv, crtc_state->cpu_transcoder);

	/* Train 1: umask FDI RX Interrupt symbol_lock and bit_lock bit */
	reg = FDI_RX_IMR(pipe);
	temp = intel_de_read(dev_priv, reg);
	temp &= ~FDI_RX_SYMBOL_LOCK;
	temp &= ~FDI_RX_BIT_LOCK;
	intel_de_write(dev_priv, reg, temp);
	intel_de_read(dev_priv, reg);

	/* enable CPU FDI TX and PCH FDI RX */
	reg = FDI_TX_CTL(pipe);
	temp = intel_de_read(dev_priv, reg);
	temp &= ~FDI_DP_PORT_WIDTH_MASK;
	temp |= FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_1;
	intel_de_write(dev_priv, reg, temp | FDI_TX_ENABLE);

	reg = FDI_RX_CTL(pipe);
	temp = intel_de_read(dev_priv, reg);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_1;
	intel_de_write(dev_priv, reg, temp | FDI_RX_ENABLE);

	intel_de_posting_read(dev_priv, reg);

	/* Ironlake workaround, enable clock pointer after FDI enable*/
	intel_de_write(dev_priv, FDI_RX_CHICKEN(pipe),
		       FDI_RX_PHASE_SYNC_POINTER_OVR);
	intel_de_write(dev_priv, FDI_RX_CHICKEN(pipe),
		       FDI_RX_PHASE_SYNC_POINTER_OVR | FDI_RX_PHASE_SYNC_POINTER_EN);

	/* Poll for bit lock (training pattern 1 complete). */
	reg = FDI_RX_IIR(pipe);
	for (tries = 0; tries < 5; tries++) {
		temp = intel_de_read(dev_priv, reg);
		drm_dbg_kms(&dev_priv->drm, "FDI_RX_IIR 0x%x\n", temp);

		if ((temp & FDI_RX_BIT_LOCK)) {
			drm_dbg_kms(&dev_priv->drm, "FDI train 1 done.\n");
			/* Write the lock bit back to clear the IIR status. */
			intel_de_write(dev_priv, reg, temp | FDI_RX_BIT_LOCK);

		drm_err(&dev_priv->drm, "FDI train 1 fail!\n");

	/* Train 2: switch TX and RX to training pattern 2. */
	intel_de_rmw(dev_priv, FDI_TX_CTL(pipe),
		     FDI_LINK_TRAIN_NONE, FDI_LINK_TRAIN_PATTERN_2);
	intel_de_rmw(dev_priv, FDI_RX_CTL(pipe),
		     FDI_LINK_TRAIN_NONE, FDI_LINK_TRAIN_PATTERN_2);
	intel_de_posting_read(dev_priv, FDI_RX_CTL(pipe));

	/* Poll for symbol lock (training pattern 2 complete). */
	reg = FDI_RX_IIR(pipe);
	for (tries = 0; tries < 5; tries++) {
		temp = intel_de_read(dev_priv, reg);
		drm_dbg_kms(&dev_priv->drm, "FDI_RX_IIR 0x%x\n", temp);

		if (temp & FDI_RX_SYMBOL_LOCK) {
			intel_de_write(dev_priv, reg,
				       temp | FDI_RX_SYMBOL_LOCK);
			drm_dbg_kms(&dev_priv->drm, "FDI train 2 done.\n");

		drm_err(&dev_priv->drm, "FDI train 2 fail!\n");

	drm_dbg_kms(&dev_priv->drm, "FDI train done\n");
/* Voltage-swing/pre-emphasis levels tried in order during SNB/IVB training. */
static const int snb_b_fdi_train_param[] = {
	FDI_LINK_TRAIN_400MV_0DB_SNB_B,
	FDI_LINK_TRAIN_400MV_6DB_SNB_B,
	FDI_LINK_TRAIN_600MV_3_5DB_SNB_B,
	FDI_LINK_TRAIN_800MV_0DB_SNB_B,
/* The FDI link training functions for SNB/Cougarpoint. */
static void gen6_fdi_link_train(struct intel_crtc *crtc,
				const struct intel_crtc_state *crtc_state)
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum pipe pipe = crtc->pipe;

	/*
	 * Write the TU size bits before fdi link training, so that error
	 * detection works (presumably - TODO confirm against bspec).
	 */
	intel_de_write(dev_priv, FDI_RX_TUSIZE1(pipe),
		       intel_de_read(dev_priv, PIPE_DATA_M1(pipe)) & TU_SIZE_MASK);

	/* Train 1: umask FDI RX Interrupt symbol_lock and bit_lock bit */
	reg = FDI_RX_IMR(pipe);
	temp = intel_de_read(dev_priv, reg);
	temp &= ~FDI_RX_SYMBOL_LOCK;
	temp &= ~FDI_RX_BIT_LOCK;
	intel_de_write(dev_priv, reg, temp);

	intel_de_posting_read(dev_priv, reg);

	/* enable CPU FDI TX and PCH FDI RX */
	reg = FDI_TX_CTL(pipe);
	temp = intel_de_read(dev_priv, reg);
	temp &= ~FDI_DP_PORT_WIDTH_MASK;
	temp |= FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_1;
	temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
	/* Start from the lowest voltage swing / emphasis level. */
	temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
	intel_de_write(dev_priv, reg, temp | FDI_TX_ENABLE);

	intel_de_write(dev_priv, FDI_RX_MISC(pipe),
		       FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90);

	reg = FDI_RX_CTL(pipe);
	temp = intel_de_read(dev_priv, reg);
	if (HAS_PCH_CPT(dev_priv)) {
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_PATTERN_1;
	intel_de_write(dev_priv, reg, temp | FDI_RX_ENABLE);

	intel_de_posting_read(dev_priv, reg);

	/* Step through each vswing/emphasis level until bit lock is seen. */
	for (i = 0; i < 4; i++) {
		intel_de_rmw(dev_priv, FDI_TX_CTL(pipe),
			     FDI_LINK_TRAIN_VOL_EMP_MASK, snb_b_fdi_train_param[i]);
		intel_de_posting_read(dev_priv, FDI_TX_CTL(pipe));

		for (retry = 0; retry < 5; retry++) {
			reg = FDI_RX_IIR(pipe);
			temp = intel_de_read(dev_priv, reg);
			drm_dbg_kms(&dev_priv->drm, "FDI_RX_IIR 0x%x\n", temp);
			if (temp & FDI_RX_BIT_LOCK) {
				/* Write back the lock bit to clear the status. */
				intel_de_write(dev_priv, reg,
					       temp | FDI_RX_BIT_LOCK);
				drm_dbg_kms(&dev_priv->drm,
					    "FDI train 1 done.\n");

		drm_err(&dev_priv->drm, "FDI train 1 fail!\n");

	/* Train 2: switch both ends to training pattern 2. */
	reg = FDI_TX_CTL(pipe);
	temp = intel_de_read(dev_priv, reg);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_2;
	if (IS_SANDYBRIDGE(dev_priv)) {
		temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
		/* SNB-B restarts pattern 2 from the lowest vswing level. */
		temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
	intel_de_write(dev_priv, reg, temp);

	reg = FDI_RX_CTL(pipe);
	temp = intel_de_read(dev_priv, reg);
	if (HAS_PCH_CPT(dev_priv)) {
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp |= FDI_LINK_TRAIN_PATTERN_2_CPT;
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_PATTERN_2;
	intel_de_write(dev_priv, reg, temp);

	intel_de_posting_read(dev_priv, reg);

	/* Again step through the vswing levels, now waiting for symbol lock. */
	for (i = 0; i < 4; i++) {
		intel_de_rmw(dev_priv, FDI_TX_CTL(pipe),
			     FDI_LINK_TRAIN_VOL_EMP_MASK, snb_b_fdi_train_param[i]);
		intel_de_posting_read(dev_priv, FDI_TX_CTL(pipe));

		for (retry = 0; retry < 5; retry++) {
			reg = FDI_RX_IIR(pipe);
			temp = intel_de_read(dev_priv, reg);
			drm_dbg_kms(&dev_priv->drm, "FDI_RX_IIR 0x%x\n", temp);
			if (temp & FDI_RX_SYMBOL_LOCK) {
				intel_de_write(dev_priv, reg,
					       temp | FDI_RX_SYMBOL_LOCK);
				drm_dbg_kms(&dev_priv->drm,
					    "FDI train 2 done.\n");

		drm_err(&dev_priv->drm, "FDI train 2 fail!\n");

	drm_dbg_kms(&dev_priv->drm, "FDI train done.\n");
/* Manual link training for Ivy Bridge A0 parts */
static void ivb_manual_fdi_link_train(struct intel_crtc *crtc,
				      const struct intel_crtc_state *crtc_state)
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum pipe pipe = crtc->pipe;

	/* Configure FDI B/C lane bifurcation before touching the link. */
	ivb_update_fdi_bc_bifurcation(crtc_state);

	/*
	 * Write the TU size bits before fdi link training, so that error
	 * detection works (presumably - TODO confirm against bspec).
	 */
	intel_de_write(dev_priv, FDI_RX_TUSIZE1(pipe),
		       intel_de_read(dev_priv, PIPE_DATA_M1(pipe)) & TU_SIZE_MASK);

	/* Train 1: umask FDI RX Interrupt symbol_lock and bit_lock bit */
	reg = FDI_RX_IMR(pipe);
	temp = intel_de_read(dev_priv, reg);
	temp &= ~FDI_RX_SYMBOL_LOCK;
	temp &= ~FDI_RX_BIT_LOCK;
	intel_de_write(dev_priv, reg, temp);

	intel_de_posting_read(dev_priv, reg);

	drm_dbg_kms(&dev_priv->drm, "FDI_RX_IIR before link train 0x%x\n",
		    intel_de_read(dev_priv, FDI_RX_IIR(pipe)));

	/* Try each vswing and preemphasis setting twice before moving on */
	for (j = 0; j < ARRAY_SIZE(snb_b_fdi_train_param) * 2; j++) {
		/* disable first in case we need to retry */
		reg = FDI_TX_CTL(pipe);
		temp = intel_de_read(dev_priv, reg);
		temp &= ~(FDI_LINK_TRAIN_AUTO | FDI_LINK_TRAIN_NONE_IVB);
		temp &= ~FDI_TX_ENABLE;
		intel_de_write(dev_priv, reg, temp);

		reg = FDI_RX_CTL(pipe);
		temp = intel_de_read(dev_priv, reg);
		temp &= ~FDI_LINK_TRAIN_AUTO;
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp &= ~FDI_RX_ENABLE;
		intel_de_write(dev_priv, reg, temp);

		/* enable CPU FDI TX and PCH FDI RX */
		reg = FDI_TX_CTL(pipe);
		temp = intel_de_read(dev_priv, reg);
		temp &= ~FDI_DP_PORT_WIDTH_MASK;
		temp |= FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes);
		temp |= FDI_LINK_TRAIN_PATTERN_1_IVB;
		temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
		/* j/2: each vswing/emphasis level is attempted twice. */
		temp |= snb_b_fdi_train_param[j/2];
		temp |= FDI_COMPOSITE_SYNC;
		intel_de_write(dev_priv, reg, temp | FDI_TX_ENABLE);

		intel_de_write(dev_priv, FDI_RX_MISC(pipe),
			       FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90);

		reg = FDI_RX_CTL(pipe);
		temp = intel_de_read(dev_priv, reg);
		temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
		temp |= FDI_COMPOSITE_SYNC;
		intel_de_write(dev_priv, reg, temp | FDI_RX_ENABLE);

		intel_de_posting_read(dev_priv, reg);
		udelay(1); /* should be 0.5us */

		/* Poll for bit lock; re-read once to catch a late assertion. */
		for (i = 0; i < 4; i++) {
			reg = FDI_RX_IIR(pipe);
			temp = intel_de_read(dev_priv, reg);
			drm_dbg_kms(&dev_priv->drm, "FDI_RX_IIR 0x%x\n", temp);

			if (temp & FDI_RX_BIT_LOCK ||
			    (intel_de_read(dev_priv, reg) & FDI_RX_BIT_LOCK)) {
				intel_de_write(dev_priv, reg,
					       temp | FDI_RX_BIT_LOCK);
				drm_dbg_kms(&dev_priv->drm,
					    "FDI train 1 done, level %i.\n",

			udelay(1); /* should be 0.5us */

			drm_dbg_kms(&dev_priv->drm,
				    "FDI train 1 fail on vswing %d\n", j / 2);

		/* Train 2 */
		intel_de_rmw(dev_priv, FDI_TX_CTL(pipe),
			     FDI_LINK_TRAIN_NONE_IVB,
			     FDI_LINK_TRAIN_PATTERN_2_IVB);
		intel_de_rmw(dev_priv, FDI_RX_CTL(pipe),
			     FDI_LINK_TRAIN_PATTERN_MASK_CPT,
			     FDI_LINK_TRAIN_PATTERN_2_CPT);
		intel_de_posting_read(dev_priv, FDI_RX_CTL(pipe));
		udelay(2); /* should be 1.5us */

		/* Poll for symbol lock the same way. */
		for (i = 0; i < 4; i++) {
			reg = FDI_RX_IIR(pipe);
			temp = intel_de_read(dev_priv, reg);
			drm_dbg_kms(&dev_priv->drm, "FDI_RX_IIR 0x%x\n", temp);

			if (temp & FDI_RX_SYMBOL_LOCK ||
			    (intel_de_read(dev_priv, reg) & FDI_RX_SYMBOL_LOCK)) {
				intel_de_write(dev_priv, reg,
					       temp | FDI_RX_SYMBOL_LOCK);
				drm_dbg_kms(&dev_priv->drm,
					    "FDI train 2 done, level %i.\n",

			udelay(2); /* should be 1.5us */

			drm_dbg_kms(&dev_priv->drm,
				    "FDI train 2 fail on vswing %d\n", j / 2);

	drm_dbg_kms(&dev_priv->drm, "FDI train done.\n");
/* Starting with Haswell, different DDI ports can work in FDI mode for
 * connection to the PCH-located connectors. For this, it is necessary to train
 * both the DDI port and PCH receiver for the desired DDI buffer settings.
 *
 * The recommended port to work in FDI mode is DDI E, which we use here. Also,
 * please note that when FDI mode is active on DDI E, it shares 2 lines with
 * DDI A (which is used for eDP)
 */
void hsw_fdi_link_train(struct intel_encoder *encoder,
			const struct intel_crtc_state *crtc_state)
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	u32 temp, i, rx_ctl_val;

	/* n_entries bounds the number of DDI buffer translation levels to try. */
	encoder->get_buf_trans(encoder, crtc_state, &n_entries);

	hsw_prepare_dp_ddi_buffers(encoder, crtc_state);

	/* Set the FDI_RX_MISC pwrdn lanes and the 2 workarounds listed at the
	 * mode set "sequence for CRT port" document:
	 * - TP1 to TP2 time with the default value
	 *
	 * WaFDIAutoLinkSetTimingOverrride:hsw
	 */
	intel_de_write(dev_priv, FDI_RX_MISC(PIPE_A),
		       FDI_RX_PWRDN_LANE1_VAL(2) |
		       FDI_RX_PWRDN_LANE0_VAL(2) |
		       FDI_RX_TP1_TO_TP2_48 |
		       FDI_RX_FDI_DELAY_90);

	/* Enable the PCH Receiver FDI PLL */
	rx_ctl_val = dev_priv->display.fdi.rx_config | FDI_RX_ENHANCE_FRAME_ENABLE |
		     FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes);
	intel_de_write(dev_priv, FDI_RX_CTL(PIPE_A), rx_ctl_val);
	intel_de_posting_read(dev_priv, FDI_RX_CTL(PIPE_A));

	/* Switch from Rawclk to PCDclk */
	rx_ctl_val |= FDI_PCDCLK;
	intel_de_write(dev_priv, FDI_RX_CTL(PIPE_A), rx_ctl_val);

	/* Configure Port Clock Select */
	/* FDI in DDI mode must be driven by the SPLL. */
	drm_WARN_ON(&dev_priv->drm, crtc_state->shared_dpll->info->id != DPLL_ID_SPLL);
	intel_ddi_enable_clock(encoder, crtc_state);

	/* Start the training iterating through available voltages and emphasis,
	 * testing each value twice. */
	for (i = 0; i < n_entries * 2; i++) {
		/* Configure DP_TP_CTL with auto-training */
		intel_de_write(dev_priv, DP_TP_CTL(PORT_E),
			       DP_TP_CTL_FDI_AUTOTRAIN |
			       DP_TP_CTL_ENHANCED_FRAME_ENABLE |
			       DP_TP_CTL_LINK_TRAIN_PAT1 |

		/* Configure and enable DDI_BUF_CTL for DDI E with next voltage.
		 * DDI E does not support port reversal, the functionality is
		 * achieved on the PCH side in FDI_RX_CTL, so no need to set the
		 * port reversal bit */
		intel_de_write(dev_priv, DDI_BUF_CTL(PORT_E),
			       ((crtc_state->fdi_lanes - 1) << 1) |
			       DDI_BUF_TRANS_SELECT(i / 2));
		intel_de_posting_read(dev_priv, DDI_BUF_CTL(PORT_E));

		/* Program PCH FDI Receiver TU */
		intel_de_write(dev_priv, FDI_RX_TUSIZE1(PIPE_A), TU_SIZE(64));

		/* Enable PCH FDI Receiver with auto-training */
		rx_ctl_val |= FDI_RX_ENABLE | FDI_LINK_TRAIN_AUTO;
		intel_de_write(dev_priv, FDI_RX_CTL(PIPE_A), rx_ctl_val);
		intel_de_posting_read(dev_priv, FDI_RX_CTL(PIPE_A));

		/* Wait for FDI receiver lane calibration */

		/* Unset FDI_RX_MISC pwrdn lanes */
		intel_de_rmw(dev_priv, FDI_RX_MISC(PIPE_A),
			     FDI_RX_PWRDN_LANE1_MASK | FDI_RX_PWRDN_LANE0_MASK, 0);
		intel_de_posting_read(dev_priv, FDI_RX_MISC(PIPE_A));

		/* Wait for FDI auto training time */

		temp = intel_de_read(dev_priv, DP_TP_STATUS(PORT_E));
		if (temp & DP_TP_STATUS_AUTOTRAIN_DONE) {
			drm_dbg_kms(&dev_priv->drm,
				    "FDI link training done on step %d\n", i);

		/*
		 * Leave things enabled even if we failed to train FDI.
		 * Results in less fireworks from the state checker.
		 */
		if (i == n_entries * 2 - 1) {
			drm_err(&dev_priv->drm, "FDI link training failed!\n");

		/* Training failed at this level: tear down and retry the next. */
		rx_ctl_val &= ~FDI_RX_ENABLE;
		intel_de_write(dev_priv, FDI_RX_CTL(PIPE_A), rx_ctl_val);
		intel_de_posting_read(dev_priv, FDI_RX_CTL(PIPE_A));

		intel_de_rmw(dev_priv, DDI_BUF_CTL(PORT_E), DDI_BUF_CTL_ENABLE, 0);
		intel_de_posting_read(dev_priv, DDI_BUF_CTL(PORT_E));

		/* Disable DP_TP_CTL and FDI_RX_CTL and retry */
		intel_de_rmw(dev_priv, DP_TP_CTL(PORT_E), DP_TP_CTL_ENABLE, 0);
		intel_de_posting_read(dev_priv, DP_TP_CTL(PORT_E));

		intel_wait_ddi_buf_idle(dev_priv, PORT_E);

		/* Reset FDI_RX_MISC pwrdn lanes */
		intel_de_rmw(dev_priv, FDI_RX_MISC(PIPE_A),
			     FDI_RX_PWRDN_LANE1_MASK | FDI_RX_PWRDN_LANE0_MASK,
			     FDI_RX_PWRDN_LANE1_VAL(2) | FDI_RX_PWRDN_LANE0_VAL(2));
		intel_de_posting_read(dev_priv, FDI_RX_MISC(PIPE_A));

	/* Enable normal pixel sending for FDI */
	intel_de_write(dev_priv, DP_TP_CTL(PORT_E),
		       DP_TP_CTL_FDI_AUTOTRAIN |
		       DP_TP_CTL_LINK_TRAIN_NORMAL |
		       DP_TP_CTL_ENHANCED_FRAME_ENABLE |
/*
 * Tear down the HSW FDI link: disable RX, the DDI E buffer, the port clock,
 * then drop PCDclk and the RX PLL, restoring the pwrdn lane defaults.
 */
void hsw_fdi_disable(struct intel_encoder *encoder)
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);

	/*
	 * Bspec lists this as both step 13 (before DDI_BUF_CTL disable)
	 * and step 18 (after clearing PORT_CLK_SEL). Based on a BUN,
	 * step 13 is the correct place for it. Step 18 is where it was
	 * originally before the BUN.
	 */
	intel_de_rmw(dev_priv, FDI_RX_CTL(PIPE_A), FDI_RX_ENABLE, 0);
	intel_de_rmw(dev_priv, DDI_BUF_CTL(PORT_E), DDI_BUF_CTL_ENABLE, 0);
	intel_wait_ddi_buf_idle(dev_priv, PORT_E);
	intel_ddi_disable_clock(encoder);
	intel_de_rmw(dev_priv, FDI_RX_MISC(PIPE_A),
		     FDI_RX_PWRDN_LANE1_MASK | FDI_RX_PWRDN_LANE0_MASK,
		     FDI_RX_PWRDN_LANE1_VAL(2) | FDI_RX_PWRDN_LANE0_VAL(2));
	intel_de_rmw(dev_priv, FDI_RX_CTL(PIPE_A), FDI_PCDCLK, 0);
	intel_de_rmw(dev_priv, FDI_RX_CTL(PIPE_A), FDI_RX_PLL_ENABLE, 0);
/* Enable the PCH FDI RX PLL and the CPU FDI TX PLL for the CRTC's pipe. */
void ilk_fdi_pll_enable(const struct intel_crtc_state *crtc_state)
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	/* enable PCH FDI RX PLL, wait warmup plus DMI latency */
	reg = FDI_RX_CTL(pipe);
	temp = intel_de_read(dev_priv, reg);
	temp &= ~(FDI_DP_PORT_WIDTH_MASK | (0x7 << 16));
	temp |= FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes);
	/* Mirror the transcoder's BPC setting into the FDI RX bpc field. */
	temp |= (intel_de_read(dev_priv, TRANSCONF(pipe)) & TRANSCONF_BPC_MASK) << 11;
	intel_de_write(dev_priv, reg, temp | FDI_RX_PLL_ENABLE);

	intel_de_posting_read(dev_priv, reg);

	/* Switch from Rawclk to PCDclk */
	intel_de_rmw(dev_priv, reg, 0, FDI_PCDCLK);
	intel_de_posting_read(dev_priv, reg);

	/* Enable CPU FDI TX PLL, always on for Ironlake */
	reg = FDI_TX_CTL(pipe);
	temp = intel_de_read(dev_priv, reg);
	if ((temp & FDI_TX_PLL_ENABLE) == 0) {
		intel_de_write(dev_priv, reg, temp | FDI_TX_PLL_ENABLE);

		intel_de_posting_read(dev_priv, reg);
/* Disable the CPU FDI TX PLL and the PCH FDI RX PLL for the CRTC's pipe. */
void ilk_fdi_pll_disable(struct intel_crtc *crtc)
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum pipe pipe = crtc->pipe;

	/* Switch from PCDclk to Rawclk */
	intel_de_rmw(dev_priv, FDI_RX_CTL(pipe), FDI_PCDCLK, 0);

	/* Disable CPU FDI TX PLL */
	intel_de_rmw(dev_priv, FDI_TX_CTL(pipe), FDI_TX_PLL_ENABLE, 0);
	intel_de_posting_read(dev_priv, FDI_TX_CTL(pipe));

	/* Wait for the clocks to turn off. */
	intel_de_rmw(dev_priv, FDI_RX_CTL(pipe), FDI_RX_PLL_ENABLE, 0);
	intel_de_posting_read(dev_priv, FDI_RX_CTL(pipe))
/*
 * Disable the CPU FDI TX and PCH FDI RX for the CRTC's pipe, leaving the
 * link parked in training pattern 1 as required for a later re-enable.
 */
void ilk_fdi_disable(struct intel_crtc *crtc)
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	/* disable CPU FDI tx and PCH FDI rx */
	intel_de_rmw(dev_priv, FDI_TX_CTL(pipe), FDI_TX_ENABLE, 0);
	intel_de_posting_read(dev_priv, FDI_TX_CTL(pipe));

	reg = FDI_RX_CTL(pipe);
	temp = intel_de_read(dev_priv, reg);
	temp &= ~(0x7 << 16);
	/* Keep the RX bpc field consistent with the transcoder's setting. */
	temp |= (intel_de_read(dev_priv, TRANSCONF(pipe)) & TRANSCONF_BPC_MASK) << 11;
	intel_de_write(dev_priv, reg, temp & ~FDI_RX_ENABLE);

	intel_de_posting_read(dev_priv, reg);

	/* Ironlake workaround, disable clock pointer after downing FDI */
	if (HAS_PCH_IBX(dev_priv))
		intel_de_write(dev_priv, FDI_RX_CHICKEN(pipe),
			       FDI_RX_PHASE_SYNC_POINTER_OVR);

	/* still set train pattern 1 */
	intel_de_rmw(dev_priv, FDI_TX_CTL(pipe),
		     FDI_LINK_TRAIN_NONE, FDI_LINK_TRAIN_PATTERN_1);

	reg = FDI_RX_CTL(pipe);
	temp = intel_de_read(dev_priv, reg);
	if (HAS_PCH_CPT(dev_priv)) {
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_PATTERN_1;
	/* BPC in FDI rx is consistent with that in TRANSCONF */
	temp &= ~(0x07 << 16);
	temp |= (intel_de_read(dev_priv, TRANSCONF(pipe)) & TRANSCONF_BPC_MASK) << 11;
	intel_de_write(dev_priv, reg, temp);

	intel_de_posting_read(dev_priv, reg);
/* Ironlake/Ibexpeak FDI function table. */
static const struct intel_fdi_funcs ilk_funcs = {
	.fdi_link_train = ilk_fdi_link_train,
/* Sandybridge/Cougarpoint FDI function table. */
static const struct intel_fdi_funcs gen6_funcs = {
	.fdi_link_train = gen6_fdi_link_train,
/* Ivybridge FDI function table (manual training for A0 parts). */
static const struct intel_fdi_funcs ivb_funcs = {
	.fdi_link_train = ivb_manual_fdi_link_train,
/* Select the per-platform FDI function table used by intel_fdi_link_train(). */
intel_fdi_init_hook(struct drm_i915_private *dev_priv)
	if (IS_IRONLAKE(dev_priv)) {
		dev_priv->display.funcs.fdi = &ilk_funcs;
	} else if (IS_SANDYBRIDGE(dev_priv)) {
		dev_priv->display.funcs.fdi = &gen6_funcs;
	} else if (IS_IVYBRIDGE(dev_priv)) {
		/* FIXME: detect B0+ stepping and use auto training */
		dev_priv->display.funcs.fdi = &ivb_funcs;