1 // SPDX-License-Identifier: MIT
3 * Copyright © 2020 Intel Corporation
6 #include <linux/string_helpers.h>
8 #include <drm/drm_fixed.h>
12 #include "intel_atomic.h"
13 #include "intel_crtc.h"
14 #include "intel_ddi.h"
17 #include "intel_display_types.h"
18 #include "intel_fdi.h"
19 #include "intel_fdi_regs.h"
20 #include "intel_link_bw.h"
22 struct intel_fdi_funcs {
23 void (*fdi_link_train)(struct intel_crtc *crtc,
24 const struct intel_crtc_state *crtc_state);
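/*
 * State assertion helpers: compare the FDI TX/RX (and PLL) enable bits in the
 * hardware against the state the caller expects and warn on any mismatch.
 * DDI platforms have no dedicated FDI_TX register, so the TX side is inferred
 * from the pipe's TRANS_DDI_FUNC_CTL enable bit instead.
 */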
27 static void assert_fdi_tx(struct drm_i915_private *dev_priv,
28 enum pipe pipe, bool state)
30 struct intel_display *display = &dev_priv->display;
33 if (HAS_DDI(display)) {
35 * DDI does not have a specific FDI_TX register.
37 * FDI is never fed from EDP transcoder
38 * so pipe->transcoder cast is fine here.
40 enum transcoder cpu_transcoder = (enum transcoder)pipe;
41 cur_state = intel_de_read(display,
42 TRANS_DDI_FUNC_CTL(display, cpu_transcoder)) & TRANS_DDI_FUNC_ENABLE;
44 cur_state = intel_de_read(display, FDI_TX_CTL(pipe)) & FDI_TX_ENABLE;
46 INTEL_DISPLAY_STATE_WARN(display, cur_state != state,
47 "FDI TX state assertion failure (expected %s, current %s)\n",
48 str_on_off(state), str_on_off(cur_state));
51 void assert_fdi_tx_enabled(struct drm_i915_private *i915, enum pipe pipe)
53 assert_fdi_tx(i915, pipe, true);
56 void assert_fdi_tx_disabled(struct drm_i915_private *i915, enum pipe pipe)
58 assert_fdi_tx(i915, pipe, false);
61 static void assert_fdi_rx(struct drm_i915_private *dev_priv,
62 enum pipe pipe, bool state)
64 struct intel_display *display = &dev_priv->display;
67 cur_state = intel_de_read(display, FDI_RX_CTL(pipe)) & FDI_RX_ENABLE;
68 INTEL_DISPLAY_STATE_WARN(display, cur_state != state,
69 "FDI RX state assertion failure (expected %s, current %s)\n",
70 str_on_off(state), str_on_off(cur_state));
73 void assert_fdi_rx_enabled(struct drm_i915_private *i915, enum pipe pipe)
75 assert_fdi_rx(i915, pipe, true);
78 void assert_fdi_rx_disabled(struct drm_i915_private *i915, enum pipe pipe)
80 assert_fdi_rx(i915, pipe, false);
83 void assert_fdi_tx_pll_enabled(struct drm_i915_private *i915,
86 struct intel_display *display = &i915->display;
89 /* ILK FDI PLL is always enabled */
90 if (IS_IRONLAKE(i915))
93 /* On Haswell, DDI ports are responsible for the FDI PLL setup */
97 cur_state = intel_de_read(display, FDI_TX_CTL(pipe)) & FDI_TX_PLL_ENABLE;
98 INTEL_DISPLAY_STATE_WARN(display, !cur_state,
99 "FDI TX PLL assertion failure, should be active but is disabled\n");
102 static void assert_fdi_rx_pll(struct drm_i915_private *i915,
103 enum pipe pipe, bool state)
105 struct intel_display *display = &i915->display;
108 cur_state = intel_de_read(display, FDI_RX_CTL(pipe)) & FDI_RX_PLL_ENABLE;
109 INTEL_DISPLAY_STATE_WARN(display, cur_state != state,
110 "FDI RX PLL assertion failure (expected %s, current %s)\n",
111 str_on_off(state), str_on_off(cur_state));
114 void assert_fdi_rx_pll_enabled(struct drm_i915_private *i915, enum pipe pipe)
116 assert_fdi_rx_pll(i915, pipe, true);
119 void assert_fdi_rx_pll_disabled(struct drm_i915_private *i915, enum pipe pipe)
121 assert_fdi_rx_pll(i915, pipe, false);
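/*
 * Dispatch to the per-platform link training implementation installed by
 * intel_fdi_init_hook() at driver init time.
 */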
124 void intel_fdi_link_train(struct intel_crtc *crtc,
125 const struct intel_crtc_state *crtc_state)
127 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
129 dev_priv->display.funcs.fdi->fdi_link_train(crtc, crtc_state);
133 * intel_fdi_add_affected_crtcs - add CRTCs on FDI affected by other modeset CRTCs
134 * @state: intel atomic state
136 * Add a CRTC using FDI to @state if changing another CRTC's FDI BW usage is
137 * known to affect the available FDI BW for the former CRTC. In practice this
138 * means adding CRTC B on IVYBRIDGE if its use of FDI lanes is limited (by
139 * CRTC C) and CRTC C is getting disabled.
141 * Returns 0 in case of success, or a negative error code otherwise.
143 int intel_fdi_add_affected_crtcs(struct intel_atomic_state *state)
145 struct intel_display *display = to_intel_display(state);
146 struct drm_i915_private *i915 = to_i915(state->base.dev);
147 const struct intel_crtc_state *old_crtc_state;
148 const struct intel_crtc_state *new_crtc_state;
149 struct intel_crtc *crtc;
151 if (!IS_IVYBRIDGE(i915) || INTEL_NUM_PIPES(i915) != 3)
154 crtc = intel_crtc_for_pipe(display, PIPE_C);
155 new_crtc_state = intel_atomic_get_new_crtc_state(state, crtc);
159 if (!intel_crtc_needs_modeset(new_crtc_state))
162 old_crtc_state = intel_atomic_get_old_crtc_state(state, crtc);
163 if (!old_crtc_state->fdi_lanes)
166 crtc = intel_crtc_for_pipe(display, PIPE_B);
167 new_crtc_state = intel_atomic_get_crtc_state(&state->base, crtc);
168 if (IS_ERR(new_crtc_state))
169 return PTR_ERR(new_crtc_state);
171 old_crtc_state = intel_atomic_get_old_crtc_state(state, crtc);
172 if (!old_crtc_state->fdi_lanes)
175 return intel_modeset_pipes_in_mask_early(state,
176 "FDI link BW decrease on pipe C",
180 /* Number of FDI lanes required by the pipe, or 0 if the pipe doesn't use FDI */
181 static int pipe_required_fdi_lanes(struct intel_crtc_state *crtc_state)
183 if (crtc_state->hw.enable && crtc_state->has_pch_encoder)
184 return crtc_state->fdi_lanes;
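/*
 * Validate the FDI lane configuration for @pipe. At most 4 lanes are
 * available (2 on HSW/BDW), and on 3-pipe IVB pipes B and C share lanes:
 * pipe B may only use more than 2 lanes if pipe C's FDI is unused, and
 * pipe C is limited to 2 lanes and requires pipe B to use no more than 2.
 * On failure *pipe_to_reduce names the pipe whose FDI bandwidth should be
 * reduced to make the configuration fit.
 */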
189 static int ilk_check_fdi_lanes(struct drm_device *dev, enum pipe pipe,
190 struct intel_crtc_state *pipe_config,
191 enum pipe *pipe_to_reduce)
193 struct intel_display *display = to_intel_display(dev);
194 struct drm_i915_private *dev_priv = to_i915(dev);
195 struct drm_atomic_state *state = pipe_config->uapi.state;
196 struct intel_crtc *other_crtc;
197 struct intel_crtc_state *other_crtc_state;
199 *pipe_to_reduce = pipe;
201 drm_dbg_kms(&dev_priv->drm,
202 "checking fdi config on pipe %c, lanes %i\n",
203 pipe_name(pipe), pipe_config->fdi_lanes);
204 if (pipe_config->fdi_lanes > 4) {
205 drm_dbg_kms(&dev_priv->drm,
206 "invalid fdi lane config on pipe %c: %i lanes\n",
207 pipe_name(pipe), pipe_config->fdi_lanes);
211 if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
212 if (pipe_config->fdi_lanes > 2) {
213 drm_dbg_kms(&dev_priv->drm,
214 "only 2 lanes on haswell, required: %i lanes\n",
215 pipe_config->fdi_lanes);
222 if (INTEL_NUM_PIPES(dev_priv) == 2)
225 /* Ivybridge 3 pipe is really complicated */
230 if (pipe_config->fdi_lanes <= 2)
233 other_crtc = intel_crtc_for_pipe(display, PIPE_C);
235 intel_atomic_get_crtc_state(state, other_crtc);
236 if (IS_ERR(other_crtc_state))
237 return PTR_ERR(other_crtc_state);
239 if (pipe_required_fdi_lanes(other_crtc_state) > 0) {
240 drm_dbg_kms(&dev_priv->drm,
241 "invalid shared fdi lane config on pipe %c: %i lanes\n",
242 pipe_name(pipe), pipe_config->fdi_lanes);
247 if (pipe_config->fdi_lanes > 2) {
248 drm_dbg_kms(&dev_priv->drm,
249 "only 2 lanes on pipe %c: required %i lanes\n",
250 pipe_name(pipe), pipe_config->fdi_lanes);
254 other_crtc = intel_crtc_for_pipe(display, PIPE_B);
256 intel_atomic_get_crtc_state(state, other_crtc);
257 if (IS_ERR(other_crtc_state))
258 return PTR_ERR(other_crtc_state);
260 if (pipe_required_fdi_lanes(other_crtc_state) > 2) {
261 drm_dbg_kms(&dev_priv->drm,
262 "fdi link B uses too many lanes to enable link C\n");
264 *pipe_to_reduce = PIPE_B;
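/*
 * Cache the FDI link frequency: on Ironlake it is derived from the FDI PLL
 * feedback divider left programmed by the BIOS, on Sandybridge/Ivybridge the
 * link always runs at 2.7 GHz (pll_freq = 270000, same units as port_clock).
 * Other platforms are left untouched.
 */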
275 void intel_fdi_pll_freq_update(struct drm_i915_private *i915)
277 if (IS_IRONLAKE(i915)) {
279 intel_de_read(i915, FDI_PLL_BIOS_0) & FDI_PLL_FB_CLOCK_MASK;
281 i915->display.fdi.pll_freq = (fdi_pll_clk + 2) * 10000;
282 } else if (IS_SANDYBRIDGE(i915) || IS_IVYBRIDGE(i915)) {
283 i915->display.fdi.pll_freq = 270000;
288 drm_dbg(&i915->drm, "FDI PLL freq=%d\n", i915->display.fdi.pll_freq);
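/*
 * On DDI (HSW/BDW) platforms FDI is clocked from the SPLL, so the port clock
 * is returned directly; older platforms use the cached FDI PLL frequency.
 */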
291 int intel_fdi_link_freq(struct drm_i915_private *i915,
292 const struct intel_crtc_state *pipe_config)
295 return pipe_config->port_clock; /* SPLL */
297 return i915->display.fdi.pll_freq;
301 * intel_fdi_compute_pipe_bpp - compute pipe bpp limited by max link bpp
302 * @crtc_state: the crtc state
304 * Compute the pipe bpp limited by the CRTC's maximum link bpp. Encoders can
305 * call this function during state computation in the simple case where the
306 * link bpp will always match the pipe bpp. This is the case for all non-DP
307 * encoders, while DP encoders will use a link bpp lower than pipe bpp in case
308 * of DSC compression.
310 * Returns %true in case of success, %false if pipe bpp would need to be
311 * reduced below its valid range.
313 bool intel_fdi_compute_pipe_bpp(struct intel_crtc_state *crtc_state)
315 int pipe_bpp = min(crtc_state->pipe_bpp,
316 fxp_q4_to_int(crtc_state->max_link_bpp_x16));
318 pipe_bpp = rounddown(pipe_bpp, 2 * 3);
320 if (pipe_bpp < 6 * 3)
323 crtc_state->pipe_bpp = pipe_bpp;
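/*
 * Worked example for the clamping above: with a max link bpp of 22.5
 * (x16 fixed point), a 24 bpp pipe is first clamped to 22 and then rounded
 * down to 18 (3 components x 6 bpc). Anything that would have to drop below
 * 18 bpp fails.
 */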
328 int ilk_fdi_compute_config(struct intel_crtc *crtc,
329 struct intel_crtc_state *pipe_config)
331 struct drm_device *dev = crtc->base.dev;
332 struct drm_i915_private *i915 = to_i915(dev);
333 const struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode;
334 int lane, link_bw, fdi_dotclock;
336 /* FDI is a binary signal running at ~2.7GHz, encoding
337 * each output octet as 10 bits. The actual frequency
338 * is stored as a divider into a 100MHz clock, and the
339 * mode pixel clock is stored in units of 1KHz.
340 * Hence the bw of each lane in terms of the mode signal
343 link_bw = intel_fdi_link_freq(i915, pipe_config);
345 fdi_dotclock = adjusted_mode->crtc_clock;
347 lane = ilk_get_lanes_required(fdi_dotclock, link_bw,
348 pipe_config->pipe_bpp);
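/*
 * Example for the lane computation above: a 148.5 MHz, 24 bpp mode needs
 * ~3.56 Gb/s, while each lane carries 270000 * 8 = 2.16 Gb/s of payload
 * after 8b/10b encoding, so two lanes are required here.
 * ilk_get_lanes_required() (defined elsewhere) also reserves a little
 * extra bandwidth for the spread spectrum clock.
 */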
350 pipe_config->fdi_lanes = lane;
352 intel_link_compute_m_n(fxp_q4_from_int(pipe_config->pipe_bpp),
355 intel_dp_bw_fec_overhead(false),
356 &pipe_config->fdi_m_n);
361 static int intel_fdi_atomic_check_bw(struct intel_atomic_state *state,
362 struct intel_crtc *crtc,
363 struct intel_crtc_state *pipe_config,
364 struct intel_link_bw_limits *limits)
366 struct drm_i915_private *i915 = to_i915(crtc->base.dev);
367 enum pipe pipe_to_reduce;
370 ret = ilk_check_fdi_lanes(&i915->drm, crtc->pipe, pipe_config,
375 ret = intel_link_bw_reduce_bpp(state, limits,
379 return ret ? : -EAGAIN;
383 * intel_fdi_atomic_check_link - check all modeset FDI link configuration
384 * @state: intel atomic state
385 * @limits: link BW limits
387 * Check the link configuration for all modeset FDI outputs. If the
388 * configuration is invalid @limits will be updated if possible to
389 * reduce the total BW, after which the configuration for all CRTCs in
390 * @state must be recomputed with the updated @limits.
393 * - 0 if the configuration is valid
394 * - %-EAGAIN, if the configuration is invalid and @limits got updated
395 * with fallback values with which the configuration of all CRTCs
396 * in @state must be recomputed
397 * - Other negative error, if the configuration is invalid without a
398 * fallback possibility, or the check failed for another reason
400 int intel_fdi_atomic_check_link(struct intel_atomic_state *state,
401 struct intel_link_bw_limits *limits)
403 struct intel_crtc *crtc;
404 struct intel_crtc_state *crtc_state;
407 for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
410 if (!crtc_state->has_pch_encoder ||
411 !intel_crtc_needs_modeset(crtc_state) ||
412 !crtc_state->hw.enable)
415 ret = intel_fdi_atomic_check_bw(state, crtc, crtc_state, limits);
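/*
 * On IVB the FDI B and C links share a single set of four lanes. The
 * FDI_BC_BIFURCATION_SELECT bit in SOUTH_CHICKEN1 splits them, limiting
 * FDI B to 2 lanes so that the other 2 can feed FDI C; it may only be
 * toggled while both receivers are disabled, hence the WARNs on the
 * receiver state below.
 */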
423 static void cpt_set_fdi_bc_bifurcation(struct drm_i915_private *dev_priv, bool enable)
427 temp = intel_de_read(dev_priv, SOUTH_CHICKEN1);
428 if (!!(temp & FDI_BC_BIFURCATION_SELECT) == enable)
431 drm_WARN_ON(&dev_priv->drm,
432 intel_de_read(dev_priv, FDI_RX_CTL(PIPE_B)) &
434 drm_WARN_ON(&dev_priv->drm,
435 intel_de_read(dev_priv, FDI_RX_CTL(PIPE_C)) &
438 temp &= ~FDI_BC_BIFURCATION_SELECT;
440 temp |= FDI_BC_BIFURCATION_SELECT;
442 drm_dbg_kms(&dev_priv->drm, "%sabling fdi C rx\n",
443 enable ? "en" : "dis");
444 intel_de_write(dev_priv, SOUTH_CHICKEN1, temp);
445 intel_de_posting_read(dev_priv, SOUTH_CHICKEN1);
448 static void ivb_update_fdi_bc_bifurcation(const struct intel_crtc_state *crtc_state)
450 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
451 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
453 switch (crtc->pipe) {
457 if (crtc_state->fdi_lanes > 2)
458 cpt_set_fdi_bc_bifurcation(dev_priv, false);
460 cpt_set_fdi_bc_bifurcation(dev_priv, true);
464 cpt_set_fdi_bc_bifurcation(dev_priv, true);
468 MISSING_CASE(crtc->pipe);
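/*
 * Switch the link from the training patterns to the normal pixel/idle
 * pattern once training has completed, and enable enhanced framing
 * (plus FS/FE error correction on IVB).
 */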
472 void intel_fdi_normal_train(struct intel_crtc *crtc)
474 struct drm_device *dev = crtc->base.dev;
475 struct drm_i915_private *dev_priv = to_i915(dev);
476 enum pipe pipe = crtc->pipe;
480 /* enable normal train */
481 reg = FDI_TX_CTL(pipe);
482 temp = intel_de_read(dev_priv, reg);
483 if (IS_IVYBRIDGE(dev_priv)) {
484 temp &= ~FDI_LINK_TRAIN_NONE_IVB;
485 temp |= FDI_LINK_TRAIN_NONE_IVB | FDI_TX_ENHANCE_FRAME_ENABLE;
487 temp &= ~FDI_LINK_TRAIN_NONE;
488 temp |= FDI_LINK_TRAIN_NONE | FDI_TX_ENHANCE_FRAME_ENABLE;
490 intel_de_write(dev_priv, reg, temp);
492 reg = FDI_RX_CTL(pipe);
493 temp = intel_de_read(dev_priv, reg);
494 if (HAS_PCH_CPT(dev_priv)) {
495 temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
496 temp |= FDI_LINK_TRAIN_NORMAL_CPT;
498 temp &= ~FDI_LINK_TRAIN_NONE;
499 temp |= FDI_LINK_TRAIN_NONE;
501 intel_de_write(dev_priv, reg, temp | FDI_RX_ENHANCE_FRAME_ENABLE);
503 /* wait one idle pattern time */
504 intel_de_posting_read(dev_priv, reg);
507 /* IVB wants error correction enabled */
508 if (IS_IVYBRIDGE(dev_priv))
509 intel_de_rmw(dev_priv, reg, 0, FDI_FS_ERRC_ENABLE | FDI_FE_ERRC_ENABLE);
512 /* The FDI link training functions for ILK/Ibexpeak. */
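/*
 * The sequence: unmask the RX bit/symbol lock status bits, enable TX and RX
 * with training pattern 1 and the configured lane count, poll FDI_RX_IIR for
 * bit lock, switch both sides to pattern 2 and poll for symbol lock.
 * intel_fdi_normal_train() later drops the link into the normal pattern.
 */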
513 static void ilk_fdi_link_train(struct intel_crtc *crtc,
514 const struct intel_crtc_state *crtc_state)
516 struct drm_device *dev = crtc->base.dev;
517 struct drm_i915_private *dev_priv = to_i915(dev);
518 enum pipe pipe = crtc->pipe;
523 * Write the TU size bits before fdi link training, so that error
526 intel_de_write(dev_priv, FDI_RX_TUSIZE1(pipe),
527 intel_de_read(dev_priv, PIPE_DATA_M1(dev_priv, pipe)) & TU_SIZE_MASK);
529 /* FDI needs bits from pipe first */
530 assert_transcoder_enabled(dev_priv, crtc_state->cpu_transcoder);
532 /* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bit
534 reg = FDI_RX_IMR(pipe);
535 temp = intel_de_read(dev_priv, reg);
536 temp &= ~FDI_RX_SYMBOL_LOCK;
537 temp &= ~FDI_RX_BIT_LOCK;
538 intel_de_write(dev_priv, reg, temp);
539 intel_de_read(dev_priv, reg);
542 /* enable CPU FDI TX and PCH FDI RX */
543 reg = FDI_TX_CTL(pipe);
544 temp = intel_de_read(dev_priv, reg);
545 temp &= ~FDI_DP_PORT_WIDTH_MASK;
546 temp |= FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes);
547 temp &= ~FDI_LINK_TRAIN_NONE;
548 temp |= FDI_LINK_TRAIN_PATTERN_1;
549 intel_de_write(dev_priv, reg, temp | FDI_TX_ENABLE);
551 reg = FDI_RX_CTL(pipe);
552 temp = intel_de_read(dev_priv, reg);
553 temp &= ~FDI_LINK_TRAIN_NONE;
554 temp |= FDI_LINK_TRAIN_PATTERN_1;
555 intel_de_write(dev_priv, reg, temp | FDI_RX_ENABLE);
557 intel_de_posting_read(dev_priv, reg);
560 /* Ironlake workaround, enable clock pointer after FDI enable */
561 intel_de_write(dev_priv, FDI_RX_CHICKEN(pipe),
562 FDI_RX_PHASE_SYNC_POINTER_OVR);
563 intel_de_write(dev_priv, FDI_RX_CHICKEN(pipe),
564 FDI_RX_PHASE_SYNC_POINTER_OVR | FDI_RX_PHASE_SYNC_POINTER_EN);
566 reg = FDI_RX_IIR(pipe);
567 for (tries = 0; tries < 5; tries++) {
568 temp = intel_de_read(dev_priv, reg);
569 drm_dbg_kms(&dev_priv->drm, "FDI_RX_IIR 0x%x\n", temp);
571 if ((temp & FDI_RX_BIT_LOCK)) {
572 drm_dbg_kms(&dev_priv->drm, "FDI train 1 done.\n");
573 intel_de_write(dev_priv, reg, temp | FDI_RX_BIT_LOCK);
578 drm_err(&dev_priv->drm, "FDI train 1 fail!\n");
581 intel_de_rmw(dev_priv, FDI_TX_CTL(pipe),
582 FDI_LINK_TRAIN_NONE, FDI_LINK_TRAIN_PATTERN_2);
583 intel_de_rmw(dev_priv, FDI_RX_CTL(pipe),
584 FDI_LINK_TRAIN_NONE, FDI_LINK_TRAIN_PATTERN_2);
585 intel_de_posting_read(dev_priv, FDI_RX_CTL(pipe));
588 reg = FDI_RX_IIR(pipe);
589 for (tries = 0; tries < 5; tries++) {
590 temp = intel_de_read(dev_priv, reg);
591 drm_dbg_kms(&dev_priv->drm, "FDI_RX_IIR 0x%x\n", temp);
593 if (temp & FDI_RX_SYMBOL_LOCK) {
594 intel_de_write(dev_priv, reg,
595 temp | FDI_RX_SYMBOL_LOCK);
596 drm_dbg_kms(&dev_priv->drm, "FDI train 2 done.\n");
601 drm_err(&dev_priv->drm, "FDI train 2 fail!\n");
603 drm_dbg_kms(&dev_priv->drm, "FDI train done\n");
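/*
 * Voltage swing / pre-emphasis combinations stepped through (via
 * FDI_LINK_TRAIN_VOL_EMP_MASK) while training the link on SNB and IVB.
 */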
607 static const int snb_b_fdi_train_param[] = {
608 FDI_LINK_TRAIN_400MV_0DB_SNB_B,
609 FDI_LINK_TRAIN_400MV_6DB_SNB_B,
610 FDI_LINK_TRAIN_600MV_3_5DB_SNB_B,
611 FDI_LINK_TRAIN_800MV_0DB_SNB_B,
614 /* The FDI link training functions for SNB/Cougarpoint. */
615 static void gen6_fdi_link_train(struct intel_crtc *crtc,
616 const struct intel_crtc_state *crtc_state)
618 struct drm_device *dev = crtc->base.dev;
619 struct drm_i915_private *dev_priv = to_i915(dev);
620 enum pipe pipe = crtc->pipe;
625 * Write the TU size bits before fdi link training, so that error
628 intel_de_write(dev_priv, FDI_RX_TUSIZE1(pipe),
629 intel_de_read(dev_priv, PIPE_DATA_M1(dev_priv, pipe)) & TU_SIZE_MASK);
631 /* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bit
633 reg = FDI_RX_IMR(pipe);
634 temp = intel_de_read(dev_priv, reg);
635 temp &= ~FDI_RX_SYMBOL_LOCK;
636 temp &= ~FDI_RX_BIT_LOCK;
637 intel_de_write(dev_priv, reg, temp);
639 intel_de_posting_read(dev_priv, reg);
642 /* enable CPU FDI TX and PCH FDI RX */
643 reg = FDI_TX_CTL(pipe);
644 temp = intel_de_read(dev_priv, reg);
645 temp &= ~FDI_DP_PORT_WIDTH_MASK;
646 temp |= FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes);
647 temp &= ~FDI_LINK_TRAIN_NONE;
648 temp |= FDI_LINK_TRAIN_PATTERN_1;
649 temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
651 temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
652 intel_de_write(dev_priv, reg, temp | FDI_TX_ENABLE);
654 intel_de_write(dev_priv, FDI_RX_MISC(pipe),
655 FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90);
657 reg = FDI_RX_CTL(pipe);
658 temp = intel_de_read(dev_priv, reg);
659 if (HAS_PCH_CPT(dev_priv)) {
660 temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
661 temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
663 temp &= ~FDI_LINK_TRAIN_NONE;
664 temp |= FDI_LINK_TRAIN_PATTERN_1;
666 intel_de_write(dev_priv, reg, temp | FDI_RX_ENABLE);
668 intel_de_posting_read(dev_priv, reg);
671 for (i = 0; i < 4; i++) {
672 intel_de_rmw(dev_priv, FDI_TX_CTL(pipe),
673 FDI_LINK_TRAIN_VOL_EMP_MASK, snb_b_fdi_train_param[i]);
674 intel_de_posting_read(dev_priv, FDI_TX_CTL(pipe));
677 for (retry = 0; retry < 5; retry++) {
678 reg = FDI_RX_IIR(pipe);
679 temp = intel_de_read(dev_priv, reg);
680 drm_dbg_kms(&dev_priv->drm, "FDI_RX_IIR 0x%x\n", temp);
681 if (temp & FDI_RX_BIT_LOCK) {
682 intel_de_write(dev_priv, reg,
683 temp | FDI_RX_BIT_LOCK);
684 drm_dbg_kms(&dev_priv->drm,
685 "FDI train 1 done.\n");
694 drm_err(&dev_priv->drm, "FDI train 1 fail!\n");
697 reg = FDI_TX_CTL(pipe);
698 temp = intel_de_read(dev_priv, reg);
699 temp &= ~FDI_LINK_TRAIN_NONE;
700 temp |= FDI_LINK_TRAIN_PATTERN_2;
701 if (IS_SANDYBRIDGE(dev_priv)) {
702 temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
704 temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
706 intel_de_write(dev_priv, reg, temp);
708 reg = FDI_RX_CTL(pipe);
709 temp = intel_de_read(dev_priv, reg);
710 if (HAS_PCH_CPT(dev_priv)) {
711 temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
712 temp |= FDI_LINK_TRAIN_PATTERN_2_CPT;
714 temp &= ~FDI_LINK_TRAIN_NONE;
715 temp |= FDI_LINK_TRAIN_PATTERN_2;
717 intel_de_write(dev_priv, reg, temp);
719 intel_de_posting_read(dev_priv, reg);
722 for (i = 0; i < 4; i++) {
723 intel_de_rmw(dev_priv, FDI_TX_CTL(pipe),
724 FDI_LINK_TRAIN_VOL_EMP_MASK, snb_b_fdi_train_param[i]);
725 intel_de_posting_read(dev_priv, FDI_TX_CTL(pipe));
728 for (retry = 0; retry < 5; retry++) {
729 reg = FDI_RX_IIR(pipe);
730 temp = intel_de_read(dev_priv, reg);
731 drm_dbg_kms(&dev_priv->drm, "FDI_RX_IIR 0x%x\n", temp);
732 if (temp & FDI_RX_SYMBOL_LOCK) {
733 intel_de_write(dev_priv, reg,
734 temp | FDI_RX_SYMBOL_LOCK);
735 drm_dbg_kms(&dev_priv->drm,
736 "FDI train 2 done.\n");
745 drm_err(&dev_priv->drm, "FDI train 2 fail!\n");
747 drm_dbg_kms(&dev_priv->drm, "FDI train done.\n");
750 /* Manual link training for Ivy Bridge A0 parts */
751 static void ivb_manual_fdi_link_train(struct intel_crtc *crtc,
752 const struct intel_crtc_state *crtc_state)
754 struct drm_device *dev = crtc->base.dev;
755 struct drm_i915_private *dev_priv = to_i915(dev);
756 enum pipe pipe = crtc->pipe;
760 ivb_update_fdi_bc_bifurcation(crtc_state);
763 * Write the TU size bits before fdi link training, so that error
766 intel_de_write(dev_priv, FDI_RX_TUSIZE1(pipe),
767 intel_de_read(dev_priv, PIPE_DATA_M1(dev_priv, pipe)) & TU_SIZE_MASK);
769 /* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bit
771 reg = FDI_RX_IMR(pipe);
772 temp = intel_de_read(dev_priv, reg);
773 temp &= ~FDI_RX_SYMBOL_LOCK;
774 temp &= ~FDI_RX_BIT_LOCK;
775 intel_de_write(dev_priv, reg, temp);
777 intel_de_posting_read(dev_priv, reg);
780 drm_dbg_kms(&dev_priv->drm, "FDI_RX_IIR before link train 0x%x\n",
781 intel_de_read(dev_priv, FDI_RX_IIR(pipe)));
783 /* Try each vswing and preemphasis setting twice before moving on */
784 for (j = 0; j < ARRAY_SIZE(snb_b_fdi_train_param) * 2; j++) {
785 /* disable first in case we need to retry */
786 reg = FDI_TX_CTL(pipe);
787 temp = intel_de_read(dev_priv, reg);
788 temp &= ~(FDI_LINK_TRAIN_AUTO | FDI_LINK_TRAIN_NONE_IVB);
789 temp &= ~FDI_TX_ENABLE;
790 intel_de_write(dev_priv, reg, temp);
792 reg = FDI_RX_CTL(pipe);
793 temp = intel_de_read(dev_priv, reg);
794 temp &= ~FDI_LINK_TRAIN_AUTO;
795 temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
796 temp &= ~FDI_RX_ENABLE;
797 intel_de_write(dev_priv, reg, temp);
799 /* enable CPU FDI TX and PCH FDI RX */
800 reg = FDI_TX_CTL(pipe);
801 temp = intel_de_read(dev_priv, reg);
802 temp &= ~FDI_DP_PORT_WIDTH_MASK;
803 temp |= FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes);
804 temp |= FDI_LINK_TRAIN_PATTERN_1_IVB;
805 temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
806 temp |= snb_b_fdi_train_param[j/2];
807 temp |= FDI_COMPOSITE_SYNC;
808 intel_de_write(dev_priv, reg, temp | FDI_TX_ENABLE);
810 intel_de_write(dev_priv, FDI_RX_MISC(pipe),
811 FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90);
813 reg = FDI_RX_CTL(pipe);
814 temp = intel_de_read(dev_priv, reg);
815 temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
816 temp |= FDI_COMPOSITE_SYNC;
817 intel_de_write(dev_priv, reg, temp | FDI_RX_ENABLE);
819 intel_de_posting_read(dev_priv, reg);
820 udelay(1); /* should be 0.5us */
822 for (i = 0; i < 4; i++) {
823 reg = FDI_RX_IIR(pipe);
824 temp = intel_de_read(dev_priv, reg);
825 drm_dbg_kms(&dev_priv->drm, "FDI_RX_IIR 0x%x\n", temp);
827 if (temp & FDI_RX_BIT_LOCK ||
828 (intel_de_read(dev_priv, reg) & FDI_RX_BIT_LOCK)) {
829 intel_de_write(dev_priv, reg,
830 temp | FDI_RX_BIT_LOCK);
831 drm_dbg_kms(&dev_priv->drm,
832 "FDI train 1 done, level %i.\n",
836 udelay(1); /* should be 0.5us */
839 drm_dbg_kms(&dev_priv->drm,
840 "FDI train 1 fail on vswing %d\n", j / 2);
845 intel_de_rmw(dev_priv, FDI_TX_CTL(pipe),
846 FDI_LINK_TRAIN_NONE_IVB,
847 FDI_LINK_TRAIN_PATTERN_2_IVB);
848 intel_de_rmw(dev_priv, FDI_RX_CTL(pipe),
849 FDI_LINK_TRAIN_PATTERN_MASK_CPT,
850 FDI_LINK_TRAIN_PATTERN_2_CPT);
851 intel_de_posting_read(dev_priv, FDI_RX_CTL(pipe));
852 udelay(2); /* should be 1.5us */
854 for (i = 0; i < 4; i++) {
855 reg = FDI_RX_IIR(pipe);
856 temp = intel_de_read(dev_priv, reg);
857 drm_dbg_kms(&dev_priv->drm, "FDI_RX_IIR 0x%x\n", temp);
859 if (temp & FDI_RX_SYMBOL_LOCK ||
860 (intel_de_read(dev_priv, reg) & FDI_RX_SYMBOL_LOCK)) {
861 intel_de_write(dev_priv, reg,
862 temp | FDI_RX_SYMBOL_LOCK);
863 drm_dbg_kms(&dev_priv->drm,
864 "FDI train 2 done, level %i.\n",
868 udelay(2); /* should be 1.5us */
871 drm_dbg_kms(&dev_priv->drm,
872 "FDI train 2 fail on vswing %d\n", j / 2);
876 drm_dbg_kms(&dev_priv->drm, "FDI train done.\n");
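/*
 * Unlike ILK/SNB, Haswell trains FDI with the DDI E hardware auto-training:
 * DP_TP_CTL requests training pattern 1 with auto-train, FDI_RX_CTL sets
 * FDI_LINK_TRAIN_AUTO, and DP_TP_STATUS_AUTOTRAIN_DONE signals success,
 * while DDI_BUF_TRANS_SELECT steps through the buffer translation entries
 * on each retry.
 */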
879 /* Starting with Haswell, different DDI ports can work in FDI mode for
880 * connection to the PCH-located connectors. For this, it is necessary to train
881 * both the DDI port and PCH receiver for the desired DDI buffer settings.
883 * The recommended port to work in FDI mode is DDI E, which we use here. Also,
884 * please note that when FDI mode is active on DDI E, it shares 2 lanes with
885 * DDI A (which is used for eDP)
887 void hsw_fdi_link_train(struct intel_encoder *encoder,
888 const struct intel_crtc_state *crtc_state)
890 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
891 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
892 u32 temp, i, rx_ctl_val;
895 encoder->get_buf_trans(encoder, crtc_state, &n_entries);
897 hsw_prepare_dp_ddi_buffers(encoder, crtc_state);
899 /* Set the FDI_RX_MISC pwrdn lanes and the 2 workarounds listed at the
900 * mode set "sequence for CRT port" document:
901 * - TP1 to TP2 time with the default value
904 * WaFDIAutoLinkSetTimingOverrride:hsw
906 intel_de_write(dev_priv, FDI_RX_MISC(PIPE_A),
907 FDI_RX_PWRDN_LANE1_VAL(2) |
908 FDI_RX_PWRDN_LANE0_VAL(2) |
909 FDI_RX_TP1_TO_TP2_48 |
910 FDI_RX_FDI_DELAY_90);
912 /* Enable the PCH Receiver FDI PLL */
913 rx_ctl_val = dev_priv->display.fdi.rx_config | FDI_RX_ENHANCE_FRAME_ENABLE |
915 FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes);
916 intel_de_write(dev_priv, FDI_RX_CTL(PIPE_A), rx_ctl_val);
917 intel_de_posting_read(dev_priv, FDI_RX_CTL(PIPE_A));
920 /* Switch from Rawclk to PCDclk */
921 rx_ctl_val |= FDI_PCDCLK;
922 intel_de_write(dev_priv, FDI_RX_CTL(PIPE_A), rx_ctl_val);
924 /* Configure Port Clock Select */
925 drm_WARN_ON(&dev_priv->drm, crtc_state->shared_dpll->info->id != DPLL_ID_SPLL);
926 intel_ddi_enable_clock(encoder, crtc_state);
928 /* Start the training iterating through available voltages and emphasis,
929 * testing each value twice. */
930 for (i = 0; i < n_entries * 2; i++) {
931 /* Configure DP_TP_CTL with auto-training */
932 intel_de_write(dev_priv, DP_TP_CTL(PORT_E),
933 DP_TP_CTL_FDI_AUTOTRAIN |
934 DP_TP_CTL_ENHANCED_FRAME_ENABLE |
935 DP_TP_CTL_LINK_TRAIN_PAT1 |
938 /* Configure and enable DDI_BUF_CTL for DDI E with next voltage.
939 * DDI E does not support port reversal, the functionality is
940 * achieved on the PCH side in FDI_RX_CTL, so no need to set the
941 * port reversal bit */
942 intel_de_write(dev_priv, DDI_BUF_CTL(PORT_E),
944 ((crtc_state->fdi_lanes - 1) << 1) |
945 DDI_BUF_TRANS_SELECT(i / 2));
946 intel_de_posting_read(dev_priv, DDI_BUF_CTL(PORT_E));
950 /* Program PCH FDI Receiver TU */
951 intel_de_write(dev_priv, FDI_RX_TUSIZE1(PIPE_A), TU_SIZE(64));
953 /* Enable PCH FDI Receiver with auto-training */
954 rx_ctl_val |= FDI_RX_ENABLE | FDI_LINK_TRAIN_AUTO;
955 intel_de_write(dev_priv, FDI_RX_CTL(PIPE_A), rx_ctl_val);
956 intel_de_posting_read(dev_priv, FDI_RX_CTL(PIPE_A));
958 /* Wait for FDI receiver lane calibration */
961 /* Unset FDI_RX_MISC pwrdn lanes */
962 intel_de_rmw(dev_priv, FDI_RX_MISC(PIPE_A),
963 FDI_RX_PWRDN_LANE1_MASK | FDI_RX_PWRDN_LANE0_MASK, 0);
964 intel_de_posting_read(dev_priv, FDI_RX_MISC(PIPE_A));
966 /* Wait for FDI auto training time */
969 temp = intel_de_read(dev_priv, DP_TP_STATUS(PORT_E));
970 if (temp & DP_TP_STATUS_AUTOTRAIN_DONE) {
971 drm_dbg_kms(&dev_priv->drm,
972 "FDI link training done on step %d\n", i);
977 * Leave things enabled even if we failed to train FDI.
978 * Results in less fireworks from the state checker.
980 if (i == n_entries * 2 - 1) {
981 drm_err(&dev_priv->drm, "FDI link training failed!\n");
985 rx_ctl_val &= ~FDI_RX_ENABLE;
986 intel_de_write(dev_priv, FDI_RX_CTL(PIPE_A), rx_ctl_val);
987 intel_de_posting_read(dev_priv, FDI_RX_CTL(PIPE_A));
989 intel_de_rmw(dev_priv, DDI_BUF_CTL(PORT_E), DDI_BUF_CTL_ENABLE, 0);
990 intel_de_posting_read(dev_priv, DDI_BUF_CTL(PORT_E));
992 /* Disable DP_TP_CTL and FDI_RX_CTL and retry */
993 intel_de_rmw(dev_priv, DP_TP_CTL(PORT_E), DP_TP_CTL_ENABLE, 0);
994 intel_de_posting_read(dev_priv, DP_TP_CTL(PORT_E));
996 intel_wait_ddi_buf_idle(dev_priv, PORT_E);
998 /* Reset FDI_RX_MISC pwrdn lanes */
999 intel_de_rmw(dev_priv, FDI_RX_MISC(PIPE_A),
1000 FDI_RX_PWRDN_LANE1_MASK | FDI_RX_PWRDN_LANE0_MASK,
1001 FDI_RX_PWRDN_LANE1_VAL(2) | FDI_RX_PWRDN_LANE0_VAL(2));
1002 intel_de_posting_read(dev_priv, FDI_RX_MISC(PIPE_A));
1005 /* Enable normal pixel sending for FDI */
1006 intel_de_write(dev_priv, DP_TP_CTL(PORT_E),
1007 DP_TP_CTL_FDI_AUTOTRAIN |
1008 DP_TP_CTL_LINK_TRAIN_NORMAL |
1009 DP_TP_CTL_ENHANCED_FRAME_ENABLE |
1013 void hsw_fdi_disable(struct intel_encoder *encoder)
1015 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
1018 * Bspec lists this as both step 13 (before DDI_BUF_CTL disable)
1019 * and step 18 (after clearing PORT_CLK_SEL). Based on a BUN,
1020 * step 13 is the correct place for it. Step 18 is where it was
1021 * originally before the BUN.
1023 intel_de_rmw(dev_priv, FDI_RX_CTL(PIPE_A), FDI_RX_ENABLE, 0);
1024 intel_de_rmw(dev_priv, DDI_BUF_CTL(PORT_E), DDI_BUF_CTL_ENABLE, 0);
1025 intel_wait_ddi_buf_idle(dev_priv, PORT_E);
1026 intel_ddi_disable_clock(encoder);
1027 intel_de_rmw(dev_priv, FDI_RX_MISC(PIPE_A),
1028 FDI_RX_PWRDN_LANE1_MASK | FDI_RX_PWRDN_LANE0_MASK,
1029 FDI_RX_PWRDN_LANE1_VAL(2) | FDI_RX_PWRDN_LANE0_VAL(2));
1030 intel_de_rmw(dev_priv, FDI_RX_CTL(PIPE_A), FDI_PCDCLK, 0);
1031 intel_de_rmw(dev_priv, FDI_RX_CTL(PIPE_A), FDI_RX_PLL_ENABLE, 0);
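/*
 * Enable the PCH FDI receiver PLL for the pipe (lane count and BPC taken
 * from the transcoder configuration), switch the receiver from the raw
 * clock to PCDclk, and make sure the CPU FDI transmitter PLL is running.
 */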
1034 void ilk_fdi_pll_enable(const struct intel_crtc_state *crtc_state)
1036 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
1037 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1038 enum pipe pipe = crtc->pipe;
1042 /* enable PCH FDI RX PLL, wait warmup plus DMI latency */
1043 reg = FDI_RX_CTL(pipe);
1044 temp = intel_de_read(dev_priv, reg);
1045 temp &= ~(FDI_DP_PORT_WIDTH_MASK | (0x7 << 16));
1046 temp |= FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes);
1047 temp |= (intel_de_read(dev_priv, TRANSCONF(dev_priv, pipe)) & TRANSCONF_BPC_MASK) << 11;
1048 intel_de_write(dev_priv, reg, temp | FDI_RX_PLL_ENABLE);
1050 intel_de_posting_read(dev_priv, reg);
1053 /* Switch from Rawclk to PCDclk */
1054 intel_de_rmw(dev_priv, reg, 0, FDI_PCDCLK);
1055 intel_de_posting_read(dev_priv, reg);
1058 /* Enable CPU FDI TX PLL, always on for Ironlake */
1059 reg = FDI_TX_CTL(pipe);
1060 temp = intel_de_read(dev_priv, reg);
1061 if ((temp & FDI_TX_PLL_ENABLE) == 0) {
1062 intel_de_write(dev_priv, reg, temp | FDI_TX_PLL_ENABLE);
1064 intel_de_posting_read(dev_priv, reg);
1069 void ilk_fdi_pll_disable(struct intel_crtc *crtc)
1071 struct drm_device *dev = crtc->base.dev;
1072 struct drm_i915_private *dev_priv = to_i915(dev);
1073 enum pipe pipe = crtc->pipe;
1075 /* Switch from PCDclk to Rawclk */
1076 intel_de_rmw(dev_priv, FDI_RX_CTL(pipe), FDI_PCDCLK, 0);
1078 /* Disable CPU FDI TX PLL */
1079 intel_de_rmw(dev_priv, FDI_TX_CTL(pipe), FDI_TX_PLL_ENABLE, 0);
1080 intel_de_posting_read(dev_priv, FDI_TX_CTL(pipe));
1083 /* Wait for the clocks to turn off. */
1084 intel_de_rmw(dev_priv, FDI_RX_CTL(pipe), FDI_RX_PLL_ENABLE, 0);
1085 intel_de_posting_read(dev_priv, FDI_RX_CTL(pipe));
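/*
 * Disable the CPU FDI transmitter and PCH FDI receiver for the pipe and put
 * both sides back into training pattern 1 so that a later re-enable starts
 * from a known state; IBX also needs the RX clock phase sync pointer
 * workaround undone.
 */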
1089 void ilk_fdi_disable(struct intel_crtc *crtc)
1091 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1092 enum pipe pipe = crtc->pipe;
1096 /* disable CPU FDI tx and PCH FDI rx */
1097 intel_de_rmw(dev_priv, FDI_TX_CTL(pipe), FDI_TX_ENABLE, 0);
1098 intel_de_posting_read(dev_priv, FDI_TX_CTL(pipe));
1100 reg = FDI_RX_CTL(pipe);
1101 temp = intel_de_read(dev_priv, reg);
1102 temp &= ~(0x7 << 16);
1103 temp |= (intel_de_read(dev_priv, TRANSCONF(dev_priv, pipe)) & TRANSCONF_BPC_MASK) << 11;
1104 intel_de_write(dev_priv, reg, temp & ~FDI_RX_ENABLE);
1106 intel_de_posting_read(dev_priv, reg);
1109 /* Ironlake workaround, disable clock pointer after disabling FDI */
1110 if (HAS_PCH_IBX(dev_priv))
1111 intel_de_write(dev_priv, FDI_RX_CHICKEN(pipe),
1112 FDI_RX_PHASE_SYNC_POINTER_OVR);
1114 /* still set train pattern 1 */
1115 intel_de_rmw(dev_priv, FDI_TX_CTL(pipe),
1116 FDI_LINK_TRAIN_NONE, FDI_LINK_TRAIN_PATTERN_1);
1118 reg = FDI_RX_CTL(pipe);
1119 temp = intel_de_read(dev_priv, reg);
1120 if (HAS_PCH_CPT(dev_priv)) {
1121 temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
1122 temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
1124 temp &= ~FDI_LINK_TRAIN_NONE;
1125 temp |= FDI_LINK_TRAIN_PATTERN_1;
1127 /* BPC in FDI rx is consistent with that in TRANSCONF */
1128 temp &= ~(0x07 << 16);
1129 temp |= (intel_de_read(dev_priv, TRANSCONF(dev_priv, pipe)) & TRANSCONF_BPC_MASK) << 11;
1130 intel_de_write(dev_priv, reg, temp);
1132 intel_de_posting_read(dev_priv, reg);
1136 static const struct intel_fdi_funcs ilk_funcs = {
1137 .fdi_link_train = ilk_fdi_link_train,
1140 static const struct intel_fdi_funcs gen6_funcs = {
1141 .fdi_link_train = gen6_fdi_link_train,
1144 static const struct intel_fdi_funcs ivb_funcs = {
1145 .fdi_link_train = ivb_manual_fdi_link_train,
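/*
 * Select the per-platform link training implementation used by
 * intel_fdi_link_train().
 */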
1149 intel_fdi_init_hook(struct drm_i915_private *dev_priv)
1151 if (IS_IRONLAKE(dev_priv)) {
1152 dev_priv->display.funcs.fdi = &ilk_funcs;
1153 } else if (IS_SANDYBRIDGE(dev_priv)) {
1154 dev_priv->display.funcs.fdi = &gen6_funcs;
1155 } else if (IS_IVYBRIDGE(dev_priv)) {
1156 /* FIXME: detect B0+ stepping and use auto training */
1157 dev_priv->display.funcs.fdi = &ivb_funcs;