2 * Copyright © 2008 Intel Corporation
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
28 #include <linux/i2c.h>
29 #include <linux/slab.h>
30 #include <linux/export.h>
31 #include <linux/notifier.h>
32 #include <linux/reboot.h>
34 #include <drm/drm_atomic_helper.h>
35 #include <drm/drm_crtc.h>
36 #include <drm/drm_crtc_helper.h>
37 #include <drm/drm_edid.h>
38 #include "intel_drv.h"
39 #include <drm/i915_drm.h>
42 #define DP_LINK_CHECK_TIMEOUT (10 * 1000)
44 /* Compliance test status bits */
45 #define INTEL_DP_RESOLUTION_SHIFT_MASK 0
46 #define INTEL_DP_RESOLUTION_PREFERRED (1 << INTEL_DP_RESOLUTION_SHIFT_MASK)
47 #define INTEL_DP_RESOLUTION_STANDARD (2 << INTEL_DP_RESOLUTION_SHIFT_MASK)
48 #define INTEL_DP_RESOLUTION_FAILSAFE (3 << INTEL_DP_RESOLUTION_SHIFT_MASK)
/*
 * Fixed DPLL divider settings per DP link clock, one table per platform
 * family.  Each entry pairs a link rate (kHz) with the p1/p2/n/m1/m2
 * dividers that produce it.
 * NOTE(review): this excerpt is line-sampled; the "{ <clock>," half of
 * several entries is missing from view.
 */
55 static const struct dp_link_dpll gen4_dpll[] = {
57 { .p1 = 2, .p2 = 10, .n = 2, .m1 = 23, .m2 = 8 } },
59 { .p1 = 1, .p2 = 10, .n = 1, .m1 = 14, .m2 = 2 } }
/* PCH-split (ILK+) divider values. */
62 static const struct dp_link_dpll pch_dpll[] = {
64 { .p1 = 2, .p2 = 10, .n = 1, .m1 = 12, .m2 = 9 } },
66 { .p1 = 1, .p2 = 10, .n = 2, .m1 = 14, .m2 = 8 } }
/* Valleyview divider values. */
69 static const struct dp_link_dpll vlv_dpll[] = {
71 { .p1 = 3, .p2 = 2, .n = 5, .m1 = 3, .m2 = 81 } },
73 { .p1 = 2, .p2 = 2, .n = 1, .m1 = 2, .m2 = 27 } }
77 * CHV supports eDP 1.4 that have more link rates.
78 * Below only provides the fixed rate but exclude variable rate.
80 static const struct dp_link_dpll chv_dpll[] = {
82 * CHV requires to program fractional division for m2.
83 * m2 is stored in fixed point format using formula below
84 * (m2_int << 22) | m2_fraction
86 { 162000, /* m2_int = 32, m2_fraction = 1677722 */
87 { .p1 = 4, .p2 = 2, .n = 1, .m1 = 2, .m2 = 0x819999a } },
88 { 270000, /* m2_int = 27, m2_fraction = 0 */
89 { .p1 = 4, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 } },
90 { 540000, /* m2_int = 27, m2_fraction = 0 */
91 { .p1 = 2, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 } }
/* Source-supported DP link rates (kHz), per platform; default_rates is
 * the classic RBR/HBR/HBR2 set. */
94 static const int bxt_rates[] = { 162000, 216000, 243000, 270000,
95 324000, 432000, 540000 };
96 static const int skl_rates[] = { 162000, 216000, 270000,
97 324000, 432000, 540000 };
98 static const int default_rates[] = { 162000, 270000, 540000 };
101 * is_edp - is the given port attached to an eDP panel (either CPU or PCH)
102 * @intel_dp: DP struct
104 * If a CPU or PCH DP output is attached to an eDP panel, this function
105 * will return true, and false otherwise.
107 static bool is_edp(struct intel_dp *intel_dp)
109 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
111 return intel_dig_port->base.type == INTEL_OUTPUT_EDP;
/* Return the drm_device that owns this DP encoder. */
114 static struct drm_device *intel_dp_to_dev(struct intel_dp *intel_dp)
116 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
118 return intel_dig_port->base.base.dev;
/* Map a connector back to the intel_dp of its attached encoder. */
121 static struct intel_dp *intel_attached_dp(struct drm_connector *connector)
123 return enc_to_intel_dp(&intel_attached_encoder(connector)->base);
/* Forward declarations for helpers defined later in this file. */
126 static void intel_dp_link_down(struct intel_dp *intel_dp);
127 static bool edp_panel_vdd_on(struct intel_dp *intel_dp);
128 static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync);
129 static void vlv_init_panel_power_sequencer(struct intel_dp *intel_dp);
130 static void vlv_steal_power_sequencer(struct drm_device *dev,
/* Bitmask (bits 0-3) of the lanes NOT used when lane_count lanes are
 * driven. */
133 static unsigned int intel_dp_unused_lane_mask(int lane_count)
135 return ~((1 << lane_count) - 1) & 0xf;
/*
 * Max link bandwidth code (DP_LINK_BW_*) advertised by the sink's DPCD,
 * clamped (with a WARN) to 1.62Gbps on invalid values.
 * NOTE(review): intermediate switch cases are missing from this
 * line-sampled excerpt.
 */
139 intel_dp_max_link_bw(struct intel_dp *intel_dp)
141 int max_link_bw = intel_dp->dpcd[DP_MAX_LINK_RATE];
143 switch (max_link_bw) {
144 case DP_LINK_BW_1_62:
149 WARN(1, "invalid max DP link bw val %x, using 1.62Gbps\n",
151 max_link_bw = DP_LINK_BW_1_62;
/* Usable lane count: min of what the source port and the sink support. */
157 static u8 intel_dp_max_lane_count(struct intel_dp *intel_dp)
159 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
160 u8 source_max, sink_max;
162 source_max = intel_dig_port->max_lanes;
163 sink_max = drm_dp_max_lane_count(intel_dp->dpcd);
165 return min(source_max, sink_max);
169 * The units on the numbers in the next two are... bizarre. Examples will
170 * make it clearer; this one parallels an example in the eDP spec.
172 * intel_dp_max_data_rate for one lane of 2.7GHz evaluates as:
174 * 270000 * 1 * 8 / 10 == 216000
176 * The actual data capacity of that configuration is 2.16Gbit/s, so the
177 * units are decakilobits. ->clock in a drm_display_mode is in kilohertz -
178 * or equivalently, kilopixels per second - so for 1680x1050R it'd be
179 * 119000. At 18bpp that's 2142000 kilobits per second.
181 * Thus the strange-looking division by 10 in intel_dp_link_required, to
182 * get the result in decakilobits instead of kilobits.
/* Bandwidth needed by a mode, in decakilobits/s; "+ 9" rounds up. */
186 intel_dp_link_required(int pixel_clock, int bpp)
188 return (pixel_clock * bpp + 9) / 10;
/* Max payload rate of a link config, in decakilobits/s (8b/10b coding). */
192 intel_dp_max_data_rate(int max_link_clock, int max_lanes)
194 return (max_link_clock * max_lanes * 8) / 10;
/*
 * drm_connector .mode_valid() hook: reject modes that exceed the link
 * bandwidth (computed at 18bpp), the platform max dotclock, or - for
 * eDP - the fixed panel mode's dimensions.
 */
197 static enum drm_mode_status
198 intel_dp_mode_valid(struct drm_connector *connector,
199 struct drm_display_mode *mode)
201 struct intel_dp *intel_dp = intel_attached_dp(connector);
202 struct intel_connector *intel_connector = to_intel_connector(connector);
203 struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode;
204 int target_clock = mode->clock;
205 int max_rate, mode_rate, max_lanes, max_link_clock;
206 int max_dotclk = to_i915(connector->dev)->max_dotclk_freq;
/* eDP panels can't scan out anything larger than their fixed mode. */
208 if (is_edp(intel_dp) && fixed_mode) {
209 if (mode->hdisplay > fixed_mode->hdisplay)
212 if (mode->vdisplay > fixed_mode->vdisplay)
215 target_clock = fixed_mode->clock;
218 max_link_clock = intel_dp_max_link_rate(intel_dp);
219 max_lanes = intel_dp_max_lane_count(intel_dp);
221 max_rate = intel_dp_max_data_rate(max_link_clock, max_lanes);
222 mode_rate = intel_dp_link_required(target_clock, 18);
224 if (mode_rate > max_rate || target_clock > max_dotclk)
225 return MODE_CLOCK_HIGH;
227 if (mode->clock < 10000)
228 return MODE_CLOCK_LOW;
230 if (mode->flags & DRM_MODE_FLAG_DBLCLK)
231 return MODE_H_ILLEGAL;
/* Pack up to 4 bytes, MSB first, into one 32-bit AUX data register
 * value. */
236 uint32_t intel_dp_pack_aux(const uint8_t *src, int src_bytes)
243 for (i = 0; i < src_bytes; i++)
244 v |= ((uint32_t) src[i]) << ((3-i) * 8);
/* Inverse of intel_dp_pack_aux(): unpack a register value, MSB first. */
248 static void intel_dp_unpack_aux(uint32_t src, uint8_t *dst, int dst_bytes)
253 for (i = 0; i < dst_bytes; i++)
254 dst[i] = src >> ((3-i) * 8);
/* Forward declarations: panel power sequencer setup, defined later. */
258 intel_dp_init_panel_power_sequencer(struct drm_device *dev,
259 struct intel_dp *intel_dp);
261 intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
262 struct intel_dp *intel_dp);
/*
 * Take pps_mutex while holding an AUX power domain reference.  The
 * reference must be acquired before the mutex (and dropped after it) to
 * avoid a lock-order inversion with the power_domain mutex - see the
 * comment in vlv_power_sequencer_reset().
 */
264 static void pps_lock(struct intel_dp *intel_dp)
266 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
267 struct intel_encoder *encoder = &intel_dig_port->base;
268 struct drm_device *dev = encoder->base.dev;
269 struct drm_i915_private *dev_priv = dev->dev_private;
270 enum intel_display_power_domain power_domain;
273 * See vlv_power_sequencer_reset() why we need
274 * a power domain reference here.
276 power_domain = intel_display_port_aux_power_domain(encoder);
277 intel_display_power_get(dev_priv, power_domain);
279 mutex_lock(&dev_priv->pps_mutex);
/* Counterpart of pps_lock(): drop pps_mutex, then the power reference. */
282 static void pps_unlock(struct intel_dp *intel_dp)
284 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
285 struct intel_encoder *encoder = &intel_dig_port->base;
286 struct drm_device *dev = encoder->base.dev;
287 struct drm_i915_private *dev_priv = dev->dev_private;
288 enum intel_display_power_domain power_domain;
290 mutex_unlock(&dev_priv->pps_mutex);
292 power_domain = intel_display_port_aux_power_domain(encoder);
293 intel_display_power_put(dev_priv, power_domain);
/*
 * Force the VLV/CHV panel power sequencer to latch onto this port by
 * briefly enabling then disabling the DP port (with the pipe's PLL
 * forced on if necessary).  Without this kick even the VDD force bit
 * has no effect.  Bails out with a WARN if the port is already enabled.
 */
297 vlv_power_sequencer_kick(struct intel_dp *intel_dp)
299 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
300 struct drm_device *dev = intel_dig_port->base.base.dev;
301 struct drm_i915_private *dev_priv = dev->dev_private;
302 enum pipe pipe = intel_dp->pps_pipe;
303 bool pll_enabled, release_cl_override = false;
304 enum dpio_phy phy = DPIO_PHY(pipe);
305 enum dpio_channel ch = vlv_pipe_to_channel(pipe);
308 if (WARN(I915_READ(intel_dp->output_reg) & DP_PORT_EN,
309 "skipping pipe %c power seqeuncer kick due to port %c being active\n",
310 pipe_name(pipe), port_name(intel_dig_port->port)))
313 DRM_DEBUG_KMS("kicking pipe %c power sequencer for port %c\n",
314 pipe_name(pipe), port_name(intel_dig_port->port));
316 /* Preserve the BIOS-computed detected bit. This is
317 * supposed to be read-only.
319 DP = I915_READ(intel_dp->output_reg) & DP_DETECTED;
320 DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
321 DP |= DP_PORT_WIDTH(1);
322 DP |= DP_LINK_TRAIN_PAT_1;
324 if (IS_CHERRYVIEW(dev))
325 DP |= DP_PIPE_SELECT_CHV(pipe);
326 else if (pipe == PIPE_B)
327 DP |= DP_PIPEB_SELECT;
329 pll_enabled = I915_READ(DPLL(pipe)) & DPLL_VCO_ENABLE;
332 * The DPLL for the pipe must be enabled for this to work.
333 * So temporarily enable it if it's not already enabled.
336 release_cl_override = IS_CHERRYVIEW(dev) &&
337 !chv_phy_powergate_ch(dev_priv, phy, ch, true);
339 if (vlv_force_pll_on(dev, pipe, IS_CHERRYVIEW(dev) ?
340 &chv_dpll[0].dpll : &vlv_dpll[0].dpll)) {
341 DRM_ERROR("Failed to force on pll for pipe %c!\n",
348 * Similar magic as in intel_dp_enable_port().
349 * We _must_ do this port enable + disable trick
350 * to make this power sequencer lock onto the port.
351 * Otherwise even VDD force bit won't work.
353 I915_WRITE(intel_dp->output_reg, DP);
354 POSTING_READ(intel_dp->output_reg);
356 I915_WRITE(intel_dp->output_reg, DP | DP_PORT_EN);
357 POSTING_READ(intel_dp->output_reg);
359 I915_WRITE(intel_dp->output_reg, DP & ~DP_PORT_EN);
360 POSTING_READ(intel_dp->output_reg);
/* Undo the temporary PLL/common-lane force-on from above. */
363 vlv_force_pll_off(dev, pipe);
365 if (release_cl_override)
366 chv_phy_powergate_ch(dev_priv, phy, ch, false);
/*
 * Return the pipe whose power sequencer drives this eDP port, allocating
 * one (a pipe not used by any other eDP port) if none is assigned yet.
 * After allocation the sequencer is initialized and kicked so it locks
 * onto the port.  Caller must hold pps_mutex.
 */
371 vlv_power_sequencer_pipe(struct intel_dp *intel_dp)
373 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
374 struct drm_device *dev = intel_dig_port->base.base.dev;
375 struct drm_i915_private *dev_priv = dev->dev_private;
376 struct intel_encoder *encoder;
377 unsigned int pipes = (1 << PIPE_A) | (1 << PIPE_B);
380 lockdep_assert_held(&dev_priv->pps_mutex);
382 /* We should never land here with regular DP ports */
383 WARN_ON(!is_edp(intel_dp));
385 if (intel_dp->pps_pipe != INVALID_PIPE)
386 return intel_dp->pps_pipe;
389 * We don't have power sequencer currently.
390 * Pick one that's not used by other ports.
392 for_each_intel_encoder(dev, encoder) {
393 struct intel_dp *tmp;
395 if (encoder->type != INTEL_OUTPUT_EDP)
398 tmp = enc_to_intel_dp(&encoder->base);
400 if (tmp->pps_pipe != INVALID_PIPE)
401 pipes &= ~(1 << tmp->pps_pipe);
405 * Didn't find one. This should not happen since there
406 * are two power sequencers and up to two eDP ports.
408 if (WARN_ON(pipes == 0))
411 pipe = ffs(pipes) - 1;
413 vlv_steal_power_sequencer(dev, pipe);
414 intel_dp->pps_pipe = pipe;
416 DRM_DEBUG_KMS("picked pipe %c power sequencer for port %c\n",
417 pipe_name(intel_dp->pps_pipe),
418 port_name(intel_dig_port->port));
420 /* init power sequencer on this pipe and port */
421 intel_dp_init_panel_power_sequencer(dev, intel_dp);
422 intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
425 * Even vdd force doesn't work until we've made
426 * the power sequencer lock in on the port.
428 vlv_power_sequencer_kick(intel_dp);
430 return intel_dp->pps_pipe;
/* Predicate type used by vlv_initial_pps_pipe() to qualify a pipe. */
433 typedef bool (*vlv_pipe_check)(struct drm_i915_private *dev_priv,
/* True if the pipe's panel power is currently on. */
436 static bool vlv_pipe_has_pp_on(struct drm_i915_private *dev_priv,
439 return I915_READ(VLV_PIPE_PP_STATUS(pipe)) & PP_ON;
/* True if the pipe currently has VDD forced on. */
442 static bool vlv_pipe_has_vdd_on(struct drm_i915_private *dev_priv,
445 return I915_READ(VLV_PIPE_PP_CONTROL(pipe)) & EDP_FORCE_VDD;
/* Always-true predicate: accept any pipe with the right port selected. */
448 static bool vlv_pipe_any(struct drm_i915_private *dev_priv,
/*
 * Scan pipes A/B for one whose power sequencer already has this port
 * selected AND satisfies pipe_check().
 */
455 vlv_initial_pps_pipe(struct drm_i915_private *dev_priv,
457 vlv_pipe_check pipe_check)
461 for (pipe = PIPE_A; pipe <= PIPE_B; pipe++) {
462 u32 port_sel = I915_READ(VLV_PIPE_PP_ON_DELAYS(pipe)) &
463 PANEL_PORT_SELECT_MASK;
465 if (port_sel != PANEL_PORT_SELECT_VLV(port))
468 if (!pipe_check(dev_priv, pipe))
/*
 * Adopt whatever power sequencer the BIOS left configured for this port,
 * preferring (in order) a pipe with panel power on, then one with VDD
 * forced on, then any pipe with the port selected.  Caller must hold
 * pps_mutex.
 */
478 vlv_initial_power_sequencer_setup(struct intel_dp *intel_dp)
480 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
481 struct drm_device *dev = intel_dig_port->base.base.dev;
482 struct drm_i915_private *dev_priv = dev->dev_private;
483 enum port port = intel_dig_port->port;
485 lockdep_assert_held(&dev_priv->pps_mutex);
487 /* try to find a pipe with this port selected */
488 /* first pick one where the panel is on */
489 intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
491 /* didn't find one? pick one where vdd is on */
492 if (intel_dp->pps_pipe == INVALID_PIPE)
493 intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
494 vlv_pipe_has_vdd_on);
495 /* didn't find one? pick one with just the correct port */
496 if (intel_dp->pps_pipe == INVALID_PIPE)
497 intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
500 /* didn't find one? just let vlv_power_sequencer_pipe() pick one when needed */
501 if (intel_dp->pps_pipe == INVALID_PIPE) {
502 DRM_DEBUG_KMS("no initial power sequencer for port %c\n",
507 DRM_DEBUG_KMS("initial power sequencer for port %c: pipe %c\n",
508 port_name(port), pipe_name(intel_dp->pps_pipe));
510 intel_dp_init_panel_power_sequencer(dev, intel_dp);
511 intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
/*
 * Forget all eDP ports' power sequencer assignments (VLV/CHV only) so
 * they are re-picked on next use; see the locking note below for why
 * pps_mutex cannot be taken here.
 */
514 void vlv_power_sequencer_reset(struct drm_i915_private *dev_priv)
516 struct drm_device *dev = dev_priv->dev;
517 struct intel_encoder *encoder;
519 if (WARN_ON(!IS_VALLEYVIEW(dev) && !IS_CHERRYVIEW(dev)))
523 * We can't grab pps_mutex here due to deadlock with power_domain
524 * mutex when power_domain functions are called while holding pps_mutex.
525 * That also means that in order to use pps_pipe the code needs to
526 * hold both a power domain reference and pps_mutex, and the power domain
527 * reference get/put must be done while _not_ holding pps_mutex.
528 * pps_{lock,unlock}() do these steps in the correct order, so one
529 * should use them always.
532 for_each_intel_encoder(dev, encoder) {
533 struct intel_dp *intel_dp;
535 if (encoder->type != INTEL_OUTPUT_EDP)
538 intel_dp = enc_to_intel_dp(&encoder->base);
539 intel_dp->pps_pipe = INVALID_PIPE;
/* Panel power control register for this eDP port, per platform (BXT /
 * PCH-split / VLV+CHV which index by sequencer pipe). */
544 _pp_ctrl_reg(struct intel_dp *intel_dp)
546 struct drm_device *dev = intel_dp_to_dev(intel_dp);
549 return BXT_PP_CONTROL(0);
550 else if (HAS_PCH_SPLIT(dev))
551 return PCH_PP_CONTROL;
553 return VLV_PIPE_PP_CONTROL(vlv_power_sequencer_pipe(intel_dp));
/* Panel power status register, same platform selection as above. */
557 _pp_stat_reg(struct intel_dp *intel_dp)
559 struct drm_device *dev = intel_dp_to_dev(intel_dp);
562 return BXT_PP_STATUS(0);
563 else if (HAS_PCH_SPLIT(dev))
564 return PCH_PP_STATUS;
566 return VLV_PIPE_PP_STATUS(vlv_power_sequencer_pipe(intel_dp));
569 /* Reboot notifier handler to shutdown panel power to guarantee T12 timing
570 This function only applicable when panel PM state is not to be tracked */
571 static int edp_notify_handler(struct notifier_block *this, unsigned long code,
574 struct intel_dp *intel_dp = container_of(this, typeof(* intel_dp),
576 struct drm_device *dev = intel_dp_to_dev(intel_dp);
577 struct drm_i915_private *dev_priv = dev->dev_private;
/* Only react to restarts on eDP ports. */
579 if (!is_edp(intel_dp) || code != SYS_RESTART)
584 if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
585 enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);
586 i915_reg_t pp_ctrl_reg, pp_div_reg;
589 pp_ctrl_reg = VLV_PIPE_PP_CONTROL(pipe);
590 pp_div_reg = VLV_PIPE_PP_DIVISOR(pipe);
591 pp_div = I915_READ(pp_div_reg);
592 pp_div &= PP_REFERENCE_DIVIDER_MASK;
594 /* 0x1F write to PP_DIV_REG sets max cycle delay */
595 I915_WRITE(pp_div_reg, pp_div | 0x1F);
596 I915_WRITE(pp_ctrl_reg, PANEL_UNLOCK_REGS | PANEL_POWER_OFF);
/* Sleep out the power-cycle (T12) delay before the reboot proceeds. */
597 msleep(intel_dp->panel_power_cycle_delay);
600 pps_unlock(intel_dp);
/* True if the panel power sequencer reports panel power on.  Caller must
 * hold pps_mutex; VLV/CHV without an assigned sequencer reports false. */
605 static bool edp_have_panel_power(struct intel_dp *intel_dp)
607 struct drm_device *dev = intel_dp_to_dev(intel_dp);
608 struct drm_i915_private *dev_priv = dev->dev_private;
610 lockdep_assert_held(&dev_priv->pps_mutex);
612 if ((IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) &&
613 intel_dp->pps_pipe == INVALID_PIPE)
616 return (I915_READ(_pp_stat_reg(intel_dp)) & PP_ON) != 0;
/* True if VDD is currently forced on; same locking rules as above. */
619 static bool edp_have_panel_vdd(struct intel_dp *intel_dp)
621 struct drm_device *dev = intel_dp_to_dev(intel_dp);
622 struct drm_i915_private *dev_priv = dev->dev_private;
624 lockdep_assert_held(&dev_priv->pps_mutex);
626 if ((IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) &&
627 intel_dp->pps_pipe == INVALID_PIPE)
630 return I915_READ(_pp_ctrl_reg(intel_dp)) & EDP_FORCE_VDD;
/* Sanity check before AUX traffic: WARN if an eDP panel has neither
 * panel power nor VDD, since the transaction cannot succeed. */
634 intel_dp_check_edp(struct intel_dp *intel_dp)
636 struct drm_device *dev = intel_dp_to_dev(intel_dp);
637 struct drm_i915_private *dev_priv = dev->dev_private;
639 if (!is_edp(intel_dp))
642 if (!edp_have_panel_power(intel_dp) && !edp_have_panel_vdd(intel_dp)) {
643 WARN(1, "eDP powered off while attempting aux channel communication.\n");
644 DRM_DEBUG_KMS("Status 0x%08x Control 0x%08x\n",
645 I915_READ(_pp_stat_reg(intel_dp)),
646 I915_READ(_pp_ctrl_reg(intel_dp)));
/*
 * Wait (up to 10ms) for the AUX channel SEND_BUSY bit to clear, using the
 * gmbus wait queue when AUX interrupts are available and atomic polling
 * otherwise.  Logs an error if the hardware never signals completion.
 */
651 intel_dp_aux_wait_done(struct intel_dp *intel_dp, bool has_aux_irq)
653 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
654 struct drm_device *dev = intel_dig_port->base.base.dev;
655 struct drm_i915_private *dev_priv = dev->dev_private;
656 i915_reg_t ch_ctl = intel_dp->aux_ch_ctl_reg;
/* C: re-reads the ctl reg into 'status' and tests for not-busy. */
660 #define C (((status = I915_READ_NOTRACE(ch_ctl)) & DP_AUX_CH_CTL_SEND_BUSY) == 0)
662 done = wait_event_timeout(dev_priv->gmbus_wait_queue, C,
663 msecs_to_jiffies_timeout(10));
665 done = wait_for_atomic(C, 10) == 0;
667 DRM_ERROR("dp aux hw did not signal timeout (has irq: %i)!\n",
/*
 * get_aux_clock_divider vfuncs: return the divider for AUX bit-clock
 * attempt 'index' (0 ends the retry sequence on most platforms).
 */
674 static uint32_t i9xx_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
676 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
677 struct drm_device *dev = intel_dig_port->base.base.dev;
680 * The clock divider is based off the hrawclk, and would like to run at
681 * 2MHz. So, take the hrawclk value and divide by 2 and use that
683 return index ? 0 : DIV_ROUND_CLOSEST(intel_hrawclk(dev), 2);
/* ILK+: port A AUX runs off cdclk, other ports off the PCH rawclk. */
686 static uint32_t ilk_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
688 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
689 struct drm_device *dev = intel_dig_port->base.base.dev;
690 struct drm_i915_private *dev_priv = dev->dev_private;
695 if (intel_dig_port->port == PORT_A) {
696 return DIV_ROUND_CLOSEST(dev_priv->cdclk_freq, 2000);
699 return DIV_ROUND_CLOSEST(intel_pch_rawclk(dev), 2);
/* HSW/BDW variant with an extra fixed divider for non-ULT LPT-H. */
703 static uint32_t hsw_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
705 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
706 struct drm_device *dev = intel_dig_port->base.base.dev;
707 struct drm_i915_private *dev_priv = dev->dev_private;
709 if (intel_dig_port->port == PORT_A) {
712 return DIV_ROUND_CLOSEST(dev_priv->cdclk_freq, 2000);
713 } else if (HAS_PCH_LPT_H(dev_priv)) {
714 /* Workaround for non-ULT HSW */
721 return index ? 0 : DIV_ROUND_CLOSEST(intel_pch_rawclk(dev), 2);
/* VLV/CHV use a fixed divider of 100. */
725 static uint32_t vlv_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
727 return index ? 0 : 100;
730 static uint32_t skl_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
733 * SKL doesn't need us to program the AUX clock divider (Hardware will
734 * derive the clock from CDCLK automatically). We still implement the
735 * get_aux_clock_divider vfunc to plug-in into the existing code.
737 return index ? 0 : 1;
/*
 * get_aux_send_ctl vfuncs: build the DP_AUX_CH_CTL value that starts a
 * transaction (busy bit, irq enable, error/timeout ack bits, message
 * size, and platform-specific timing fields).
 */
740 static uint32_t i9xx_get_aux_send_ctl(struct intel_dp *intel_dp,
743 uint32_t aux_clock_divider)
745 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
746 struct drm_device *dev = intel_dig_port->base.base.dev;
747 uint32_t precharge, timeout;
/* BDW port A needs the longer 600us hardware timeout. */
754 if (IS_BROADWELL(dev) && intel_dig_port->port == PORT_A)
755 timeout = DP_AUX_CH_CTL_TIME_OUT_600us;
757 timeout = DP_AUX_CH_CTL_TIME_OUT_400us;
759 return DP_AUX_CH_CTL_SEND_BUSY |
761 (has_aux_irq ? DP_AUX_CH_CTL_INTERRUPT : 0) |
762 DP_AUX_CH_CTL_TIME_OUT_ERROR |
764 DP_AUX_CH_CTL_RECEIVE_ERROR |
765 (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
766 (precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) |
767 (aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT);
/* SKL+ variant: no divider/precharge fields, fixed 32-pulse sync and
 * 1600us timeout. */
770 static uint32_t skl_get_aux_send_ctl(struct intel_dp *intel_dp,
775 return DP_AUX_CH_CTL_SEND_BUSY |
777 (has_aux_irq ? DP_AUX_CH_CTL_INTERRUPT : 0) |
778 DP_AUX_CH_CTL_TIME_OUT_ERROR |
779 DP_AUX_CH_CTL_TIME_OUT_1600us |
780 DP_AUX_CH_CTL_RECEIVE_ERROR |
781 (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
782 DP_AUX_CH_CTL_SYNC_PULSE_SKL(32);
/*
 * Perform one raw AUX channel transaction: wait for the channel to go
 * idle, then for each available AUX clock divider retry the send up to
 * 5 times (DP spec requires at least 3), and finally unload any reply
 * bytes into recv[].  Runs with VDD on and pps_mutex held for the
 * duration; turns VDD back off only if it was off on entry.
 */
786 intel_dp_aux_ch(struct intel_dp *intel_dp,
787 const uint8_t *send, int send_bytes,
788 uint8_t *recv, int recv_size)
790 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
791 struct drm_device *dev = intel_dig_port->base.base.dev;
792 struct drm_i915_private *dev_priv = dev->dev_private;
793 i915_reg_t ch_ctl = intel_dp->aux_ch_ctl_reg;
794 uint32_t aux_clock_divider;
795 int i, ret, recv_bytes;
798 bool has_aux_irq = HAS_AUX_IRQ(dev);
804 * We will be called with VDD already enabled for dpcd/edid/oui reads.
805 * In such cases we want to leave VDD enabled and it's up to upper layers
806 * to turn it off. But for eg. i2c-dev access we need to turn it on/off
809 vdd = edp_panel_vdd_on(intel_dp);
811 /* dp aux is extremely sensitive to irq latency, hence request the
812 * lowest possible wakeup latency and so prevent the cpu from going into
815 pm_qos_update_request(&dev_priv->pm_qos, 0);
817 intel_dp_check_edp(intel_dp);
819 /* Try to wait for any previous AUX channel activity */
820 for (try = 0; try < 3; try++) {
821 status = I915_READ_NOTRACE(ch_ctl);
822 if ((status & DP_AUX_CH_CTL_SEND_BUSY) == 0)
/* Channel stayed busy: WARN once per distinct status value. */
828 static u32 last_status = -1;
829 const u32 status = I915_READ(ch_ctl);
831 if (status != last_status) {
832 WARN(1, "dp_aux_ch not started status 0x%08x\n",
834 last_status = status;
841 /* Only 5 data registers! */
842 if (WARN_ON(send_bytes > 20 || recv_size > 20)) {
847 while ((aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, clock++))) {
848 u32 send_ctl = intel_dp->get_aux_send_ctl(intel_dp,
853 /* Must try at least 3 times according to DP spec */
854 for (try = 0; try < 5; try++) {
855 /* Load the send data into the aux channel data registers */
856 for (i = 0; i < send_bytes; i += 4)
857 I915_WRITE(intel_dp->aux_ch_data_reg[i >> 2],
858 intel_dp_pack_aux(send + i,
861 /* Send the command and wait for it to complete */
862 I915_WRITE(ch_ctl, send_ctl);
864 status = intel_dp_aux_wait_done(intel_dp, has_aux_irq);
866 /* Clear done status and any errors */
870 DP_AUX_CH_CTL_TIME_OUT_ERROR |
871 DP_AUX_CH_CTL_RECEIVE_ERROR);
873 if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR)
876 /* DP CTS 1.2 Core Rev 1.1, 4.2.1.1 & 4.2.1.2
877 * 400us delay required for errors and timeouts
878 * Timeout errors from the HW already meet this
879 * requirement so skip to next iteration
881 if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) {
882 usleep_range(400, 500);
885 if (status & DP_AUX_CH_CTL_DONE)
890 if ((status & DP_AUX_CH_CTL_DONE) == 0) {
891 DRM_ERROR("dp_aux_ch not done status 0x%08x\n", status);
897 /* Check for timeout or receive error.
898 * Timeouts occur when the sink is not connected
900 if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) {
901 DRM_ERROR("dp_aux_ch receive error status 0x%08x\n", status);
906 /* Timeouts occur when the device isn't connected, so they're
907 * "normal" -- don't fill the kernel log with these */
908 if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR) {
909 DRM_DEBUG_KMS("dp_aux_ch timeout status 0x%08x\n", status);
914 /* Unload any bytes sent back from the other side */
915 recv_bytes = ((status & DP_AUX_CH_CTL_MESSAGE_SIZE_MASK) >>
916 DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT);
919 * By BSpec: "Message sizes of 0 or >20 are not allowed."
920 * We have no idea of what happened so we return -EBUSY so
921 * drm layer takes care for the necessary retries.
923 if (recv_bytes == 0 || recv_bytes > 20) {
924 DRM_DEBUG_KMS("Forbidden recv_bytes = %d on aux transaction\n",
927 * FIXME: This patch was created on top of a series that
928 * organize the retries at drm level. There EBUSY should
929 * also take care for 1ms wait before retrying.
930 * That aux retries re-org is still needed and after that is
931 * merged we remove this sleep from here.
933 usleep_range(1000, 1500);
938 if (recv_bytes > recv_size)
939 recv_bytes = recv_size;
941 for (i = 0; i < recv_bytes; i += 4)
942 intel_dp_unpack_aux(I915_READ(intel_dp->aux_ch_data_reg[i >> 2]),
943 recv + i, recv_bytes - i);
/* Restore latency tolerance and drop VDD/pps_mutex taken above. */
947 pm_qos_update_request(&dev_priv->pm_qos, PM_QOS_DEFAULT_VALUE);
950 edp_panel_vdd_off(intel_dp, false);
952 pps_unlock(intel_dp);
/* AUX request header: 3 address/command bytes, plus 1 length byte when a
 * payload is present. */
957 #define BARE_ADDRESS_SIZE 3
958 #define HEADER_SIZE (BARE_ADDRESS_SIZE + 1)
/*
 * drm_dp_aux .transfer() hook: marshal a drm_dp_aux_msg into the raw
 * header+payload format of intel_dp_aux_ch() and decode the reply code
 * (and, for reads, the returned data) from the receive buffer.
 */
960 intel_dp_aux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
962 struct intel_dp *intel_dp = container_of(aux, struct intel_dp, aux);
963 uint8_t txbuf[20], rxbuf[20];
964 size_t txsize, rxsize;
/* Header: request in the top nibble of byte 0, 20-bit address, then
 * (length - 1). */
967 txbuf[0] = (msg->request << 4) |
968 ((msg->address >> 16) & 0xf);
969 txbuf[1] = (msg->address >> 8) & 0xff;
970 txbuf[2] = msg->address & 0xff;
971 txbuf[3] = msg->size - 1;
973 switch (msg->request & ~DP_AUX_I2C_MOT) {
974 case DP_AUX_NATIVE_WRITE:
975 case DP_AUX_I2C_WRITE:
976 case DP_AUX_I2C_WRITE_STATUS_UPDATE:
977 txsize = msg->size ? HEADER_SIZE + msg->size : BARE_ADDRESS_SIZE;
978 rxsize = 2; /* 0 or 1 data bytes */
980 if (WARN_ON(txsize > 20))
984 memcpy(txbuf + HEADER_SIZE, msg->buffer, msg->size);
988 ret = intel_dp_aux_ch(intel_dp, txbuf, txsize, rxbuf, rxsize);
990 msg->reply = rxbuf[0] >> 4;
993 /* Number of bytes written in a short write. */
994 ret = clamp_t(int, rxbuf[1], 0, msg->size);
996 /* Return payload size. */
1002 case DP_AUX_NATIVE_READ:
1003 case DP_AUX_I2C_READ:
1004 txsize = msg->size ? HEADER_SIZE : BARE_ADDRESS_SIZE;
1005 rxsize = msg->size + 1;
1007 if (WARN_ON(rxsize > 20))
1010 ret = intel_dp_aux_ch(intel_dp, txbuf, txsize, rxbuf, rxsize);
1012 msg->reply = rxbuf[0] >> 4;
1014 * Assume happy day, and copy the data. The caller is
1015 * expected to check msg->reply before touching it.
1017 * Return payload size.
1020 memcpy(msg->buffer, rxbuf + 1, ret);
/*
 * Per-platform AUX channel register lookups: map an enum port to its AUX
 * control/data register, falling back to a safe default for unexpected
 * ports.  NOTE(review): the switch-case labels are missing from this
 * line-sampled excerpt.
 */
1032 static i915_reg_t g4x_aux_ctl_reg(struct drm_i915_private *dev_priv,
1039 return DP_AUX_CH_CTL(port);
1042 return DP_AUX_CH_CTL(PORT_B);
1046 static i915_reg_t g4x_aux_data_reg(struct drm_i915_private *dev_priv,
1047 enum port port, int index)
1053 return DP_AUX_CH_DATA(port, index);
1056 return DP_AUX_CH_DATA(PORT_B, index);
/* ILK+: port A AUX lives in the CPU, other ports in the PCH. */
1060 static i915_reg_t ilk_aux_ctl_reg(struct drm_i915_private *dev_priv,
1065 return DP_AUX_CH_CTL(port);
1069 return PCH_DP_AUX_CH_CTL(port);
1072 return DP_AUX_CH_CTL(PORT_A);
1076 static i915_reg_t ilk_aux_data_reg(struct drm_i915_private *dev_priv,
1077 enum port port, int index)
1081 return DP_AUX_CH_DATA(port, index);
1085 return PCH_DP_AUX_CH_DATA(port, index);
1088 return DP_AUX_CH_DATA(PORT_A, index);
1093 * On SKL we don't have Aux for port E so we rely
1094 * on VBT to set a proper alternate aux channel.
1096 static enum port skl_porte_aux_port(struct drm_i915_private *dev_priv)
1098 const struct ddi_vbt_port_info *info =
1099 &dev_priv->vbt.ddi_port_info[PORT_E];
1101 switch (info->alternate_aux_channel) {
1111 MISSING_CASE(info->alternate_aux_channel);
/* SKL+: redirect port E to its VBT-selected alternate AUX channel. */
1116 static i915_reg_t skl_aux_ctl_reg(struct drm_i915_private *dev_priv,
1120 port = skl_porte_aux_port(dev_priv);
1127 return DP_AUX_CH_CTL(port);
1130 return DP_AUX_CH_CTL(PORT_A);
1134 static i915_reg_t skl_aux_data_reg(struct drm_i915_private *dev_priv,
1135 enum port port, int index)
1138 port = skl_porte_aux_port(dev_priv);
1145 return DP_AUX_CH_DATA(port, index);
1148 return DP_AUX_CH_DATA(PORT_A, index);
/* Dispatch to the right per-platform AUX control register lookup. */
1152 static i915_reg_t intel_aux_ctl_reg(struct drm_i915_private *dev_priv,
1155 if (INTEL_INFO(dev_priv)->gen >= 9)
1156 return skl_aux_ctl_reg(dev_priv, port);
1157 else if (HAS_PCH_SPLIT(dev_priv))
1158 return ilk_aux_ctl_reg(dev_priv, port);
1160 return g4x_aux_ctl_reg(dev_priv, port);
/* Same dispatch for the AUX data registers. */
1163 static i915_reg_t intel_aux_data_reg(struct drm_i915_private *dev_priv,
1164 enum port port, int index)
1166 if (INTEL_INFO(dev_priv)->gen >= 9)
1167 return skl_aux_data_reg(dev_priv, port, index);
1168 else if (HAS_PCH_SPLIT(dev_priv))
1169 return ilk_aux_data_reg(dev_priv, port, index);
1171 return g4x_aux_data_reg(dev_priv, port, index);
/* Cache this port's AUX ctl/data register offsets on the intel_dp. */
1174 static void intel_aux_reg_init(struct intel_dp *intel_dp)
1176 struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
1177 enum port port = dp_to_dig_port(intel_dp)->port;
1180 intel_dp->aux_ch_ctl_reg = intel_aux_ctl_reg(dev_priv, port);
1181 for (i = 0; i < ARRAY_SIZE(intel_dp->aux_ch_data_reg); i++)
1182 intel_dp->aux_ch_data_reg[i] = intel_aux_data_reg(dev_priv, port, i);
/* Unregister the drm_dp_aux device and free its kasprintf'd name. */
1186 intel_dp_aux_fini(struct intel_dp *intel_dp)
1188 drm_dp_aux_unregister(&intel_dp->aux);
1189 kfree(intel_dp->aux.name);
/* Set up and register the drm_dp_aux device for this port: cache the
 * AUX registers, allocate the "DPDDC-<port>" name, and hook up the
 * transfer callback.  Frees the name again on registration failure. */
1193 intel_dp_aux_init(struct intel_dp *intel_dp, struct intel_connector *connector)
1195 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
1196 enum port port = intel_dig_port->port;
1199 intel_aux_reg_init(intel_dp);
1201 intel_dp->aux.name = kasprintf(GFP_KERNEL, "DPDDC-%c", port_name(port));
1202 if (!intel_dp->aux.name)
1205 intel_dp->aux.dev = connector->base.kdev;
1206 intel_dp->aux.transfer = intel_dp_aux_transfer;
1208 DRM_DEBUG_KMS("registering %s bus for %s\n",
1210 connector->base.kdev->kobj.name);
1212 ret = drm_dp_aux_register(&intel_dp->aux);
1214 DRM_ERROR("drm_dp_aux_register() for %s failed (%d)\n",
1215 intel_dp->aux.name, ret);
1216 kfree(intel_dp->aux.name);
/* Connector unregister hook: tear down AUX before the connector. */
1224 intel_dp_connector_unregister(struct intel_connector *intel_connector)
1226 struct intel_dp *intel_dp = intel_attached_dp(&intel_connector->base);
1228 intel_dp_aux_fini(intel_dp);
1229 intel_connector_unregister(intel_connector);
/*
 * SKL eDP: program pipe_config->dpll_hw_state to use DPLL0 with the
 * CTRL1 link-rate field matching pipe_config->port_clock.
 * NOTE(review): the switch-case labels are missing from this
 * line-sampled excerpt.
 */
1233 skl_edp_set_pll_config(struct intel_crtc_state *pipe_config)
1237 memset(&pipe_config->dpll_hw_state, 0,
1238 sizeof(pipe_config->dpll_hw_state));
1240 pipe_config->ddi_pll_sel = SKL_DPLL0;
1241 pipe_config->dpll_hw_state.cfgcr1 = 0;
1242 pipe_config->dpll_hw_state.cfgcr2 = 0;
1244 ctrl1 = DPLL_CTRL1_OVERRIDE(SKL_DPLL0);
1245 switch (pipe_config->port_clock / 2) {
1247 ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_810,
1251 ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1350,
1255 ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2700,
1259 ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1620,
1262 /* TBD: For DP link rates 2.16 GHz and 4.32 GHz, VCO is 8640 which
1263 results in CDCLK change. Need to handle the change of CDCLK by
1264 disabling pipes and re-enabling them */
1266 ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1080,
1270 ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2160,
1275 pipe_config->dpll_hw_state.ctrl1 = ctrl1;
/* HSW/BDW: pick the fixed LCPLL frequency matching the port clock. */
1279 hsw_dp_set_ddi_pll_sel(struct intel_crtc_state *pipe_config)
1281 memset(&pipe_config->dpll_hw_state, 0,
1282 sizeof(pipe_config->dpll_hw_state));
1284 switch (pipe_config->port_clock / 2) {
1286 pipe_config->ddi_pll_sel = PORT_CLK_SEL_LCPLL_810;
1289 pipe_config->ddi_pll_sel = PORT_CLK_SEL_LCPLL_1350;
1292 pipe_config->ddi_pll_sel = PORT_CLK_SEL_LCPLL_2700;
/*
 * Return the sink's supported link rates via *sink_rates and the count
 * as the return value.  Sinks that report an explicit rate table
 * (num_sink_rates != 0) use it; otherwise fall back to the default
 * rates bounded by the sink's reported max link bandwidth.
 */
1298 intel_dp_sink_rates(struct intel_dp *intel_dp, const int **sink_rates)
1300 if (intel_dp->num_sink_rates) {
1301 *sink_rates = intel_dp->sink_rates;
1302 return intel_dp->num_sink_rates;
1305 *sink_rates = default_rates;
/* max_link_bw is a DPCD bandwidth code; >> 3 converts it to an index
 * into default_rates, +1 yields the number of usable entries. */
1307 return (intel_dp_max_link_bw(intel_dp) >> 3) + 1;
/*
 * Report whether the source (the GPU side) can drive HBR2 (5.4 GHz).
 * HSW non-ULX, BDW and gen9+ support it, except early SKL steppings
 * which carry the WaDisableHBR2 workaround.
 */
1310 bool intel_dp_source_supports_hbr2(struct intel_dp *intel_dp)
1312 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
1313 struct drm_device *dev = dig_port->base.base.dev;
1315 /* WaDisableHBR2:skl */
1316 if (IS_SKL_REVID(dev, 0, SKL_REVID_B0))
1319 if ((IS_HASWELL(dev) && !IS_HSW_ULX(dev)) || IS_BROADWELL(dev) ||
1320 (INTEL_INFO(dev)->gen >= 9))
/*
 * Return the platform's supported source link rates via *source_rates
 * and the number of entries as the return value.  BXT and SKL/KBL have
 * platform-specific tables; everything else uses default_rates.
 */
1327 intel_dp_source_rates(struct intel_dp *intel_dp, const int **source_rates)
1329 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
1330 struct drm_device *dev = dig_port->base.base.dev;
1333 if (IS_BROXTON(dev)) {
1334 *source_rates = bxt_rates;
1335 size = ARRAY_SIZE(bxt_rates);
1336 } else if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) {
1337 *source_rates = skl_rates;
1338 size = ARRAY_SIZE(skl_rates);
1340 *source_rates = default_rates;
1341 size = ARRAY_SIZE(default_rates);
1344 /* This depends on the fact that 5.4 is last value in the array */
/* If HBR2 is unsupported, the (elided) adjustment presumably trims the
 * trailing 5.4 GHz entry from the reported size — confirm in full file. */
1345 if (!intel_dp_source_supports_hbr2(intel_dp))
/*
 * For pre-DDI platforms, look up the fixed DPLL divisor values matching
 * the requested port_clock and store them in pipe_config.  Each platform
 * family carries its own divisor table.
 */
1352 intel_dp_set_clock(struct intel_encoder *encoder,
1353 struct intel_crtc_state *pipe_config)
1355 struct drm_device *dev = encoder->base.dev;
1356 const struct dp_link_dpll *divisor = NULL;
/* Pick the platform's table; the selection condition for gen4 is on an
 * elided line above this assignment. */
1360 divisor = gen4_dpll;
1361 count = ARRAY_SIZE(gen4_dpll);
1362 } else if (HAS_PCH_SPLIT(dev)) {
1364 count = ARRAY_SIZE(pch_dpll);
1365 } else if (IS_CHERRYVIEW(dev)) {
1367 count = ARRAY_SIZE(chv_dpll);
1368 } else if (IS_VALLEYVIEW(dev)) {
1370 count = ARRAY_SIZE(vlv_dpll);
/* Linear scan for an exact clock match; mark the config as having
 * explicit divisor values when found. */
1373 if (divisor && count) {
1374 for (i = 0; i < count; i++) {
1375 if (pipe_config->port_clock == divisor[i].clock) {
1376 pipe_config->dpll = divisor[i].dpll;
1377 pipe_config->clock_set = true;
/*
 * Merge-style intersection of two sorted rate arrays into common_rates.
 * Returns the number of common entries (the elided return is presumably
 * k).  Both inputs must be sorted ascending for the two-pointer walk.
 */
1384 static int intersect_rates(const int *source_rates, int source_len,
1385 const int *sink_rates, int sink_len,
1388 int i = 0, j = 0, k = 0;
1390 while (i < source_len && j < sink_len) {
1391 if (source_rates[i] == sink_rates[j]) {
/* Guard against overflowing the fixed-size output array. */
1392 if (WARN_ON(k >= DP_MAX_SUPPORTED_RATES))
1394 common_rates[k] = source_rates[i];
1398 } else if (source_rates[i] < sink_rates[j]) {
/*
 * Compute the link rates supported by BOTH source and sink into the
 * caller-supplied array; returns the number of common rates.
 */
1407 static int intel_dp_common_rates(struct intel_dp *intel_dp,
1410 const int *source_rates, *sink_rates;
1411 int source_len, sink_len;
1413 sink_len = intel_dp_sink_rates(intel_dp, &sink_rates);
1414 source_len = intel_dp_source_rates(intel_dp, &source_rates);
1416 return intersect_rates(source_rates, source_len,
1417 sink_rates, sink_len,
/*
 * Format an int array as a comma-separated list into str (debug output
 * helper).  The elided loop body presumably advances str/len by the
 * snprintf return value — confirm in full file.
 */
1421 static void snprintf_int_array(char *str, size_t len,
1422 const int *array, int nelem)
1428 for (i = 0; i < nelem; i++) {
1429 int r = snprintf(str, len, "%s%d", i ? ", " : "", array[i]);
/*
 * Debug-only dump of source, sink and common link rates.  Bails out
 * early unless KMS debugging is enabled, so the string formatting cost
 * is only paid when someone is looking.
 */
1437 static void intel_dp_print_rates(struct intel_dp *intel_dp)
1439 const int *source_rates, *sink_rates;
1440 int source_len, sink_len, common_len;
1441 int common_rates[DP_MAX_SUPPORTED_RATES];
1442 char str[128]; /* FIXME: too big for stack? */
1444 if ((drm_debug & DRM_UT_KMS) == 0)
1447 source_len = intel_dp_source_rates(intel_dp, &source_rates);
1448 snprintf_int_array(str, sizeof(str), source_rates, source_len);
1449 DRM_DEBUG_KMS("source rates: %s\n", str);
1451 sink_len = intel_dp_sink_rates(intel_dp, &sink_rates);
1452 snprintf_int_array(str, sizeof(str), sink_rates, sink_len);
1453 DRM_DEBUG_KMS("sink rates: %s\n", str);
1455 common_len = intel_dp_common_rates(intel_dp, common_rates);
1456 snprintf_int_array(str, sizeof(str), common_rates, common_len);
1457 DRM_DEBUG_KMS("common rates: %s\n", str);
/*
 * Return the index of 'find' in the rates array (bounded by
 * DP_MAX_SUPPORTED_RATES).  Note callers also use find==0 as a sentinel
 * to count entries in a zero-terminated rate array.
 */
1460 static int rate_to_index(int find, const int *rates)
1464 for (i = 0; i < DP_MAX_SUPPORTED_RATES; ++i)
1465 if (find == rates[i])
/*
 * Return the highest link rate supported by both source and sink.
 * rates[] is zero-initialized, so rate_to_index(0, rates) finds the
 * first unused slot, i.e. the entry count; -1 gives the last valid
 * (highest, as the array is sorted ascending) rate.
 */
1472 intel_dp_max_link_rate(struct intel_dp *intel_dp)
1474 int rates[DP_MAX_SUPPORTED_RATES] = {};
1477 len = intel_dp_common_rates(intel_dp, rates);
1478 if (WARN_ON(len <= 0))
1481 return rates[rate_to_index(0, rates) - 1];
/*
 * Map a link rate to its index in the sink's DPCD rate table, as written
 * to the DP_LINK_RATE_SET register on sinks that use explicit tables.
 */
1484 int intel_dp_rate_select(struct intel_dp *intel_dp, int rate)
1486 return rate_to_index(rate, intel_dp->sink_rates);
/*
 * Translate port_clock into the DPCD link-programming values: sinks
 * with an explicit rate table get a rate_select index (link_bw stays 0
 * on an elided line), legacy sinks get a classic link-bw code.
 */
1489 void intel_dp_compute_rate(struct intel_dp *intel_dp, int port_clock,
1490 uint8_t *link_bw, uint8_t *rate_select)
1492 if (intel_dp->num_sink_rates) {
1495 intel_dp_rate_select(intel_dp, port_clock);
1497 *link_bw = drm_dp_link_rate_to_bw_code(port_clock);
/*
 * Encoder ->compute_config hook for DP: pick bpp, lane count and link
 * rate for the requested mode, then derive M/N values and (per
 * platform) the PLL configuration.
 *
 * Strategy: walk bpp downwards from the pipe's bpp in 2-bpc steps and,
 * for each bpp, search increasing clock and lane count until the mode's
 * required data rate fits the link's available rate.  eDP panels pin
 * min == max for both clock and lanes since panels typically support
 * only their native link configuration.
 */
1503 intel_dp_compute_config(struct intel_encoder *encoder,
1504 struct intel_crtc_state *pipe_config)
1506 struct drm_device *dev = encoder->base.dev;
1507 struct drm_i915_private *dev_priv = dev->dev_private;
1508 struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
1509 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
1510 enum port port = dp_to_dig_port(intel_dp)->port;
1511 struct intel_crtc *intel_crtc = to_intel_crtc(pipe_config->base.crtc);
1512 struct intel_connector *intel_connector = intel_dp->attached_connector;
1513 int lane_count, clock;
1514 int min_lane_count = 1;
1515 int max_lane_count = intel_dp_max_lane_count(intel_dp);
1516 /* Conveniently, the link BW constants become indices with a shift...*/
1520 int link_avail, link_clock;
1521 int common_rates[DP_MAX_SUPPORTED_RATES] = {};
1523 uint8_t link_bw, rate_select;
1525 common_len = intel_dp_common_rates(intel_dp, common_rates);
1527 /* No common link rates between source and sink */
1528 WARN_ON(common_len <= 0);
1530 max_clock = common_len - 1;
/* Port A is the CPU eDP port; everything else behind a PCH without DDI
 * needs the PCH encoder. */
1532 if (HAS_PCH_SPLIT(dev) && !HAS_DDI(dev) && port != PORT_A)
1533 pipe_config->has_pch_encoder = true;
1535 pipe_config->has_dp_encoder = true;
1536 pipe_config->has_drrs = false;
1537 pipe_config->has_audio = intel_dp->has_audio && port != PORT_A;
/* Fixed-mode eDP: force the panel's native timings and apply the
 * requested panel fitting (pipe scaler on gen9+, GMCH or PCH fitter
 * otherwise). */
1539 if (is_edp(intel_dp) && intel_connector->panel.fixed_mode) {
1540 intel_fixed_panel_mode(intel_connector->panel.fixed_mode,
1543 if (INTEL_INFO(dev)->gen >= 9) {
1545 ret = skl_update_scaler_crtc(pipe_config);
1550 if (HAS_GMCH_DISPLAY(dev))
1551 intel_gmch_panel_fitting(intel_crtc, pipe_config,
1552 intel_connector->panel.fitting_mode);
1554 intel_pch_panel_fitting(intel_crtc, pipe_config,
1555 intel_connector->panel.fitting_mode);
/* Double-clocked modes are rejected (return on an elided line). */
1558 if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK)
1561 DRM_DEBUG_KMS("DP link computation with max lane count %i "
1562 "max bw %d pixel clock %iKHz\n",
1563 max_lane_count, common_rates[max_clock],
1564 adjusted_mode->crtc_clock);
1566 /* Walk through all bpp values. Luckily they're all nicely spaced with 2
1567 * bpc in between. */
1568 bpp = pipe_config->pipe_bpp;
1569 if (is_edp(intel_dp)) {
1571 /* Get bpp from vbt only for panels that dont have bpp in edid */
1572 if (intel_connector->base.display_info.bpc == 0 &&
1573 (dev_priv->vbt.edp_bpp && dev_priv->vbt.edp_bpp < bpp)) {
1574 DRM_DEBUG_KMS("clamping bpp for eDP panel to BIOS-provided %i\n",
1575 dev_priv->vbt.edp_bpp);
1576 bpp = dev_priv->vbt.edp_bpp;
1580 * Use the maximum clock and number of lanes the eDP panel
1581 * advertizes being capable of. The panels are generally
1582 * designed to support only a single clock and lane
1583 * configuration, and typically these values correspond to the
1584 * native resolution of the panel.
1586 min_lane_count = max_lane_count;
1587 min_clock = max_clock;
/* Outer loop: try progressively smaller bpp; inner loops: find the
 * cheapest clock/lane combination that carries the mode. */
1590 for (; bpp >= 6*3; bpp -= 2*3) {
1591 mode_rate = intel_dp_link_required(adjusted_mode->crtc_clock,
1594 for (clock = min_clock; clock <= max_clock; clock++) {
1595 for (lane_count = min_lane_count;
1596 lane_count <= max_lane_count;
1599 link_clock = common_rates[clock];
1600 link_avail = intel_dp_max_data_rate(link_clock,
1603 if (mode_rate <= link_avail) {
/* Automatic color range: CEA modes (other than VGA, mode 1) use limited
 * range unless we're at 18 bpp. */
1613 if (intel_dp->color_range_auto) {
1616 * CEA-861-E - 5.1 Default Encoding Parameters
1617 * VESA DisplayPort Ver.1.2a - 5.1.1.1 Video Colorimetry
1619 pipe_config->limited_color_range =
1620 bpp != 18 && drm_match_cea_mode(adjusted_mode) > 1;
1622 pipe_config->limited_color_range =
1623 intel_dp->limited_color_range;
/* Commit the chosen link parameters into the crtc state. */
1626 pipe_config->lane_count = lane_count;
1628 pipe_config->pipe_bpp = bpp;
1629 pipe_config->port_clock = common_rates[clock];
1631 intel_dp_compute_rate(intel_dp, pipe_config->port_clock,
1632 &link_bw, &rate_select);
1634 DRM_DEBUG_KMS("DP link bw %02x rate select %02x lane count %d clock %d bpp %d\n",
1635 link_bw, rate_select, pipe_config->lane_count,
1636 pipe_config->port_clock, bpp);
1637 DRM_DEBUG_KMS("DP link bw required %i available %i\n",
1638 mode_rate, link_avail);
1640 intel_link_compute_m_n(bpp, lane_count,
1641 adjusted_mode->crtc_clock,
1642 pipe_config->port_clock,
1643 &pipe_config->dp_m_n);
/* Seamless DRRS panels get a second M/N pair for the downclock mode. */
1645 if (intel_connector->panel.downclock_mode != NULL &&
1646 dev_priv->drrs.type == SEAMLESS_DRRS_SUPPORT) {
1647 pipe_config->has_drrs = true;
1648 intel_link_compute_m_n(bpp, lane_count,
1649 intel_connector->panel.downclock_mode->clock,
1650 pipe_config->port_clock,
1651 &pipe_config->dp_m2_n2);
/* Platform-specific PLL selection (BXT handles it in the DDI code). */
1654 if ((IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) && is_edp(intel_dp))
1655 skl_edp_set_pll_config(pipe_config);
1656 else if (IS_BROXTON(dev))
1657 /* handled in ddi */;
1658 else if (IS_HASWELL(dev) || IS_BROADWELL(dev))
1659 hsw_dp_set_ddi_pll_sel(pipe_config);
1661 intel_dp_set_clock(encoder, pipe_config);
/*
 * Cache the active link rate and lane count from the committed crtc
 * state into the intel_dp, for use by link training / retraining.
 */
1666 void intel_dp_set_link_params(struct intel_dp *intel_dp,
1667 const struct intel_crtc_state *pipe_config)
1669 intel_dp->link_rate = pipe_config->port_clock;
1670 intel_dp->lane_count = pipe_config->lane_count;
/*
 * Build the DP port register value (intel_dp->DP) for the upcoming
 * modeset, handling the three register layouts: gen7 CPU eDP (port A),
 * CPT PCH ports (bits live in TRANS_DP_CTL), and the common IBX/CPU
 * layout.  The value is written to hardware later by the enable hooks.
 */
1673 static void intel_dp_prepare(struct intel_encoder *encoder)
1675 struct drm_device *dev = encoder->base.dev;
1676 struct drm_i915_private *dev_priv = dev->dev_private;
1677 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
1678 enum port port = dp_to_dig_port(intel_dp)->port;
1679 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
1680 const struct drm_display_mode *adjusted_mode = &crtc->config->base.adjusted_mode;
1682 intel_dp_set_link_params(intel_dp, crtc->config);
1685 * There are four kinds of DP registers:
1692 * IBX PCH and CPU are the same for almost everything,
1693 * except that the CPU DP PLL is configured in this
1696 * CPT PCH is quite different, having many bits moved
1697 * to the TRANS_DP_CTL register instead. That
1698 * configuration happens (oddly) in ironlake_pch_enable
1701 /* Preserve the BIOS-computed detected bit. This is
1702 * supposed to be read-only.
1704 intel_dp->DP = I915_READ(intel_dp->output_reg) & DP_DETECTED;
1706 /* Handle DP bits in common between all three register formats */
1707 intel_dp->DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
1708 intel_dp->DP |= DP_PORT_WIDTH(crtc->config->lane_count);
1710 /* Split out the IBX/CPU vs CPT settings */
1712 if (IS_GEN7(dev) && port == PORT_A) {
1713 if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
1714 intel_dp->DP |= DP_SYNC_HS_HIGH;
1715 if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
1716 intel_dp->DP |= DP_SYNC_VS_HIGH;
1717 intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;
1719 if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
1720 intel_dp->DP |= DP_ENHANCED_FRAMING;
/* Pipe select for gen7 port A lives at bit 29. */
1722 intel_dp->DP |= crtc->pipe << 29;
1723 } else if (HAS_PCH_CPT(dev) && port != PORT_A) {
1726 intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;
/* On CPT the enhanced-framing bit lives in TRANS_DP_CTL, not the port
 * register; update it in place. */
1728 trans_dp = I915_READ(TRANS_DP_CTL(crtc->pipe));
1729 if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
1730 trans_dp |= TRANS_DP_ENH_FRAMING;
1732 trans_dp &= ~TRANS_DP_ENH_FRAMING;
1733 I915_WRITE(TRANS_DP_CTL(crtc->pipe), trans_dp);
/* Common IBX/CPU path (also g4x/VLV/CHV). */
1735 if (!HAS_PCH_SPLIT(dev) && !IS_VALLEYVIEW(dev) &&
1736 !IS_CHERRYVIEW(dev) && crtc->config->limited_color_range)
1737 intel_dp->DP |= DP_COLOR_RANGE_16_235;
1739 if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
1740 intel_dp->DP |= DP_SYNC_HS_HIGH;
1741 if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
1742 intel_dp->DP |= DP_SYNC_VS_HIGH;
1743 intel_dp->DP |= DP_LINK_TRAIN_OFF;
1745 if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
1746 intel_dp->DP |= DP_ENHANCED_FRAMING;
1748 if (IS_CHERRYVIEW(dev))
1749 intel_dp->DP |= DP_PIPE_SELECT_CHV(crtc->pipe);
1750 else if (crtc->pipe == PIPE_B)
1751 intel_dp->DP |= DP_PIPEB_SELECT;
/*
 * Mask/value pairs for polling the eDP panel power sequencer (PP_STATUS)
 * until it reaches a steady state: fully on, fully off, or idle after a
 * power-cycle delay.  Used with wait_panel_status() below.
 */
1755 #define IDLE_ON_MASK (PP_ON | PP_SEQUENCE_MASK | 0 | PP_SEQUENCE_STATE_MASK)
1756 #define IDLE_ON_VALUE (PP_ON | PP_SEQUENCE_NONE | 0 | PP_SEQUENCE_STATE_ON_IDLE)
1758 #define IDLE_OFF_MASK (PP_ON | PP_SEQUENCE_MASK | 0 | 0)
1759 #define IDLE_OFF_VALUE (0 | PP_SEQUENCE_NONE | 0 | 0)
1761 #define IDLE_CYCLE_MASK (PP_ON | PP_SEQUENCE_MASK | PP_CYCLE_DELAY_ACTIVE | PP_SEQUENCE_STATE_MASK)
1762 #define IDLE_CYCLE_VALUE (0 | PP_SEQUENCE_NONE | 0 | PP_SEQUENCE_STATE_OFF_IDLE)
/*
 * Poll PP_STATUS until (status & mask) == value, with a 5 s timeout at
 * 10 ms granularity.  Logs (but does not propagate) a timeout.
 * Caller must hold pps_mutex.
 */
1764 static void wait_panel_status(struct intel_dp *intel_dp,
1768 struct drm_device *dev = intel_dp_to_dev(intel_dp);
1769 struct drm_i915_private *dev_priv = dev->dev_private;
1770 i915_reg_t pp_stat_reg, pp_ctrl_reg;
1772 lockdep_assert_held(&dev_priv->pps_mutex);
1774 pp_stat_reg = _pp_stat_reg(intel_dp);
1775 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
1777 DRM_DEBUG_KMS("mask %08x value %08x status %08x control %08x\n",
1779 I915_READ(pp_stat_reg),
1780 I915_READ(pp_ctrl_reg));
1782 if (_wait_for((I915_READ(pp_stat_reg) & mask) == value, 5000, 10)) {
1783 DRM_ERROR("Panel status timeout: status %08x control %08x\n",
1784 I915_READ(pp_stat_reg),
1785 I915_READ(pp_ctrl_reg));
1788 DRM_DEBUG_KMS("Wait complete\n");
/* Block until the power sequencer reports the panel fully on. */
1791 static void wait_panel_on(struct intel_dp *intel_dp)
1793 DRM_DEBUG_KMS("Wait for panel power on\n");
1794 wait_panel_status(intel_dp, IDLE_ON_MASK, IDLE_ON_VALUE);
/* Block until the power sequencer reports the panel fully off. */
1797 static void wait_panel_off(struct intel_dp *intel_dp)
1799 DRM_DEBUG_KMS("Wait for panel power off time\n");
1800 wait_panel_status(intel_dp, IDLE_OFF_MASK, IDLE_OFF_VALUE);
/*
 * Enforce the panel's T11/T12 power-cycle delay: if less time has
 * passed since the last power-off than panel_power_cycle_delay, sleep
 * for the remainder, then wait for the sequencer's off-idle state.
 */
1803 static void wait_panel_power_cycle(struct intel_dp *intel_dp)
1805 ktime_t panel_power_on_time;
1806 s64 panel_power_off_duration;
1808 DRM_DEBUG_KMS("Wait for panel power cycle\n");
1810 /* take the difference of currrent time and panel power off time
1811 * and then make panel wait for t11_t12 if needed. */
1812 panel_power_on_time = ktime_get_boottime();
1813 panel_power_off_duration = ktime_ms_delta(panel_power_on_time, intel_dp->panel_power_off_time);
1815 /* When we disable the VDD override bit last we have to do the manual
1817 if (panel_power_off_duration < (s64)intel_dp->panel_power_cycle_delay)
1818 wait_remaining_ms_from_jiffies(jiffies,
1819 intel_dp->panel_power_cycle_delay - panel_power_off_duration);
1821 wait_panel_status(intel_dp, IDLE_CYCLE_MASK, IDLE_CYCLE_VALUE);
/* Honor the panel's power-on -> backlight-on delay (T8). */
1824 static void wait_backlight_on(struct intel_dp *intel_dp)
1826 wait_remaining_ms_from_jiffies(intel_dp->last_power_on,
1827 intel_dp->backlight_on_delay);
/* Honor the backlight-off -> panel-off delay (T9). */
1830 static void edp_wait_backlight_off(struct intel_dp *intel_dp)
1832 wait_remaining_ms_from_jiffies(intel_dp->last_backlight_off,
1833 intel_dp->backlight_off_delay);
1836 /* Read the current pp_control value, unlocking the register if it
/* is locked (non-BXT: replace the write-protect key with the unlock
 * value so subsequent writes take effect).  Caller holds pps_mutex. */
1840 static u32 ironlake_get_pp_control(struct intel_dp *intel_dp)
1842 struct drm_device *dev = intel_dp_to_dev(intel_dp);
1843 struct drm_i915_private *dev_priv = dev->dev_private;
1846 lockdep_assert_held(&dev_priv->pps_mutex);
1848 control = I915_READ(_pp_ctrl_reg(intel_dp));
/* BXT has no register write-protect key, so skip the unlock there. */
1849 if (!IS_BROXTON(dev)) {
1850 control &= ~PANEL_UNLOCK_MASK;
1851 control |= PANEL_UNLOCK_REGS;
1857 * Must be paired with edp_panel_vdd_off().
1858 * Must hold pps_mutex around the whole on/off sequence.
1859 * Can be nested with intel_edp_panel_vdd_{on,off}() calls.
1861 static bool edp_panel_vdd_on(struct intel_dp *intel_dp)
1863 struct drm_device *dev = intel_dp_to_dev(intel_dp);
1864 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
1865 struct intel_encoder *intel_encoder = &intel_dig_port->base;
1866 struct drm_i915_private *dev_priv = dev->dev_private;
1867 enum intel_display_power_domain power_domain;
1869 i915_reg_t pp_stat_reg, pp_ctrl_reg;
1870 bool need_to_disable = !intel_dp->want_panel_vdd;
1872 lockdep_assert_held(&dev_priv->pps_mutex);
1874 if (!is_edp(intel_dp))
1877 cancel_delayed_work(&intel_dp->panel_vdd_work);
1878 intel_dp->want_panel_vdd = true;
1880 if (edp_have_panel_vdd(intel_dp))
1881 return need_to_disable;
1883 power_domain = intel_display_port_aux_power_domain(intel_encoder);
1884 intel_display_power_get(dev_priv, power_domain);
1886 DRM_DEBUG_KMS("Turning eDP port %c VDD on\n",
1887 port_name(intel_dig_port->port));
1889 if (!edp_have_panel_power(intel_dp))
1890 wait_panel_power_cycle(intel_dp);
1892 pp = ironlake_get_pp_control(intel_dp);
1893 pp |= EDP_FORCE_VDD;
1895 pp_stat_reg = _pp_stat_reg(intel_dp);
1896 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
1898 I915_WRITE(pp_ctrl_reg, pp);
1899 POSTING_READ(pp_ctrl_reg);
1900 DRM_DEBUG_KMS("PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
1901 I915_READ(pp_stat_reg), I915_READ(pp_ctrl_reg));
1903 * If the panel wasn't on, delay before accessing aux channel
1905 if (!edp_have_panel_power(intel_dp)) {
1906 DRM_DEBUG_KMS("eDP port %c panel power wasn't enabled\n",
1907 port_name(intel_dig_port->port));
1908 msleep(intel_dp->panel_power_up_delay);
1911 return need_to_disable;
/*
 * Public wrapper around edp_panel_vdd_on(): takes pps_lock itself
 * (pps_lock acquisition is on an elided line before the call) and warns
 * if VDD was already requested, since nesting is not allowed here.
 */
1915 * Must be paired with intel_edp_panel_vdd_off() or
1916 * intel_edp_panel_off().
1917 * Nested calls to these functions are not allowed since
1918 * we drop the lock. Caller must use some higher level
1919 * locking to prevent nested calls from other threads.
1921 void intel_edp_panel_vdd_on(struct intel_dp *intel_dp)
1925 if (!is_edp(intel_dp))
1929 vdd = edp_panel_vdd_on(intel_dp);
1930 pps_unlock(intel_dp);
1932 I915_STATE_WARN(!vdd, "eDP port %c VDD already requested on\n",
1933 port_name(dp_to_dig_port(intel_dp)->port));
/*
 * Synchronously drop the forced VDD rail: clear EDP_FORCE_VDD, record
 * the power-off timestamp if the panel is now fully off, and release
 * the AUX power-domain reference taken by edp_panel_vdd_on().
 * Caller holds pps_mutex; want_panel_vdd must already be false.
 */
1936 static void edp_panel_vdd_off_sync(struct intel_dp *intel_dp)
1938 struct drm_device *dev = intel_dp_to_dev(intel_dp);
1939 struct drm_i915_private *dev_priv = dev->dev_private;
1940 struct intel_digital_port *intel_dig_port =
1941 dp_to_dig_port(intel_dp);
1942 struct intel_encoder *intel_encoder = &intel_dig_port->base;
1943 enum intel_display_power_domain power_domain;
1945 i915_reg_t pp_stat_reg, pp_ctrl_reg;
1947 lockdep_assert_held(&dev_priv->pps_mutex);
1949 WARN_ON(intel_dp->want_panel_vdd);
1951 if (!edp_have_panel_vdd(intel_dp))
1954 DRM_DEBUG_KMS("Turning eDP port %c VDD off\n",
1955 port_name(intel_dig_port->port));
1957 pp = ironlake_get_pp_control(intel_dp);
1958 pp &= ~EDP_FORCE_VDD;
1960 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
1961 pp_stat_reg = _pp_stat_reg(intel_dp);
1963 I915_WRITE(pp_ctrl_reg, pp);
1964 POSTING_READ(pp_ctrl_reg);
1966 /* Make sure sequencer is idle before allowing subsequent activity */
1967 DRM_DEBUG_KMS("PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
1968 I915_READ(pp_stat_reg), I915_READ(pp_ctrl_reg));
/* VDD was the last thing keeping the panel powered: start the T12
 * power-cycle clock now. */
1970 if ((pp & POWER_TARGET_ON) == 0)
1971 intel_dp->panel_power_off_time = ktime_get_boottime();
1973 power_domain = intel_display_port_aux_power_domain(intel_encoder);
1974 intel_display_power_put(dev_priv, power_domain);
/*
 * Delayed-work handler for the deferred VDD off: under pps_lock, drop
 * VDD only if nobody re-requested it in the meantime.
 */
1977 static void edp_panel_vdd_work(struct work_struct *__work)
1979 struct intel_dp *intel_dp = container_of(to_delayed_work(__work),
1980 struct intel_dp, panel_vdd_work);
1983 if (!intel_dp->want_panel_vdd)
1984 edp_panel_vdd_off_sync(intel_dp)
1985 pps_unlock(intel_dp);
/*
 * Schedule the deferred VDD off far in the future (5x the power-cycle
 * delay) so VDD stays up across a burst of AUX transactions instead of
 * bouncing the rail for each one.
 */
1988 static void edp_panel_vdd_schedule_off(struct intel_dp *intel_dp)
1990 unsigned long delay;
1993 * Queue the timer to fire a long time from now (relative to the power
1994 * down delay) to keep the panel power up across a sequence of
1997 delay = msecs_to_jiffies(intel_dp->panel_power_cycle_delay * 5);
1998 schedule_delayed_work(&intel_dp->panel_vdd_work, delay);
/*
 * Release a VDD force taken with edp_panel_vdd_on(): either drop it
 * immediately (sync) or defer it via the delayed work.
 */
2002 * Must be paired with edp_panel_vdd_on().
2003 * Must hold pps_mutex around the whole on/off sequence.
2004 * Can be nested with intel_edp_panel_vdd_{on,off}() calls.
2006 static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync)
2008 struct drm_i915_private *dev_priv =
2009 intel_dp_to_dev(intel_dp)->dev_private;
2011 lockdep_assert_held(&dev_priv->pps_mutex);
2013 if (!is_edp(intel_dp))
2016 I915_STATE_WARN(!intel_dp->want_panel_vdd, "eDP port %c VDD not forced on",
2017 port_name(dp_to_dig_port(intel_dp)->port));
2019 intel_dp->want_panel_vdd = false;
2022 edp_panel_vdd_off_sync(intel_dp);
2024 edp_panel_vdd_schedule_off(intel_dp);
/*
 * Turn the eDP panel power on via the power sequencer, respecting the
 * power-cycle delay and the ILK reset workaround.  Caller holds
 * pps_mutex.  No-op warning if the panel is already powered.
 */
2027 static void edp_panel_on(struct intel_dp *intel_dp)
2029 struct drm_device *dev = intel_dp_to_dev(intel_dp);
2030 struct drm_i915_private *dev_priv = dev->dev_private;
2032 i915_reg_t pp_ctrl_reg;
2034 lockdep_assert_held(&dev_priv->pps_mutex);
2036 if (!is_edp(intel_dp))
2039 DRM_DEBUG_KMS("Turn eDP port %c panel power on\n",
2040 port_name(dp_to_dig_port(intel_dp)->port));
2042 if (WARN(edp_have_panel_power(intel_dp),
2043 "eDP port %c panel power already on\n",
2044 port_name(dp_to_dig_port(intel_dp)->port)))
2047 wait_panel_power_cycle(intel_dp);
2049 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
2050 pp = ironlake_get_pp_control(intel_dp);
2052 /* ILK workaround: disable reset around power sequence */
2053 pp &= ~PANEL_POWER_RESET;
2054 I915_WRITE(pp_ctrl_reg, pp);
2055 POSTING_READ(pp_ctrl_reg);
/* Actually request panel power on; PANEL_POWER_RESET is set here on a
 * non-ILK path (the platform check is on an elided line). */
2058 pp |= POWER_TARGET_ON;
2060 pp |= PANEL_POWER_RESET;
2062 I915_WRITE(pp_ctrl_reg, pp);
2063 POSTING_READ(pp_ctrl_reg);
2065 wait_panel_on(intel_dp);
/* Timestamp used by wait_backlight_on() to enforce T8. */
2066 intel_dp->last_power_on = jiffies;
2069 pp |= PANEL_POWER_RESET; /* restore panel reset bit */
2070 I915_WRITE(pp_ctrl_reg, pp);
2071 POSTING_READ(pp_ctrl_reg);
/* Public wrapper: edp_panel_on() under pps_lock (lock acquisition is on
 * an elided line before the call). */
2075 void intel_edp_panel_on(struct intel_dp *intel_dp)
2077 if (!is_edp(intel_dp))
2081 edp_panel_on(intel_dp);
2082 pps_unlock(intel_dp);
/*
 * Turn the eDP panel power off.  Panel power and forced VDD are dropped
 * together (some panels misbehave otherwise); the AUX power-domain
 * reference acquired when VDD was enabled is released at the end.
 * Caller holds pps_mutex and must currently hold the VDD force.
 */
2086 static void edp_panel_off(struct intel_dp *intel_dp)
2088 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2089 struct intel_encoder *intel_encoder = &intel_dig_port->base;
2090 struct drm_device *dev = intel_dp_to_dev(intel_dp);
2091 struct drm_i915_private *dev_priv = dev->dev_private;
2092 enum intel_display_power_domain power_domain;
2094 i915_reg_t pp_ctrl_reg;
2096 lockdep_assert_held(&dev_priv->pps_mutex);
2098 if (!is_edp(intel_dp))
2101 DRM_DEBUG_KMS("Turn eDP port %c panel power off\n",
2102 port_name(dp_to_dig_port(intel_dp)->port));
2104 WARN(!intel_dp->want_panel_vdd, "Need eDP port %c VDD to turn off panel\n",
2105 port_name(dp_to_dig_port(intel_dp)->port));
2107 pp = ironlake_get_pp_control(intel_dp);
2108 /* We need to switch off panel power _and_ force vdd, for otherwise some
2109 * panels get very unhappy and cease to work. */
2110 pp &= ~(POWER_TARGET_ON | PANEL_POWER_RESET | EDP_FORCE_VDD |
2113 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
2115 intel_dp->want_panel_vdd = false;
2117 I915_WRITE(pp_ctrl_reg, pp);
2118 POSTING_READ(pp_ctrl_reg);
/* Start the T12 power-cycle clock and wait for full power-down. */
2120 intel_dp->panel_power_off_time = ktime_get_boottime();
2121 wait_panel_off(intel_dp);
2123 /* We got a reference when we enabled the VDD. */
2124 power_domain = intel_display_port_aux_power_domain(intel_encoder);
2125 intel_display_power_put(dev_priv, power_domain);
/* Public wrapper: edp_panel_off() under pps_lock (lock acquisition is on
 * an elided line before the call). */
2128 void intel_edp_panel_off(struct intel_dp *intel_dp)
2130 if (!is_edp(intel_dp))
2134 edp_panel_off(intel_dp);
2135 pps_unlock(intel_dp);
2138 /* Enable backlight in the panel power control. */
/* Waits out the panel-on -> backlight-on delay (T8) first, then sets
 * EDP_BLC_ENABLE in PP_CONTROL under pps_lock. */
2139 static void _intel_edp_backlight_on(struct intel_dp *intel_dp)
2141 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2142 struct drm_device *dev = intel_dig_port->base.base.dev;
2143 struct drm_i915_private *dev_priv = dev->dev_private;
2145 i915_reg_t pp_ctrl_reg;
2148 * If we enable the backlight right away following a panel power
2149 * on, we may see slight flicker as the panel syncs with the eDP
2150 * link. So delay a bit to make sure the image is solid before
2151 * allowing it to appear.
2153 wait_backlight_on(intel_dp);
2157 pp = ironlake_get_pp_control(intel_dp);
2158 pp |= EDP_BLC_ENABLE;
2160 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
2162 I915_WRITE(pp_ctrl_reg, pp);
2163 POSTING_READ(pp_ctrl_reg);
2165 pps_unlock(intel_dp);
2168 /* Enable backlight PWM and backlight PP control. */
/* Order matters: program the PWM first, then flip the PP enable bit. */
2169 void intel_edp_backlight_on(struct intel_dp *intel_dp)
2171 if (!is_edp(intel_dp))
2174 DRM_DEBUG_KMS("\n");
2176 intel_panel_enable_backlight(intel_dp->attached_connector);
2177 _intel_edp_backlight_on(intel_dp);
2180 /* Disable backlight in the panel power control. */
/* Clears EDP_BLC_ENABLE under pps_lock, then records the timestamp and
 * waits out the backlight-off delay (T9) before returning. */
2181 static void _intel_edp_backlight_off(struct intel_dp *intel_dp)
2183 struct drm_device *dev = intel_dp_to_dev(intel_dp);
2184 struct drm_i915_private *dev_priv = dev->dev_private;
2186 i915_reg_t pp_ctrl_reg;
2188 if (!is_edp(intel_dp))
2193 pp = ironlake_get_pp_control(intel_dp);
2194 pp &= ~EDP_BLC_ENABLE;
2196 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
2198 I915_WRITE(pp_ctrl_reg, pp);
2199 POSTING_READ(pp_ctrl_reg);
2201 pps_unlock(intel_dp);
2203 intel_dp->last_backlight_off = jiffies;
2204 edp_wait_backlight_off(intel_dp);
2207 /* Disable backlight PP control and backlight PWM. */
/* Mirror of intel_edp_backlight_on(): PP enable bit first, PWM last. */
2208 void intel_edp_backlight_off(struct intel_dp *intel_dp)
2210 if (!is_edp(intel_dp))
2213 DRM_DEBUG_KMS("\n");
2215 _intel_edp_backlight_off(intel_dp);
2216 intel_panel_disable_backlight(intel_dp->attached_connector);
/*
 * sysfs bl_power hook: toggle the PP-control backlight enable, but only
 * when the requested state differs from the current hardware state, so
 * repeated writes are idempotent.
 */
2220 * Hook for controlling the panel power control backlight through the bl_power
2221 * sysfs attribute. Take care to handle multiple calls.
2223 static void intel_edp_backlight_power(struct intel_connector *connector,
2226 struct intel_dp *intel_dp = intel_attached_dp(&connector->base);
/* Sample the current enable bit under pps_lock. */
2230 is_enabled = ironlake_get_pp_control(intel_dp) & EDP_BLC_ENABLE;
2231 pps_unlock(intel_dp);
2233 if (is_enabled == enable)
2236 DRM_DEBUG_KMS("panel power control backlight %s\n",
2237 enable ? "enable" : "disable");
2240 _intel_edp_backlight_on(intel_dp);
2242 _intel_edp_backlight_off(intel_dp);
/*
 * State assertion: warn if the DP port-enable bit does not match the
 * expected state.
 */
2245 static void assert_dp_port(struct intel_dp *intel_dp, bool state)
2247 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
2248 struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
2249 bool cur_state = I915_READ(intel_dp->output_reg) & DP_PORT_EN;
2251 I915_STATE_WARN(cur_state != state,
2252 "DP port %c state assertion failure (expected %s, current %s)\n",
2253 port_name(dig_port->port),
2254 onoff(state), onoff(cur_state));
2256 #define assert_dp_port_disabled(d) assert_dp_port((d), false)
/*
 * State assertion: warn if the eDP PLL enable bit in DP_A does not
 * match the expected state.
 */
2258 static void assert_edp_pll(struct drm_i915_private *dev_priv, bool state)
2260 bool cur_state = I915_READ(DP_A) & DP_PLL_ENABLE;
2262 I915_STATE_WARN(cur_state != state,
2263 "eDP PLL state assertion failure (expected %s, current %s)\n",
2264 onoff(state), onoff(cur_state));
2266 #define assert_edp_pll_enabled(d) assert_edp_pll((d), true)
2267 #define assert_edp_pll_disabled(d) assert_edp_pll((d), false)
/*
 * Enable the CPU eDP PLL (DP_A register): program the frequency select
 * for the configured port clock, then set the enable bit in a second
 * write.  Must run with the pipe, the DP port and the PLL all disabled.
 */
2269 static void ironlake_edp_pll_on(struct intel_dp *intel_dp)
2271 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2272 struct intel_crtc *crtc = to_intel_crtc(intel_dig_port->base.base.crtc);
2273 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
2275 assert_pipe_disabled(dev_priv, crtc->pipe);
2276 assert_dp_port_disabled(intel_dp);
2277 assert_edp_pll_disabled(dev_priv);
2279 DRM_DEBUG_KMS("enabling eDP PLL for clock %d\n",
2280 crtc->config->port_clock);
2282 intel_dp->DP &= ~DP_PLL_FREQ_MASK;
/* Only 162 MHz and 270 MHz link clocks exist on this PLL. */
2284 if (crtc->config->port_clock == 162000)
2285 intel_dp->DP |= DP_PLL_FREQ_162MHZ;
2287 intel_dp->DP |= DP_PLL_FREQ_270MHZ;
2289 I915_WRITE(DP_A, intel_dp->DP);
/* Separate write for the enable bit after frequency is latched. */
2293 intel_dp->DP |= DP_PLL_ENABLE;
2295 I915_WRITE(DP_A, intel_dp->DP);
/*
 * Disable the CPU eDP PLL.  Must run with the pipe and DP port already
 * disabled and the PLL currently enabled.
 */
2300 static void ironlake_edp_pll_off(struct intel_dp *intel_dp)
2302 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2303 struct intel_crtc *crtc = to_intel_crtc(intel_dig_port->base.base.crtc);
2304 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
2306 assert_pipe_disabled(dev_priv, crtc->pipe);
2307 assert_dp_port_disabled(intel_dp);
2308 assert_edp_pll_enabled(dev_priv);
2310 DRM_DEBUG_KMS("disabling eDP PLL\n");
2312 intel_dp->DP &= ~DP_PLL_ENABLE;
2314 I915_WRITE(DP_A, intel_dp->DP);
2319 /* If the sink supports it, try to set the power state appropriately */
/* Writes DPCD SET_POWER: a single write for power-down, up to three
 * retried writes for power-up since the sink may need time to wake. */
2320 void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode)
2324 /* Should have a valid DPCD by this point */
/* DPCD 1.0 sinks have no SET_POWER register — nothing to do. */
2325 if (intel_dp->dpcd[DP_DPCD_REV] < 0x11)
2328 if (mode != DRM_MODE_DPMS_ON) {
2329 ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER,
2333 * When turning on, we need to retry for 1ms to give the sink
2336 for (i = 0; i < 3; i++) {
2337 ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER,
/* Failure is logged, not propagated — best-effort by design. */
2346 DRM_DEBUG_KMS("failed to %s sink power state\n",
2347 mode == DRM_MODE_DPMS_ON ? "enable" : "disable");
/*
 * Encoder ->get_hw_state hook: report whether the DP port is enabled
 * and, if so, which pipe drives it.  Pipe decoding differs per
 * platform: gen7 port A and CPT use dedicated fields, CPT additionally
 * requires scanning TRANS_DP_CTL of each pipe to find the one routed to
 * this port.  Takes a power-domain reference for the register reads.
 */
2350 static bool intel_dp_get_hw_state(struct intel_encoder *encoder,
2353 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2354 enum port port = dp_to_dig_port(intel_dp)->port;
2355 struct drm_device *dev = encoder->base.dev;
2356 struct drm_i915_private *dev_priv = dev->dev_private;
2357 enum intel_display_power_domain power_domain;
2361 power_domain = intel_display_port_power_domain(encoder);
2362 if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
2367 tmp = I915_READ(intel_dp->output_reg);
2369 if (!(tmp & DP_PORT_EN))
2372 if (IS_GEN7(dev) && port == PORT_A) {
2373 *pipe = PORT_TO_PIPE_CPT(tmp);
2374 } else if (HAS_PCH_CPT(dev) && port != PORT_A) {
/* CPT: the port->pipe routing lives in per-pipe TRANS_DP_CTL. */
2377 for_each_pipe(dev_priv, p) {
2378 u32 trans_dp = I915_READ(TRANS_DP_CTL(p));
2379 if (TRANS_DP_PIPE_TO_PORT(trans_dp) == port) {
2387 DRM_DEBUG_KMS("No pipe for dp port 0x%x found\n",
2388 i915_mmio_reg_offset(intel_dp->output_reg));
2389 } else if (IS_CHERRYVIEW(dev)) {
2390 *pipe = DP_PORT_TO_PIPE_CHV(tmp);
2392 *pipe = PORT_TO_PIPE(tmp);
2398 intel_display_power_put(dev_priv, power_domain);
/*
 * Encoder ->get_config hook: reconstruct the crtc state (sync flags,
 * color range, lane count, M/N, port clock, dot clock) from the DP
 * port and transcoder registers, for hardware state readout/check.
 */
2403 static void intel_dp_get_config(struct intel_encoder *encoder,
2404 struct intel_crtc_state *pipe_config)
2406 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2408 struct drm_device *dev = encoder->base.dev;
2409 struct drm_i915_private *dev_priv = dev->dev_private;
2410 enum port port = dp_to_dig_port(intel_dp)->port;
2411 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
2414 tmp = I915_READ(intel_dp->output_reg);
2416 pipe_config->has_audio = tmp & DP_AUDIO_OUTPUT_ENABLE && port != PORT_A;
/* Sync polarity: CPT keeps it in TRANS_DP_CTL, others in the port reg. */
2418 if (HAS_PCH_CPT(dev) && port != PORT_A) {
2419 u32 trans_dp = I915_READ(TRANS_DP_CTL(crtc->pipe));
2421 if (trans_dp & TRANS_DP_HSYNC_ACTIVE_HIGH)
2422 flags |= DRM_MODE_FLAG_PHSYNC;
2424 flags |= DRM_MODE_FLAG_NHSYNC;
2426 if (trans_dp & TRANS_DP_VSYNC_ACTIVE_HIGH)
2427 flags |= DRM_MODE_FLAG_PVSYNC;
2429 flags |= DRM_MODE_FLAG_NVSYNC;
2431 if (tmp & DP_SYNC_HS_HIGH)
2432 flags |= DRM_MODE_FLAG_PHSYNC;
2434 flags |= DRM_MODE_FLAG_NHSYNC;
2436 if (tmp & DP_SYNC_VS_HIGH)
2437 flags |= DRM_MODE_FLAG_PVSYNC;
2439 flags |= DRM_MODE_FLAG_NVSYNC;
2442 pipe_config->base.adjusted_mode.flags |= flags;
2444 if (!HAS_PCH_SPLIT(dev) && !IS_VALLEYVIEW(dev) &&
2445 !IS_CHERRYVIEW(dev) && tmp & DP_COLOR_RANGE_16_235)
2446 pipe_config->limited_color_range = true;
2448 pipe_config->has_dp_encoder = true;
2450 pipe_config->lane_count =
2451 ((tmp & DP_PORT_WIDTH_MASK) >> DP_PORT_WIDTH_SHIFT) + 1;
2453 intel_dp_get_m_n(crtc, pipe_config);
/* Port A: link rate comes from the eDP PLL frequency select bits. */
2455 if (port == PORT_A) {
2456 if ((I915_READ(DP_A) & DP_PLL_FREQ_MASK) == DP_PLL_FREQ_162MHZ)
2457 pipe_config->port_clock = 162000;
2459 pipe_config->port_clock = 270000;
2462 dotclock = intel_dotclock_calculate(pipe_config->port_clock,
2463 &pipe_config->dp_m_n);
2465 if (HAS_PCH_SPLIT(dev_priv->dev) && port != PORT_A)
2466 ironlake_check_encoder_dotclock(pipe_config, dotclock);
2468 pipe_config->base.adjusted_mode.crtc_clock = dotclock;
2470 if (is_edp(intel_dp) && dev_priv->vbt.edp_bpp &&
2471 pipe_config->pipe_bpp > dev_priv->vbt.edp_bpp) {
2473 * This is a big fat ugly hack.
2475 * Some machines in UEFI boot mode provide us a VBT that has 18
2476 * bpp and 1.62 GHz link bandwidth for eDP, which for reasons
2477 * unknown we fail to light up. Yet the same BIOS boots up with
2478 * 24 bpp and 2.7 GHz link. Use the same bpp as the BIOS uses as
2479 * max, not what it tells us to use.
2481 * Note: This will still be broken if the eDP panel is not lit
2482 * up by the BIOS, and thus we can't get the mode at module
2485 DRM_DEBUG_KMS("pipe has %d bpp for eDP panel, overriding BIOS-provided max %d bpp\n",
2486 pipe_config->pipe_bpp, dev_priv->vbt.edp_bpp);
2487 dev_priv->vbt.edp_bpp = pipe_config->pipe_bpp;
2491 static void intel_disable_dp(struct intel_encoder *encoder)
2493 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2494 struct drm_device *dev = encoder->base.dev;
2495 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
2497 if (crtc->config->has_audio)
2498 intel_audio_codec_disable(encoder);
2500 if (HAS_PSR(dev) && !HAS_DDI(dev))
2501 intel_psr_disable(intel_dp);
2503 /* Make sure the panel is off before trying to change the mode. But also
2504 * ensure that we have vdd while we switch off the panel. */
2505 intel_edp_panel_vdd_on(intel_dp);
2506 intel_edp_backlight_off(intel_dp);
2507 intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_OFF);
2508 intel_edp_panel_off(intel_dp);
2510 /* disable the port before the pipe on g4x */
2511 if (INTEL_INFO(dev)->gen < 5)
2512 intel_dp_link_down(intel_dp);
2515 static void ilk_post_disable_dp(struct intel_encoder *encoder)
2517 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2518 enum port port = dp_to_dig_port(intel_dp)->port;
2520 intel_dp_link_down(intel_dp);
2522 /* Only ilk+ has port A */
2524 ironlake_edp_pll_off(intel_dp);
2527 static void vlv_post_disable_dp(struct intel_encoder *encoder)
2529 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2531 intel_dp_link_down(intel_dp);
2534 static void chv_data_lane_soft_reset(struct intel_encoder *encoder,
2537 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
2538 enum dpio_channel ch = vlv_dport_to_channel(enc_to_dig_port(&encoder->base));
2539 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
2540 enum pipe pipe = crtc->pipe;
2543 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW0(ch));
2545 val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
2547 val |= DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET;
2548 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW0(ch), val);
2550 if (crtc->config->lane_count > 2) {
2551 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW0(ch));
2553 val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
2555 val |= DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET;
2556 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW0(ch), val);
2559 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW1(ch));
2560 val |= CHV_PCS_REQ_SOFTRESET_EN;
2562 val &= ~DPIO_PCS_CLK_SOFT_RESET;
2564 val |= DPIO_PCS_CLK_SOFT_RESET;
2565 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW1(ch), val);
2567 if (crtc->config->lane_count > 2) {
2568 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW1(ch));
2569 val |= CHV_PCS_REQ_SOFTRESET_EN;
2571 val &= ~DPIO_PCS_CLK_SOFT_RESET;
2573 val |= DPIO_PCS_CLK_SOFT_RESET;
2574 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW1(ch), val);
2578 static void chv_post_disable_dp(struct intel_encoder *encoder)
2580 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2581 struct drm_device *dev = encoder->base.dev;
2582 struct drm_i915_private *dev_priv = dev->dev_private;
2584 intel_dp_link_down(intel_dp);
2586 mutex_lock(&dev_priv->sb_lock);
2588 /* Assert data lane reset */
2589 chv_data_lane_soft_reset(encoder, true);
2591 mutex_unlock(&dev_priv->sb_lock);
2595 _intel_dp_set_link_train(struct intel_dp *intel_dp,
2597 uint8_t dp_train_pat)
2599 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2600 struct drm_device *dev = intel_dig_port->base.base.dev;
2601 struct drm_i915_private *dev_priv = dev->dev_private;
2602 enum port port = intel_dig_port->port;
2605 uint32_t temp = I915_READ(DP_TP_CTL(port));
2607 if (dp_train_pat & DP_LINK_SCRAMBLING_DISABLE)
2608 temp |= DP_TP_CTL_SCRAMBLE_DISABLE;
2610 temp &= ~DP_TP_CTL_SCRAMBLE_DISABLE;
2612 temp &= ~DP_TP_CTL_LINK_TRAIN_MASK;
2613 switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
2614 case DP_TRAINING_PATTERN_DISABLE:
2615 temp |= DP_TP_CTL_LINK_TRAIN_NORMAL;
2618 case DP_TRAINING_PATTERN_1:
2619 temp |= DP_TP_CTL_LINK_TRAIN_PAT1;
2621 case DP_TRAINING_PATTERN_2:
2622 temp |= DP_TP_CTL_LINK_TRAIN_PAT2;
2624 case DP_TRAINING_PATTERN_3:
2625 temp |= DP_TP_CTL_LINK_TRAIN_PAT3;
2628 I915_WRITE(DP_TP_CTL(port), temp);
2630 } else if ((IS_GEN7(dev) && port == PORT_A) ||
2631 (HAS_PCH_CPT(dev) && port != PORT_A)) {
2632 *DP &= ~DP_LINK_TRAIN_MASK_CPT;
2634 switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
2635 case DP_TRAINING_PATTERN_DISABLE:
2636 *DP |= DP_LINK_TRAIN_OFF_CPT;
2638 case DP_TRAINING_PATTERN_1:
2639 *DP |= DP_LINK_TRAIN_PAT_1_CPT;
2641 case DP_TRAINING_PATTERN_2:
2642 *DP |= DP_LINK_TRAIN_PAT_2_CPT;
2644 case DP_TRAINING_PATTERN_3:
2645 DRM_ERROR("DP training pattern 3 not supported\n");
2646 *DP |= DP_LINK_TRAIN_PAT_2_CPT;
2651 if (IS_CHERRYVIEW(dev))
2652 *DP &= ~DP_LINK_TRAIN_MASK_CHV;
2654 *DP &= ~DP_LINK_TRAIN_MASK;
2656 switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
2657 case DP_TRAINING_PATTERN_DISABLE:
2658 *DP |= DP_LINK_TRAIN_OFF;
2660 case DP_TRAINING_PATTERN_1:
2661 *DP |= DP_LINK_TRAIN_PAT_1;
2663 case DP_TRAINING_PATTERN_2:
2664 *DP |= DP_LINK_TRAIN_PAT_2;
2666 case DP_TRAINING_PATTERN_3:
2667 if (IS_CHERRYVIEW(dev)) {
2668 *DP |= DP_LINK_TRAIN_PAT_3_CHV;
2670 DRM_ERROR("DP training pattern 3 not supported\n");
2671 *DP |= DP_LINK_TRAIN_PAT_2;
2678 static void intel_dp_enable_port(struct intel_dp *intel_dp)
2680 struct drm_device *dev = intel_dp_to_dev(intel_dp);
2681 struct drm_i915_private *dev_priv = dev->dev_private;
2682 struct intel_crtc *crtc =
2683 to_intel_crtc(dp_to_dig_port(intel_dp)->base.base.crtc);
2685 /* enable with pattern 1 (as per spec) */
2686 _intel_dp_set_link_train(intel_dp, &intel_dp->DP,
2687 DP_TRAINING_PATTERN_1);
2689 I915_WRITE(intel_dp->output_reg, intel_dp->DP);
2690 POSTING_READ(intel_dp->output_reg);
2693 * Magic for VLV/CHV. We _must_ first set up the register
2694 * without actually enabling the port, and then do another
2695 * write to enable the port. Otherwise link training will
2696 * fail when the power sequencer is freshly used for this port.
2698 intel_dp->DP |= DP_PORT_EN;
2699 if (crtc->config->has_audio)
2700 intel_dp->DP |= DP_AUDIO_OUTPUT_ENABLE;
2702 I915_WRITE(intel_dp->output_reg, intel_dp->DP);
2703 POSTING_READ(intel_dp->output_reg);
2706 static void intel_enable_dp(struct intel_encoder *encoder)
2708 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2709 struct drm_device *dev = encoder->base.dev;
2710 struct drm_i915_private *dev_priv = dev->dev_private;
2711 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
2712 uint32_t dp_reg = I915_READ(intel_dp->output_reg);
2713 enum port port = dp_to_dig_port(intel_dp)->port;
2714 enum pipe pipe = crtc->pipe;
2716 if (WARN_ON(dp_reg & DP_PORT_EN))
2721 if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
2722 vlv_init_panel_power_sequencer(intel_dp);
2725 * We get an occasional spurious underrun between the port
2726 * enable and vdd enable, when enabling port A eDP.
2728 * FIXME: Not sure if this applies to (PCH) port D eDP as well
2731 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
2733 intel_dp_enable_port(intel_dp);
2735 if (port == PORT_A && IS_GEN5(dev_priv)) {
2737 * Underrun reporting for the other pipe was disabled in
2738 * g4x_pre_enable_dp(). The eDP PLL and port have now been
2739 * enabled, so it's now safe to re-enable underrun reporting.
2741 intel_wait_for_vblank_if_active(dev_priv->dev, !pipe);
2742 intel_set_cpu_fifo_underrun_reporting(dev_priv, !pipe, true);
2743 intel_set_pch_fifo_underrun_reporting(dev_priv, !pipe, true);
2746 edp_panel_vdd_on(intel_dp);
2747 edp_panel_on(intel_dp);
2748 edp_panel_vdd_off(intel_dp, true);
2751 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
2753 pps_unlock(intel_dp);
2755 if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
2756 unsigned int lane_mask = 0x0;
2758 if (IS_CHERRYVIEW(dev))
2759 lane_mask = intel_dp_unused_lane_mask(crtc->config->lane_count);
2761 vlv_wait_port_ready(dev_priv, dp_to_dig_port(intel_dp),
2765 intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
2766 intel_dp_start_link_train(intel_dp);
2767 intel_dp_stop_link_train(intel_dp);
2769 if (crtc->config->has_audio) {
2770 DRM_DEBUG_DRIVER("Enabling DP audio on pipe %c\n",
2772 intel_audio_codec_enable(encoder);
2776 static void g4x_enable_dp(struct intel_encoder *encoder)
2778 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2780 intel_enable_dp(encoder);
2781 intel_edp_backlight_on(intel_dp);
2784 static void vlv_enable_dp(struct intel_encoder *encoder)
2786 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2788 intel_edp_backlight_on(intel_dp);
2789 intel_psr_enable(intel_dp);
2792 static void g4x_pre_enable_dp(struct intel_encoder *encoder)
2794 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
2795 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2796 enum port port = dp_to_dig_port(intel_dp)->port;
2797 enum pipe pipe = to_intel_crtc(encoder->base.crtc)->pipe;
2799 intel_dp_prepare(encoder);
2801 if (port == PORT_A && IS_GEN5(dev_priv)) {
2803 * We get FIFO underruns on the other pipe when
2804 * enabling the CPU eDP PLL, and when enabling CPU
2805 * eDP port. We could potentially avoid the PLL
2806 * underrun with a vblank wait just prior to enabling
2807 * the PLL, but that doesn't appear to help the port
2808 * enable case. Just sweep it all under the rug.
2810 intel_set_cpu_fifo_underrun_reporting(dev_priv, !pipe, false);
2811 intel_set_pch_fifo_underrun_reporting(dev_priv, !pipe, false);
2814 /* Only ilk+ has port A */
2816 ironlake_edp_pll_on(intel_dp);
2819 static void vlv_detach_power_sequencer(struct intel_dp *intel_dp)
2821 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2822 struct drm_i915_private *dev_priv = intel_dig_port->base.base.dev->dev_private;
2823 enum pipe pipe = intel_dp->pps_pipe;
2824 i915_reg_t pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe);
2826 edp_panel_vdd_off_sync(intel_dp);
2829 * VLV seems to get confused when multiple power seqeuencers
2830 * have the same port selected (even if only one has power/vdd
2831 * enabled). The failure manifests as vlv_wait_port_ready() failing
2832 * CHV on the other hand doesn't seem to mind having the same port
2833 * selected in multiple power seqeuencers, but let's clear the
2834 * port select always when logically disconnecting a power sequencer
2837 DRM_DEBUG_KMS("detaching pipe %c power sequencer from port %c\n",
2838 pipe_name(pipe), port_name(intel_dig_port->port));
2839 I915_WRITE(pp_on_reg, 0);
2840 POSTING_READ(pp_on_reg);
2842 intel_dp->pps_pipe = INVALID_PIPE;
2845 static void vlv_steal_power_sequencer(struct drm_device *dev,
2848 struct drm_i915_private *dev_priv = dev->dev_private;
2849 struct intel_encoder *encoder;
2851 lockdep_assert_held(&dev_priv->pps_mutex);
2853 if (WARN_ON(pipe != PIPE_A && pipe != PIPE_B))
2856 for_each_intel_encoder(dev, encoder) {
2857 struct intel_dp *intel_dp;
2860 if (encoder->type != INTEL_OUTPUT_EDP)
2863 intel_dp = enc_to_intel_dp(&encoder->base);
2864 port = dp_to_dig_port(intel_dp)->port;
2866 if (intel_dp->pps_pipe != pipe)
2869 DRM_DEBUG_KMS("stealing pipe %c power sequencer from port %c\n",
2870 pipe_name(pipe), port_name(port));
2872 WARN(encoder->base.crtc,
2873 "stealing pipe %c power sequencer from active eDP port %c\n",
2874 pipe_name(pipe), port_name(port));
2876 /* make sure vdd is off before we steal it */
2877 vlv_detach_power_sequencer(intel_dp);
2881 static void vlv_init_panel_power_sequencer(struct intel_dp *intel_dp)
2883 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2884 struct intel_encoder *encoder = &intel_dig_port->base;
2885 struct drm_device *dev = encoder->base.dev;
2886 struct drm_i915_private *dev_priv = dev->dev_private;
2887 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
2889 lockdep_assert_held(&dev_priv->pps_mutex);
2891 if (!is_edp(intel_dp))
2894 if (intel_dp->pps_pipe == crtc->pipe)
2898 * If another power sequencer was being used on this
2899 * port previously make sure to turn off vdd there while
2900 * we still have control of it.
2902 if (intel_dp->pps_pipe != INVALID_PIPE)
2903 vlv_detach_power_sequencer(intel_dp);
2906 * We may be stealing the power
2907 * sequencer from another port.
2909 vlv_steal_power_sequencer(dev, crtc->pipe);
2911 /* now it's all ours */
2912 intel_dp->pps_pipe = crtc->pipe;
2914 DRM_DEBUG_KMS("initializing pipe %c power sequencer for port %c\n",
2915 pipe_name(intel_dp->pps_pipe), port_name(intel_dig_port->port));
2917 /* init power sequencer on this pipe and port */
2918 intel_dp_init_panel_power_sequencer(dev, intel_dp);
2919 intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
2922 static void vlv_pre_enable_dp(struct intel_encoder *encoder)
2924 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2925 struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
2926 struct drm_device *dev = encoder->base.dev;
2927 struct drm_i915_private *dev_priv = dev->dev_private;
2928 struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
2929 enum dpio_channel port = vlv_dport_to_channel(dport);
2930 int pipe = intel_crtc->pipe;
2933 mutex_lock(&dev_priv->sb_lock);
2935 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW8(port));
2942 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW8(port), val);
2943 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW14(port), 0x00760018);
2944 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW23(port), 0x00400888);
2946 mutex_unlock(&dev_priv->sb_lock);
2948 intel_enable_dp(encoder);
2951 static void vlv_dp_pre_pll_enable(struct intel_encoder *encoder)
2953 struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
2954 struct drm_device *dev = encoder->base.dev;
2955 struct drm_i915_private *dev_priv = dev->dev_private;
2956 struct intel_crtc *intel_crtc =
2957 to_intel_crtc(encoder->base.crtc);
2958 enum dpio_channel port = vlv_dport_to_channel(dport);
2959 int pipe = intel_crtc->pipe;
2961 intel_dp_prepare(encoder);
2963 /* Program Tx lane resets to default */
2964 mutex_lock(&dev_priv->sb_lock);
2965 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW0(port),
2966 DPIO_PCS_TX_LANE2_RESET |
2967 DPIO_PCS_TX_LANE1_RESET);
2968 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW1(port),
2969 DPIO_PCS_CLK_CRI_RXEB_EIOS_EN |
2970 DPIO_PCS_CLK_CRI_RXDIGFILTSG_EN |
2971 (1<<DPIO_PCS_CLK_DATAWIDTH_SHIFT) |
2972 DPIO_PCS_CLK_SOFT_RESET);
2974 /* Fix up inter-pair skew failure */
2975 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW12(port), 0x00750f00);
2976 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW11(port), 0x00001500);
2977 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW14(port), 0x40400000);
2978 mutex_unlock(&dev_priv->sb_lock);
2981 static void chv_pre_enable_dp(struct intel_encoder *encoder)
2983 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2984 struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
2985 struct drm_device *dev = encoder->base.dev;
2986 struct drm_i915_private *dev_priv = dev->dev_private;
2987 struct intel_crtc *intel_crtc =
2988 to_intel_crtc(encoder->base.crtc);
2989 enum dpio_channel ch = vlv_dport_to_channel(dport);
2990 int pipe = intel_crtc->pipe;
2991 int data, i, stagger;
2994 mutex_lock(&dev_priv->sb_lock);
2996 /* allow hardware to manage TX FIFO reset source */
2997 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW11(ch));
2998 val &= ~DPIO_LANEDESKEW_STRAP_OVRD;
2999 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW11(ch), val);
3001 if (intel_crtc->config->lane_count > 2) {
3002 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW11(ch));
3003 val &= ~DPIO_LANEDESKEW_STRAP_OVRD;
3004 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW11(ch), val);
3007 /* Program Tx lane latency optimal setting*/
3008 for (i = 0; i < intel_crtc->config->lane_count; i++) {
3009 /* Set the upar bit */
3010 if (intel_crtc->config->lane_count == 1)
3013 data = (i == 1) ? 0x0 : 0x1;
3014 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW14(ch, i),
3015 data << DPIO_UPAR_SHIFT);
3018 /* Data lane stagger programming */
3019 if (intel_crtc->config->port_clock > 270000)
3021 else if (intel_crtc->config->port_clock > 135000)
3023 else if (intel_crtc->config->port_clock > 67500)
3025 else if (intel_crtc->config->port_clock > 33750)
3030 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW11(ch));
3031 val |= DPIO_TX2_STAGGER_MASK(0x1f);
3032 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW11(ch), val);
3034 if (intel_crtc->config->lane_count > 2) {
3035 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW11(ch));
3036 val |= DPIO_TX2_STAGGER_MASK(0x1f);
3037 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW11(ch), val);
3040 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW12(ch),
3041 DPIO_LANESTAGGER_STRAP(stagger) |
3042 DPIO_LANESTAGGER_STRAP_OVRD |
3043 DPIO_TX1_STAGGER_MASK(0x1f) |
3044 DPIO_TX1_STAGGER_MULT(6) |
3045 DPIO_TX2_STAGGER_MULT(0));
3047 if (intel_crtc->config->lane_count > 2) {
3048 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW12(ch),
3049 DPIO_LANESTAGGER_STRAP(stagger) |
3050 DPIO_LANESTAGGER_STRAP_OVRD |
3051 DPIO_TX1_STAGGER_MASK(0x1f) |
3052 DPIO_TX1_STAGGER_MULT(7) |
3053 DPIO_TX2_STAGGER_MULT(5));
3056 /* Deassert data lane reset */
3057 chv_data_lane_soft_reset(encoder, false);
3059 mutex_unlock(&dev_priv->sb_lock);
3061 intel_enable_dp(encoder);
3063 /* Second common lane will stay alive on its own now */
3064 if (dport->release_cl2_override) {
3065 chv_phy_powergate_ch(dev_priv, DPIO_PHY0, DPIO_CH1, false);
3066 dport->release_cl2_override = false;
3070 static void chv_dp_pre_pll_enable(struct intel_encoder *encoder)
3072 struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
3073 struct drm_device *dev = encoder->base.dev;
3074 struct drm_i915_private *dev_priv = dev->dev_private;
3075 struct intel_crtc *intel_crtc =
3076 to_intel_crtc(encoder->base.crtc);
3077 enum dpio_channel ch = vlv_dport_to_channel(dport);
3078 enum pipe pipe = intel_crtc->pipe;
3079 unsigned int lane_mask =
3080 intel_dp_unused_lane_mask(intel_crtc->config->lane_count);
3083 intel_dp_prepare(encoder);
3086 * Must trick the second common lane into life.
3087 * Otherwise we can't even access the PLL.
3089 if (ch == DPIO_CH0 && pipe == PIPE_B)
3090 dport->release_cl2_override =
3091 !chv_phy_powergate_ch(dev_priv, DPIO_PHY0, DPIO_CH1, true);
3093 chv_phy_powergate_lanes(encoder, true, lane_mask);
3095 mutex_lock(&dev_priv->sb_lock);
3097 /* Assert data lane reset */
3098 chv_data_lane_soft_reset(encoder, true);
3100 /* program left/right clock distribution */
3101 if (pipe != PIPE_B) {
3102 val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW5_CH0);
3103 val &= ~(CHV_BUFLEFTENA1_MASK | CHV_BUFRIGHTENA1_MASK);
3105 val |= CHV_BUFLEFTENA1_FORCE;
3107 val |= CHV_BUFRIGHTENA1_FORCE;
3108 vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW5_CH0, val);
3110 val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW1_CH1);
3111 val &= ~(CHV_BUFLEFTENA2_MASK | CHV_BUFRIGHTENA2_MASK);
3113 val |= CHV_BUFLEFTENA2_FORCE;
3115 val |= CHV_BUFRIGHTENA2_FORCE;
3116 vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW1_CH1, val);
3119 /* program clock channel usage */
3120 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW8(ch));
3121 val |= CHV_PCS_USEDCLKCHANNEL_OVRRIDE;
3123 val &= ~CHV_PCS_USEDCLKCHANNEL;
3125 val |= CHV_PCS_USEDCLKCHANNEL;
3126 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW8(ch), val);
3128 if (intel_crtc->config->lane_count > 2) {
3129 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW8(ch));
3130 val |= CHV_PCS_USEDCLKCHANNEL_OVRRIDE;
3132 val &= ~CHV_PCS_USEDCLKCHANNEL;
3134 val |= CHV_PCS_USEDCLKCHANNEL;
3135 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW8(ch), val);
3139 * This a a bit weird since generally CL
3140 * matches the pipe, but here we need to
3141 * pick the CL based on the port.
3143 val = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW19(ch));
3145 val &= ~CHV_CMN_USEDCLKCHANNEL;
3147 val |= CHV_CMN_USEDCLKCHANNEL;
3148 vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW19(ch), val);
3150 mutex_unlock(&dev_priv->sb_lock);
3153 static void chv_dp_post_pll_disable(struct intel_encoder *encoder)
3155 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
3156 enum pipe pipe = to_intel_crtc(encoder->base.crtc)->pipe;
3159 mutex_lock(&dev_priv->sb_lock);
3161 /* disable left/right clock distribution */
3162 if (pipe != PIPE_B) {
3163 val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW5_CH0);
3164 val &= ~(CHV_BUFLEFTENA1_MASK | CHV_BUFRIGHTENA1_MASK);
3165 vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW5_CH0, val);
3167 val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW1_CH1);
3168 val &= ~(CHV_BUFLEFTENA2_MASK | CHV_BUFRIGHTENA2_MASK);
3169 vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW1_CH1, val);
3172 mutex_unlock(&dev_priv->sb_lock);
3175 * Leave the power down bit cleared for at least one
3176 * lane so that chv_powergate_phy_ch() will power
3177 * on something when the channel is otherwise unused.
3178 * When the port is off and the override is removed
3179 * the lanes power down anyway, so otherwise it doesn't
3180 * really matter what the state of power down bits is
3183 chv_phy_powergate_lanes(encoder, false, 0x0);
3187 * Native read with retry for link status and receiver capability reads for
3188 * cases where the sink may still be asleep.
3190 * Sinks are *supposed* to come up within 1ms from an off state, but we're also
3191 * supposed to retry 3 times per the spec.
3194 intel_dp_dpcd_read_wake(struct drm_dp_aux *aux, unsigned int offset,
3195 void *buffer, size_t size)
3201 * Sometime we just get the same incorrect byte repeated
3202 * over the entire buffer. Doing just one throw away read
3203 * initially seems to "solve" it.
3205 drm_dp_dpcd_read(aux, DP_DPCD_REV, buffer, 1);
3207 for (i = 0; i < 3; i++) {
3208 ret = drm_dp_dpcd_read(aux, offset, buffer, size);
3218 * Fetch AUX CH registers 0x202 - 0x207 which contain
3219 * link status information
3222 intel_dp_get_link_status(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_STATUS_SIZE])
3224 return intel_dp_dpcd_read_wake(&intel_dp->aux,
3227 DP_LINK_STATUS_SIZE) == DP_LINK_STATUS_SIZE;
3230 /* These are source-specific values. */
3232 intel_dp_voltage_max(struct intel_dp *intel_dp)
3234 struct drm_device *dev = intel_dp_to_dev(intel_dp);
3235 struct drm_i915_private *dev_priv = dev->dev_private;
3236 enum port port = dp_to_dig_port(intel_dp)->port;
3238 if (IS_BROXTON(dev))
3239 return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
3240 else if (INTEL_INFO(dev)->gen >= 9) {
3241 if (dev_priv->edp_low_vswing && port == PORT_A)
3242 return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
3243 return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
3244 } else if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
3245 return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
3246 else if (IS_GEN7(dev) && port == PORT_A)
3247 return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
3248 else if (HAS_PCH_CPT(dev) && port != PORT_A)
3249 return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
3251 return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
3255 intel_dp_pre_emphasis_max(struct intel_dp *intel_dp, uint8_t voltage_swing)
3257 struct drm_device *dev = intel_dp_to_dev(intel_dp);
3258 enum port port = dp_to_dig_port(intel_dp)->port;
3260 if (INTEL_INFO(dev)->gen >= 9) {
3261 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
3262 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3263 return DP_TRAIN_PRE_EMPH_LEVEL_3;
3264 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3265 return DP_TRAIN_PRE_EMPH_LEVEL_2;
3266 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3267 return DP_TRAIN_PRE_EMPH_LEVEL_1;
3268 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
3269 return DP_TRAIN_PRE_EMPH_LEVEL_0;
3271 return DP_TRAIN_PRE_EMPH_LEVEL_0;
3273 } else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
3274 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
3275 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3276 return DP_TRAIN_PRE_EMPH_LEVEL_3;
3277 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3278 return DP_TRAIN_PRE_EMPH_LEVEL_2;
3279 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3280 return DP_TRAIN_PRE_EMPH_LEVEL_1;
3281 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
3283 return DP_TRAIN_PRE_EMPH_LEVEL_0;
3285 } else if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
3286 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
3287 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3288 return DP_TRAIN_PRE_EMPH_LEVEL_3;
3289 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3290 return DP_TRAIN_PRE_EMPH_LEVEL_2;
3291 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3292 return DP_TRAIN_PRE_EMPH_LEVEL_1;
3293 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
3295 return DP_TRAIN_PRE_EMPH_LEVEL_0;
3297 } else if (IS_GEN7(dev) && port == PORT_A) {
3298 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
3299 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3300 return DP_TRAIN_PRE_EMPH_LEVEL_2;
3301 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3302 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3303 return DP_TRAIN_PRE_EMPH_LEVEL_1;
3305 return DP_TRAIN_PRE_EMPH_LEVEL_0;
3308 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
3309 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3310 return DP_TRAIN_PRE_EMPH_LEVEL_2;
3311 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3312 return DP_TRAIN_PRE_EMPH_LEVEL_2;
3313 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3314 return DP_TRAIN_PRE_EMPH_LEVEL_1;
3315 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
3317 return DP_TRAIN_PRE_EMPH_LEVEL_0;
3322 static uint32_t vlv_signal_levels(struct intel_dp *intel_dp)
3324 struct drm_device *dev = intel_dp_to_dev(intel_dp);
3325 struct drm_i915_private *dev_priv = dev->dev_private;
3326 struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
3327 struct intel_crtc *intel_crtc =
3328 to_intel_crtc(dport->base.base.crtc);
3329 unsigned long demph_reg_value, preemph_reg_value,
3330 uniqtranscale_reg_value;
3331 uint8_t train_set = intel_dp->train_set[0];
3332 enum dpio_channel port = vlv_dport_to_channel(dport);
3333 int pipe = intel_crtc->pipe;
3335 switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
3336 case DP_TRAIN_PRE_EMPH_LEVEL_0:
3337 preemph_reg_value = 0x0004000;
3338 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3339 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3340 demph_reg_value = 0x2B405555;
3341 uniqtranscale_reg_value = 0x552AB83A;
3343 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3344 demph_reg_value = 0x2B404040;
3345 uniqtranscale_reg_value = 0x5548B83A;
3347 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3348 demph_reg_value = 0x2B245555;
3349 uniqtranscale_reg_value = 0x5560B83A;
3351 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
3352 demph_reg_value = 0x2B405555;
3353 uniqtranscale_reg_value = 0x5598DA3A;
3359 case DP_TRAIN_PRE_EMPH_LEVEL_1:
3360 preemph_reg_value = 0x0002000;
3361 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3362 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3363 demph_reg_value = 0x2B404040;
3364 uniqtranscale_reg_value = 0x5552B83A;
3366 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3367 demph_reg_value = 0x2B404848;
3368 uniqtranscale_reg_value = 0x5580B83A;
3370 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3371 demph_reg_value = 0x2B404040;
3372 uniqtranscale_reg_value = 0x55ADDA3A;
3378 case DP_TRAIN_PRE_EMPH_LEVEL_2:
3379 preemph_reg_value = 0x0000000;
3380 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3381 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3382 demph_reg_value = 0x2B305555;
3383 uniqtranscale_reg_value = 0x5570B83A;
3385 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3386 demph_reg_value = 0x2B2B4040;
3387 uniqtranscale_reg_value = 0x55ADDA3A;
3393 case DP_TRAIN_PRE_EMPH_LEVEL_3:
3394 preemph_reg_value = 0x0006000;
3395 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3396 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3397 demph_reg_value = 0x1B405555;
3398 uniqtranscale_reg_value = 0x55ADDA3A;
3408 mutex_lock(&dev_priv->sb_lock);
3409 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW5(port), 0x00000000);
3410 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW4(port), demph_reg_value);
3411 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW2(port),
3412 uniqtranscale_reg_value);
3413 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW3(port), 0x0C782040);
3414 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW11(port), 0x00030000);
3415 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW9(port), preemph_reg_value);
3416 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW5(port), 0x80000000);
3417 mutex_unlock(&dev_priv->sb_lock);
3422 static bool chv_need_uniq_trans_scale(uint8_t train_set)
3424 return (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) == DP_TRAIN_PRE_EMPH_LEVEL_0 &&
3425 (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) == DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
3428 static uint32_t chv_signal_levels(struct intel_dp *intel_dp)
3430 struct drm_device *dev = intel_dp_to_dev(intel_dp);
3431 struct drm_i915_private *dev_priv = dev->dev_private;
3432 struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
3433 struct intel_crtc *intel_crtc = to_intel_crtc(dport->base.base.crtc);
3434 u32 deemph_reg_value, margin_reg_value, val;
3435 uint8_t train_set = intel_dp->train_set[0];
3436 enum dpio_channel ch = vlv_dport_to_channel(dport);
3437 enum pipe pipe = intel_crtc->pipe;
3440 switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
3441 case DP_TRAIN_PRE_EMPH_LEVEL_0:
3442 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3443 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3444 deemph_reg_value = 128;
3445 margin_reg_value = 52;
3447 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3448 deemph_reg_value = 128;
3449 margin_reg_value = 77;
3451 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3452 deemph_reg_value = 128;
3453 margin_reg_value = 102;
3455 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
3456 deemph_reg_value = 128;
3457 margin_reg_value = 154;
3458 /* FIXME extra to set for 1200 */
3464 case DP_TRAIN_PRE_EMPH_LEVEL_1:
3465 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3466 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3467 deemph_reg_value = 85;
3468 margin_reg_value = 78;
3470 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3471 deemph_reg_value = 85;
3472 margin_reg_value = 116;
3474 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3475 deemph_reg_value = 85;
3476 margin_reg_value = 154;
3482 case DP_TRAIN_PRE_EMPH_LEVEL_2:
3483 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3484 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3485 deemph_reg_value = 64;
3486 margin_reg_value = 104;
3488 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3489 deemph_reg_value = 64;
3490 margin_reg_value = 154;
3496 case DP_TRAIN_PRE_EMPH_LEVEL_3:
3497 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3498 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3499 deemph_reg_value = 43;
3500 margin_reg_value = 154;
3510 mutex_lock(&dev_priv->sb_lock);
3512 /* Clear calc init */
3513 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW10(ch));
3514 val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3);
3515 val &= ~(DPIO_PCS_TX1DEEMP_MASK | DPIO_PCS_TX2DEEMP_MASK);
3516 val |= DPIO_PCS_TX1DEEMP_9P5 | DPIO_PCS_TX2DEEMP_9P5;
3517 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW10(ch), val);
3519 if (intel_crtc->config->lane_count > 2) {
3520 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW10(ch));
3521 val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3);
3522 val &= ~(DPIO_PCS_TX1DEEMP_MASK | DPIO_PCS_TX2DEEMP_MASK);
3523 val |= DPIO_PCS_TX1DEEMP_9P5 | DPIO_PCS_TX2DEEMP_9P5;
3524 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW10(ch), val);
3527 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW9(ch));
3528 val &= ~(DPIO_PCS_TX1MARGIN_MASK | DPIO_PCS_TX2MARGIN_MASK);
3529 val |= DPIO_PCS_TX1MARGIN_000 | DPIO_PCS_TX2MARGIN_000;
3530 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW9(ch), val);
3532 if (intel_crtc->config->lane_count > 2) {
3533 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW9(ch));
3534 val &= ~(DPIO_PCS_TX1MARGIN_MASK | DPIO_PCS_TX2MARGIN_MASK);
3535 val |= DPIO_PCS_TX1MARGIN_000 | DPIO_PCS_TX2MARGIN_000;
3536 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW9(ch), val);
3539 /* Program swing deemph */
3540 for (i = 0; i < intel_crtc->config->lane_count; i++) {
3541 val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW4(ch, i));
3542 val &= ~DPIO_SWING_DEEMPH9P5_MASK;
3543 val |= deemph_reg_value << DPIO_SWING_DEEMPH9P5_SHIFT;
3544 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW4(ch, i), val);
3547 /* Program swing margin */
3548 for (i = 0; i < intel_crtc->config->lane_count; i++) {
3549 val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW2(ch, i));
3551 val &= ~DPIO_SWING_MARGIN000_MASK;
3552 val |= margin_reg_value << DPIO_SWING_MARGIN000_SHIFT;
3555 * Supposedly this value shouldn't matter when unique transition
3556 * scale is disabled, but in fact it does matter. Let's just
3557 * always program the same value and hope it's OK.
3559 val &= ~(0xff << DPIO_UNIQ_TRANS_SCALE_SHIFT);
3560 val |= 0x9a << DPIO_UNIQ_TRANS_SCALE_SHIFT;
3562 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW2(ch, i), val);
3566 * The document said it needs to set bit 27 for ch0 and bit 26
3567 * for ch1. Might be a typo in the doc.
3568 * For now, for this unique transition scale selection, set bit
3569 * 27 for ch0 and ch1.
3571 for (i = 0; i < intel_crtc->config->lane_count; i++) {
3572 val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW3(ch, i));
3573 if (chv_need_uniq_trans_scale(train_set))
3574 val |= DPIO_TX_UNIQ_TRANS_SCALE_EN;
3576 val &= ~DPIO_TX_UNIQ_TRANS_SCALE_EN;
3577 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW3(ch, i), val);
3580 /* Start swing calculation */
3581 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW10(ch));
3582 val |= DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3;
3583 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW10(ch), val);
3585 if (intel_crtc->config->lane_count > 2) {
3586 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW10(ch));
3587 val |= DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3;
3588 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW10(ch), val);
3591 mutex_unlock(&dev_priv->sb_lock);
3597 gen4_signal_levels(uint8_t train_set)
3599 uint32_t signal_levels = 0;
3601 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3602 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3604 signal_levels |= DP_VOLTAGE_0_4;
3606 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3607 signal_levels |= DP_VOLTAGE_0_6;
3609 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3610 signal_levels |= DP_VOLTAGE_0_8;
3612 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
3613 signal_levels |= DP_VOLTAGE_1_2;
3616 switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
3617 case DP_TRAIN_PRE_EMPH_LEVEL_0:
3619 signal_levels |= DP_PRE_EMPHASIS_0;
3621 case DP_TRAIN_PRE_EMPH_LEVEL_1:
3622 signal_levels |= DP_PRE_EMPHASIS_3_5;
3624 case DP_TRAIN_PRE_EMPH_LEVEL_2:
3625 signal_levels |= DP_PRE_EMPHASIS_6;
3627 case DP_TRAIN_PRE_EMPH_LEVEL_3:
3628 signal_levels |= DP_PRE_EMPHASIS_9_5;
3631 return signal_levels;
3634 /* Gen6's DP voltage swing and pre-emphasis control */
3636 gen6_edp_signal_levels(uint8_t train_set)
3638 int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
3639 DP_TRAIN_PRE_EMPHASIS_MASK);
3640 switch (signal_levels) {
3641 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3642 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3643 return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B;
3644 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3645 return EDP_LINK_TRAIN_400MV_3_5DB_SNB_B;
3646 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2:
3647 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_2:
3648 return EDP_LINK_TRAIN_400_600MV_6DB_SNB_B;
3649 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3650 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3651 return EDP_LINK_TRAIN_600_800MV_3_5DB_SNB_B;
3652 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3653 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3654 return EDP_LINK_TRAIN_800_1200MV_0DB_SNB_B;
3656 DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
3657 "0x%x\n", signal_levels);
3658 return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B;
3662 /* Gen7's DP voltage swing and pre-emphasis control */
3664 gen7_edp_signal_levels(uint8_t train_set)
3666 int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
3667 DP_TRAIN_PRE_EMPHASIS_MASK);
3668 switch (signal_levels) {
3669 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3670 return EDP_LINK_TRAIN_400MV_0DB_IVB;
3671 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3672 return EDP_LINK_TRAIN_400MV_3_5DB_IVB;
3673 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2:
3674 return EDP_LINK_TRAIN_400MV_6DB_IVB;
3676 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3677 return EDP_LINK_TRAIN_600MV_0DB_IVB;
3678 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3679 return EDP_LINK_TRAIN_600MV_3_5DB_IVB;
3681 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3682 return EDP_LINK_TRAIN_800MV_0DB_IVB;
3683 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3684 return EDP_LINK_TRAIN_800MV_3_5DB_IVB;
3687 DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
3688 "0x%x\n", signal_levels);
3689 return EDP_LINK_TRAIN_500MV_0DB_IVB;
/*
 * Push the current training vswing/pre-emphasis selection
 * (intel_dp->train_set[0]) into the port register.  A platform-specific
 * helper produces the register encoding; it is merged into the cached
 * intel_dp->DP value under `mask` and written out.
 * NOTE(review): listing is elided here — the DDI branch preceding the
 * BROXTON check is not fully visible; verify against the full file.
 */
3694 intel_dp_set_signal_levels(struct intel_dp *intel_dp)
3696 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3697 enum port port = intel_dig_port->port;
3698 struct drm_device *dev = intel_dig_port->base.base.dev;
3699 struct drm_i915_private *dev_priv = to_i915(dev);
3700 uint32_t signal_levels, mask = 0;
3701 uint8_t train_set = intel_dp->train_set[0];
/* DDI platforms: full levels come from the DDI buffer translation table */
3704 signal_levels = ddi_signal_levels(intel_dp);
3706 if (IS_BROXTON(dev))
3709 mask = DDI_BUF_EMP_MASK;
/* VLV/CHV program levels through sideband (PHY) writes, so mask stays 0 */
3710 } else if (IS_CHERRYVIEW(dev)) {
3711 signal_levels = chv_signal_levels(intel_dp);
3712 } else if (IS_VALLEYVIEW(dev)) {
3713 signal_levels = vlv_signal_levels(intel_dp);
3714 } else if (IS_GEN7(dev) && port == PORT_A) {
3715 signal_levels = gen7_edp_signal_levels(train_set);
3716 mask = EDP_LINK_TRAIN_VOL_EMP_MASK_IVB;
3717 } else if (IS_GEN6(dev) && port == PORT_A) {
3718 signal_levels = gen6_edp_signal_levels(train_set);
3719 mask = EDP_LINK_TRAIN_VOL_EMP_MASK_SNB;
3721 signal_levels = gen4_signal_levels(train_set);
3722 mask = DP_VOLTAGE_MASK | DP_PRE_EMPHASIS_MASK;
3726 DRM_DEBUG_KMS("Using signal levels %08x\n", signal_levels);
3728 DRM_DEBUG_KMS("Using vswing level %d\n",
3729 train_set & DP_TRAIN_VOLTAGE_SWING_MASK);
3730 DRM_DEBUG_KMS("Using pre-emphasis level %d\n",
3731 (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) >>
3732 DP_TRAIN_PRE_EMPHASIS_SHIFT);
/* Merge the new encoding into the cached register value and latch it */
3734 intel_dp->DP = (intel_dp->DP & ~mask) | signal_levels;
3736 I915_WRITE(intel_dp->output_reg, intel_dp->DP);
/* posting read flushes the write to hardware */
3737 POSTING_READ(intel_dp->output_reg);
3741 intel_dp_program_link_training_pattern(struct intel_dp *intel_dp,
3742 uint8_t dp_train_pat)
3744 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3745 struct drm_i915_private *dev_priv =
3746 to_i915(intel_dig_port->base.base.dev);
3748 _intel_dp_set_link_train(intel_dp, &intel_dp->DP, dp_train_pat);
3750 I915_WRITE(intel_dp->output_reg, intel_dp->DP);
3751 POSTING_READ(intel_dp->output_reg);
/*
 * Switch the DDI transport to idle-pattern transmission after link
 * training completes.
 * NOTE(review): the listing is elided — the early-returns for PORT_A
 * (and the wait_for timeout argument) are not visible; confirm against
 * the full file.
 */
3754 void intel_dp_set_idle_link_train(struct intel_dp *intel_dp)
3756 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3757 struct drm_device *dev = intel_dig_port->base.base.dev;
3758 struct drm_i915_private *dev_priv = dev->dev_private;
3759 enum port port = intel_dig_port->port;
/* Request idle pattern in the DP transport control register */
3765 val = I915_READ(DP_TP_CTL(port));
3766 val &= ~DP_TP_CTL_LINK_TRAIN_MASK;
3767 val |= DP_TP_CTL_LINK_TRAIN_IDLE;
3768 I915_WRITE(DP_TP_CTL(port), val);
3771 * On PORT_A we can have only eDP in SST mode. There the only reason
3772 * we need to set idle transmission mode is to work around a HW issue
3773 * where we enable the pipe while not in idle link-training mode.
3774 * In this case there is requirement to wait for a minimum number of
3775 * idle patterns to be sent.
/* Poll hardware until it confirms idle-pattern transmission is done */
3780 if (wait_for((I915_READ(DP_TP_STATUS(port)) & DP_TP_STATUS_IDLE_DONE),
3782 DRM_ERROR("Timed out waiting for DP idle patterns\n");
/*
 * Take the (non-DDI) DP link down: drop back to the idle training
 * pattern, then clear the port-enable and audio bits.  Includes the
 * IBX transcoder-A workaround needed when the port was on pipe B.
 * Ordering of register writes here is a hardware sequence — do not
 * reorder.
 */
3786 intel_dp_link_down(struct intel_dp *intel_dp)
3788 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3789 struct intel_crtc *crtc = to_intel_crtc(intel_dig_port->base.base.crtc);
3790 enum port port = intel_dig_port->port;
3791 struct drm_device *dev = intel_dig_port->base.base.dev;
3792 struct drm_i915_private *dev_priv = dev->dev_private;
3793 uint32_t DP = intel_dp->DP;
/* DDI platforms have their own disable path; getting here is a bug */
3795 if (WARN_ON(HAS_DDI(dev)))
/* Port is expected to still be enabled at this point */
3798 if (WARN_ON((I915_READ(intel_dp->output_reg) & DP_PORT_EN) == 0))
3801 DRM_DEBUG_KMS("\n");
/* Select the platform-correct idle-pattern encoding */
3803 if ((IS_GEN7(dev) && port == PORT_A) ||
3804 (HAS_PCH_CPT(dev) && port != PORT_A)) {
3805 DP &= ~DP_LINK_TRAIN_MASK_CPT;
3806 DP |= DP_LINK_TRAIN_PAT_IDLE_CPT;
3808 if (IS_CHERRYVIEW(dev))
3809 DP &= ~DP_LINK_TRAIN_MASK_CHV;
3811 DP &= ~DP_LINK_TRAIN_MASK;
3812 DP |= DP_LINK_TRAIN_PAT_IDLE;
3814 I915_WRITE(intel_dp->output_reg, DP);
3815 POSTING_READ(intel_dp->output_reg);
/* Now actually disable the port and its audio output */
3817 DP &= ~(DP_PORT_EN | DP_AUDIO_OUTPUT_ENABLE);
3818 I915_WRITE(intel_dp->output_reg, DP);
3819 POSTING_READ(intel_dp->output_reg);
3822 * HW workaround for IBX, we need to move the port
3823 * to transcoder A after disabling it to allow the
3824 * matching HDMI port to be enabled on transcoder A.
3826 if (HAS_PCH_IBX(dev) && crtc->pipe == PIPE_B && port != PORT_A) {
3828 * We get CPU/PCH FIFO underruns on the other pipe when
3829 * doing the workaround. Sweep them under the rug.
3831 intel_set_cpu_fifo_underrun_reporting(dev_priv, PIPE_A, false);
3832 intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, false);
3834 /* always enable with pattern 1 (as per spec) */
3835 DP &= ~(DP_PIPEB_SELECT | DP_LINK_TRAIN_MASK);
3836 DP |= DP_PORT_EN | DP_LINK_TRAIN_PAT_1;
3837 I915_WRITE(intel_dp->output_reg, DP);
3838 POSTING_READ(intel_dp->output_reg);
/* ...then disable again, now parked on transcoder A */
3841 I915_WRITE(intel_dp->output_reg, DP);
3842 POSTING_READ(intel_dp->output_reg);
3844 intel_wait_for_vblank_if_active(dev_priv->dev, PIPE_A);
3845 intel_set_cpu_fifo_underrun_reporting(dev_priv, PIPE_A, true);
3846 intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, true);
/* Give the panel time to power down before any new AUX traffic */
3849 msleep(intel_dp->panel_power_down_delay);
/*
 * Read and cache the sink's DPCD receiver capabilities.
 * Also probes PSR/PSR2 support on eDP, intermediate link rates
 * (eDP 1.4+), and downstream-port info.
 * Returns false when the AUX read fails or no DPCD is present.
 */
3855 intel_dp_get_dpcd(struct intel_dp *intel_dp)
3857 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
3858 struct drm_device *dev = dig_port->base.base.dev;
3859 struct drm_i915_private *dev_priv = dev->dev_private;
/* Fetch the base receiver-capability block starting at DPCD 0x000 */
3862 if (intel_dp_dpcd_read_wake(&intel_dp->aux, 0x000, intel_dp->dpcd,
3863 sizeof(intel_dp->dpcd)) < 0)
3864 return false; /* aux transfer failed */
3866 DRM_DEBUG_KMS("DPCD: %*ph\n", (int) sizeof(intel_dp->dpcd), intel_dp->dpcd);
3868 if (intel_dp->dpcd[DP_DPCD_REV] == 0)
3869 return false; /* DPCD not present */
3871 /* Check if the panel supports PSR */
3872 memset(intel_dp->psr_dpcd, 0, sizeof(intel_dp->psr_dpcd));
3873 if (is_edp(intel_dp)) {
3874 intel_dp_dpcd_read_wake(&intel_dp->aux, DP_PSR_SUPPORT,
3876 sizeof(intel_dp->psr_dpcd));
3877 if (intel_dp->psr_dpcd[0] & DP_PSR_IS_SUPPORTED) {
3878 dev_priv->psr.sink_support = true;
3879 DRM_DEBUG_KMS("Detected EDP PSR Panel.\n");
/* PSR2 (gen9+) additionally requires sink-side AUX frame sync */
3882 if (INTEL_INFO(dev)->gen >= 9 &&
3883 (intel_dp->psr_dpcd[0] & DP_PSR2_IS_SUPPORTED)) {
3884 uint8_t frame_sync_cap;
3886 dev_priv->psr.sink_support = true;
3887 intel_dp_dpcd_read_wake(&intel_dp->aux,
3888 DP_SINK_DEVICE_AUX_FRAME_SYNC_CAP,
3889 &frame_sync_cap, 1);
3890 dev_priv->psr.aux_frame_sync = frame_sync_cap ? true : false;
3891 /* PSR2 needs frame sync as well */
3892 dev_priv->psr.psr2_support = dev_priv->psr.aux_frame_sync;
3893 DRM_DEBUG_KMS("PSR2 %s on sink",
3894 dev_priv->psr.psr2_support ? "supported" : "not supported");
3898 DRM_DEBUG_KMS("Display Port TPS3 support: source %s, sink %s\n",
3899 yesno(intel_dp_source_supports_hbr2(intel_dp)),
3900 yesno(drm_dp_tps3_supported(intel_dp->dpcd)));
3902 /* Intermediate frequency support */
3903 if (is_edp(intel_dp) &&
3904 (intel_dp->dpcd[DP_EDP_CONFIGURATION_CAP] & DP_DPCD_DISPLAY_CONTROL_CAPABLE) &&
3905 (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_EDP_DPCD_REV, &rev, 1) == 1) &&
3906 (rev >= 0x03)) { /* eDp v1.4 or higher */
3907 __le16 sink_rates[DP_MAX_SUPPORTED_RATES];
3910 intel_dp_dpcd_read_wake(&intel_dp->aux,
3911 DP_SUPPORTED_LINK_RATES,
3913 sizeof(sink_rates));
3915 for (i = 0; i < ARRAY_SIZE(sink_rates); i++) {
3916 int val = le16_to_cpu(sink_rates[i]);
3921 /* Value read is in kHz while drm clock is saved in deca-kHz */
3922 intel_dp->sink_rates[i] = (val * 200) / 10;
3924 intel_dp->num_sink_rates = i;
3927 intel_dp_print_rates(intel_dp);
3929 if (!(intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
3930 DP_DWN_STRM_PORT_PRESENT))
3931 return true; /* native DP sink */
3933 if (intel_dp->dpcd[DP_DPCD_REV] == 0x10)
3934 return true; /* no per-port downstream info */
/* Cache the per-port downstream capability block for branch devices */
3936 if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_DOWNSTREAM_PORT_0,
3937 intel_dp->downstream_ports,
3938 DP_MAX_DOWNSTREAM_PORTS) < 0)
3939 return false; /* downstream port status fetch failed */
3945 intel_dp_probe_oui(struct intel_dp *intel_dp)
3949 if (!(intel_dp->dpcd[DP_DOWN_STREAM_PORT_COUNT] & DP_OUI_SUPPORT))
3952 if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_SINK_OUI, buf, 3) == 3)
3953 DRM_DEBUG_KMS("Sink OUI: %02hx%02hx%02hx\n",
3954 buf[0], buf[1], buf[2]);
3956 if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_BRANCH_OUI, buf, 3) == 3)
3957 DRM_DEBUG_KMS("Branch OUI: %02hx%02hx%02hx\n",
3958 buf[0], buf[1], buf[2]);
3962 intel_dp_probe_mst(struct intel_dp *intel_dp)
3966 if (!intel_dp->can_mst)
3969 if (intel_dp->dpcd[DP_DPCD_REV] < 0x12)
3972 if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_MSTM_CAP, buf, 1)) {
3973 if (buf[0] & DP_MST_CAP) {
3974 DRM_DEBUG_KMS("Sink is MST capable\n");
3975 intel_dp->is_mst = true;
3977 DRM_DEBUG_KMS("Sink is not MST capable\n");
3978 intel_dp->is_mst = false;
3982 drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst);
3983 return intel_dp->is_mst;
/*
 * Stop sink-side CRC calculation (DP_TEST_SINK) and wait for the
 * sink's CRC count to drain to zero, then re-enable IPS which
 * intel_dp_sink_crc_start() disabled.
 */
3986 static int intel_dp_sink_crc_stop(struct intel_dp *intel_dp)
3988 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
3989 struct drm_device *dev = dig_port->base.base.dev;
3990 struct intel_crtc *intel_crtc = to_intel_crtc(dig_port->base.base.crtc);
3996 if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK, &buf) < 0) {
3997 DRM_DEBUG_KMS("Sink CRC couldn't be stopped properly\n");
/* Clear the start bit to ask the sink to stop computing CRCs */
4002 if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_SINK,
4003 buf & ~DP_TEST_SINK_START) < 0) {
4004 DRM_DEBUG_KMS("Sink CRC couldn't be stopped properly\n");
/* Poll once per vblank until the sink's CRC count reads zero */
4010 intel_wait_for_vblank(dev, intel_crtc->pipe);
4012 if (drm_dp_dpcd_readb(&intel_dp->aux,
4013 DP_TEST_SINK_MISC, &buf) < 0) {
4017 count = buf & DP_TEST_COUNT_MASK;
4018 } while (--attempts && count);
4020 if (attempts == 0) {
4021 DRM_DEBUG_KMS("TIMEOUT: Sink CRC counter is not zeroed after calculation is stopped\n");
/* Undo the IPS disable done when CRC capture was started */
4026 hsw_enable_ips(intel_crtc);
/*
 * Begin sink-side CRC calculation: verify the sink supports CRC,
 * stop any capture already running, disable IPS (it perturbs the
 * image and hence the CRC), then set DP_TEST_SINK_START.
 */
4030 static int intel_dp_sink_crc_start(struct intel_dp *intel_dp)
4032 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
4033 struct drm_device *dev = dig_port->base.base.dev;
4034 struct intel_crtc *intel_crtc = to_intel_crtc(dig_port->base.base.crtc);
4038 if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK_MISC, &buf) < 0)
4041 if (!(buf & DP_TEST_CRC_SUPPORTED))
4044 if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK, &buf) < 0)
/* A capture is already running: stop it cleanly first */
4047 if (buf & DP_TEST_SINK_START) {
4048 ret = intel_dp_sink_crc_stop(intel_dp);
4053 hsw_disable_ips(intel_crtc);
4055 if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_SINK,
4056 buf | DP_TEST_SINK_START) < 0) {
/* Start failed: restore IPS before reporting the error */
4057 hsw_enable_ips(intel_crtc);
4061 intel_wait_for_vblank(dev, intel_crtc->pipe);
/*
 * Capture one sink-computed frame CRC into @crc (6 bytes: R/Cr, G/Y,
 * B/Cb).  Starts capture, waits up to several vblanks for the sink's
 * CRC count to become non-zero, reads DP_TEST_CRC_R_CR, then stops
 * capture again.
 */
4065 int intel_dp_sink_crc(struct intel_dp *intel_dp, u8 *crc)
4067 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
4068 struct drm_device *dev = dig_port->base.base.dev;
4069 struct intel_crtc *intel_crtc = to_intel_crtc(dig_port->base.base.crtc);
4074 ret = intel_dp_sink_crc_start(intel_dp);
/* Wait a vblank per attempt for the sink to publish a CRC */
4079 intel_wait_for_vblank(dev, intel_crtc->pipe);
4081 if (drm_dp_dpcd_readb(&intel_dp->aux,
4082 DP_TEST_SINK_MISC, &buf) < 0) {
4086 count = buf & DP_TEST_COUNT_MASK;
4088 } while (--attempts && count == 0);
4090 if (attempts == 0) {
4091 DRM_ERROR("Panel is unable to calculate any CRC after 6 vblanks\n");
4096 if (drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_CRC_R_CR, crc, 6) < 0) {
/* Always stop the capture, even on the error paths above */
4102 intel_dp_sink_crc_stop(intel_dp);
4107 intel_dp_get_sink_irq(struct intel_dp *intel_dp, u8 *sink_irq_vector)
4109 return intel_dp_dpcd_read_wake(&intel_dp->aux,
4110 DP_DEVICE_SERVICE_IRQ_VECTOR,
4111 sink_irq_vector, 1) == 1;
/*
 * Read the 14-byte ESI (event status indicator) block used by MST
 * sinks, presumably starting at DP_SINK_COUNT_ESI — the register
 * argument line is elided in this listing; confirm against the full
 * file.  Success/failure handling also falls outside this view.
 */
4115 intel_dp_get_sink_irq_esi(struct intel_dp *intel_dp, u8 *sink_irq_vector)
4119 ret = intel_dp_dpcd_read_wake(&intel_dp->aux,
4121 sink_irq_vector, 14);
4128 static uint8_t intel_dp_autotest_link_training(struct intel_dp *intel_dp)
4130 uint8_t test_result = DP_TEST_ACK;
4134 static uint8_t intel_dp_autotest_video_pattern(struct intel_dp *intel_dp)
4136 uint8_t test_result = DP_TEST_NAK;
/*
 * DP compliance EDID-read test (CTS 4.2.2.x): if the preceding EDID
 * read failed or was unreliable, request failsafe-resolution mode;
 * otherwise write the checksum of the last EDID block back to the
 * sink and ACK with the checksum-written flag.
 */
4140 static uint8_t intel_dp_autotest_edid(struct intel_dp *intel_dp)
4142 uint8_t test_result = DP_TEST_NAK;
4143 struct intel_connector *intel_connector = intel_dp->attached_connector;
4144 struct drm_connector *connector = &intel_connector->base;
4146 if (intel_connector->detect_edid == NULL ||
4147 connector->edid_corrupt ||
4148 intel_dp->aux.i2c_defer_count > 6) {
4149 /* Check EDID read for NACKs, DEFERs and corruption
4150 * (DP CTS 1.2 Core r1.1)
4151 * 4.2.2.4 : Failed EDID read, I2C_NAK
4152 * 4.2.2.5 : Failed EDID read, I2C_DEFER
4153 * 4.2.2.6 : EDID corruption detected
4154 * Use failsafe mode for all cases
4156 if (intel_dp->aux.i2c_nack_count > 0 ||
4157 intel_dp->aux.i2c_defer_count > 0)
4158 DRM_DEBUG_KMS("EDID read had %d NACKs, %d DEFERs\n",
4159 intel_dp->aux.i2c_nack_count,
4160 intel_dp->aux.i2c_defer_count);
4161 intel_dp->compliance_test_data = INTEL_DP_RESOLUTION_FAILSAFE;
4163 struct edid *block = intel_connector->detect_edid;
4165 /* We have to write the checksum
4166 * of the last block read
4168 block += intel_connector->detect_edid->extensions;
4170 if (!drm_dp_dpcd_write(&intel_dp->aux,
4171 DP_TEST_EDID_CHECKSUM,
4174 DRM_DEBUG_KMS("Failed to write EDID checksum\n");
4176 test_result = DP_TEST_ACK | DP_TEST_EDID_CHECKSUM_WRITE;
4177 intel_dp->compliance_test_data = INTEL_DP_RESOLUTION_STANDARD;
4180 /* Set test active flag here so userspace doesn't interrupt things */
4181 intel_dp->compliance_test_active = 1;
4186 static uint8_t intel_dp_autotest_phy_pattern(struct intel_dp *intel_dp)
4188 uint8_t test_result = DP_TEST_NAK;
/*
 * Dispatch a sink-initiated DP compliance test: read DP_TEST_REQUEST,
 * run the matching autotest handler, and write the ACK/NAK response
 * back to the sink's DP_TEST_RESPONSE register.
 */
4192 static void intel_dp_handle_test_request(struct intel_dp *intel_dp)
4194 uint8_t response = DP_TEST_NAK;
4198 status = drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_REQUEST, &rxdata, 1);
4200 DRM_DEBUG_KMS("Could not read test request from sink\n");
/* Record the test type so userspace/compliance tools can see it */
4205 case DP_TEST_LINK_TRAINING:
4206 DRM_DEBUG_KMS("LINK_TRAINING test requested\n");
4207 intel_dp->compliance_test_type = DP_TEST_LINK_TRAINING;
4208 response = intel_dp_autotest_link_training(intel_dp);
4210 case DP_TEST_LINK_VIDEO_PATTERN:
4211 DRM_DEBUG_KMS("TEST_PATTERN test requested\n");
4212 intel_dp->compliance_test_type = DP_TEST_LINK_VIDEO_PATTERN;
4213 response = intel_dp_autotest_video_pattern(intel_dp);
4215 case DP_TEST_LINK_EDID_READ:
4216 DRM_DEBUG_KMS("EDID test requested\n");
4217 intel_dp->compliance_test_type = DP_TEST_LINK_EDID_READ;
4218 response = intel_dp_autotest_edid(intel_dp);
4220 case DP_TEST_LINK_PHY_TEST_PATTERN:
4221 DRM_DEBUG_KMS("PHY_PATTERN test requested\n");
4222 intel_dp->compliance_test_type = DP_TEST_LINK_PHY_TEST_PATTERN;
4223 response = intel_dp_autotest_phy_pattern(intel_dp);
4226 DRM_DEBUG_KMS("Invalid test request '%02x'\n", rxdata);
/* Report the handler's verdict back to the sink */
4231 status = drm_dp_dpcd_write(&intel_dp->aux,
4235 DRM_DEBUG_KMS("Could not write test response to sink\n");
/*
 * Service an MST sink interrupt: read the ESI block, retrain if
 * channel equalization was lost, hand the ESI to the MST topology
 * manager, and ack the handled events back to the sink (retrying the
 * ack a few times).  On ESI read failure the sink is assumed dead and
 * MST mode is torn down with a hotplug event.
 * NOTE(review): loop structure and return paths are partially elided
 * in this listing; confirm against the full file.
 */
4239 intel_dp_check_mst_status(struct intel_dp *intel_dp)
4243 if (intel_dp->is_mst) {
4248 bret = intel_dp_get_sink_irq_esi(intel_dp, esi);
4252 /* check link status - esi[10] = 0x200c */
4253 if (intel_dp->active_mst_links &&
4254 !drm_dp_channel_eq_ok(&esi[10], intel_dp->lane_count)) {
4255 DRM_DEBUG_KMS("channel EQ not ok, retraining\n");
4256 intel_dp_start_link_train(intel_dp);
4257 intel_dp_stop_link_train(intel_dp);
4260 DRM_DEBUG_KMS("got esi %3ph\n", esi);
4261 ret = drm_dp_mst_hpd_irq(&intel_dp->mst_mgr, esi, &handled);
/* Ack the serviced events; retry the DPCD write up to 3 times */
4264 for (retry = 0; retry < 3; retry++) {
4266 wret = drm_dp_dpcd_write(&intel_dp->aux,
4267 DP_SINK_COUNT_ESI+1,
/* Re-read the ESI in case more events arrived while handling */
4274 bret = intel_dp_get_sink_irq_esi(intel_dp, esi);
4276 DRM_DEBUG_KMS("got esi2 %3ph\n", esi);
/* ESI read failed: drop out of MST and notify userspace */
4284 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
4285 DRM_DEBUG_KMS("failed to get ESI - device may have failed\n");
4286 intel_dp->is_mst = false;
4287 drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst);
4288 /* send a hotplug event */
4289 drm_kms_helper_hotplug_event(intel_dig_port->base.base.dev);
4296 * According to DP spec
4299 * 2. Configure link according to Receiver Capabilities
4300 * 3. Use Link Training from 2.5.3.3 and 3.5.1.3
4301 * 4. Check link status on receipt of hot-plug interrupt
/*
 * Short-pulse (HPD IRQ) handler for SST: re-read link status and DPCD,
 * ack any sink service interrupt, and retrain the link when channel
 * equalization is no longer ok or a compliance link-training test was
 * requested.  Caller must hold connection_mutex.
 */
4304 intel_dp_check_link_status(struct intel_dp *intel_dp)
4306 struct drm_device *dev = intel_dp_to_dev(intel_dp);
4307 struct intel_encoder *intel_encoder = &dp_to_dig_port(intel_dp)->base;
4309 u8 link_status[DP_LINK_STATUS_SIZE];
4311 WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
4314 * Clearing compliance test variables to allow capturing
4315 * of values for next automated test request.
4317 intel_dp->compliance_test_active = 0;
4318 intel_dp->compliance_test_type = 0;
4319 intel_dp->compliance_test_data = 0;
/* Nothing to do if the encoder is not driving an active crtc */
4321 if (!intel_encoder->base.crtc)
4324 if (!to_intel_crtc(intel_encoder->base.crtc)->active)
4327 /* Try to read receiver status if the link appears to be up */
4328 if (!intel_dp_get_link_status(intel_dp, link_status)) {
4332 /* Now read the DPCD to see if it's actually running */
4333 if (!intel_dp_get_dpcd(intel_dp)) {
4337 /* Try to read the source of the interrupt */
4338 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
4339 intel_dp_get_sink_irq(intel_dp, &sink_irq_vector)) {
4340 /* Clear interrupt source */
4341 drm_dp_dpcd_writeb(&intel_dp->aux,
4342 DP_DEVICE_SERVICE_IRQ_VECTOR,
4345 if (sink_irq_vector & DP_AUTOMATED_TEST_REQUEST)
4346 DRM_DEBUG_DRIVER("Test request in short pulse not handled\n");
4347 if (sink_irq_vector & (DP_CP_IRQ | DP_SINK_SPECIFIC_IRQ))
4348 DRM_DEBUG_DRIVER("CP or sink specific irq unhandled\n");
4351 /* if link training is requested we should perform it always */
4352 if ((intel_dp->compliance_test_type == DP_TEST_LINK_TRAINING) ||
4353 (!drm_dp_channel_eq_ok(link_status, intel_dp->lane_count))) {
4354 DRM_DEBUG_KMS("%s: channel EQ not ok, retraining\n",
4355 intel_encoder->base.name);
4356 intel_dp_start_link_train(intel_dp);
4357 intel_dp_stop_link_train(intel_dp);
4361 /* XXX this is probably wrong for multiple downstream ports */
/*
 * Decide connection state from DPCD alone.  Native sinks are
 * "connected" once the DPCD reads; for branch devices it consults
 * SINK_COUNT (if HPD-aware), then probes DDC, then falls back to
 * "unknown" for port types that cannot be reliably detected.
 */
4362 static enum drm_connector_status
4363 intel_dp_detect_dpcd(struct intel_dp *intel_dp)
4365 uint8_t *dpcd = intel_dp->dpcd;
4368 if (!intel_dp_get_dpcd(intel_dp))
4369 return connector_status_disconnected;
4371 /* if there's no downstream port, we're done */
4372 if (!(dpcd[DP_DOWNSTREAMPORT_PRESENT] & DP_DWN_STRM_PORT_PRESENT))
4373 return connector_status_connected;
4375 /* If we're HPD-aware, SINK_COUNT changes dynamically */
4376 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
4377 intel_dp->downstream_ports[0] & DP_DS_PORT_HPD) {
4380 if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_SINK_COUNT,
4382 return connector_status_unknown;
/* non-zero SINK_COUNT means a downstream device is attached */
4384 return DP_GET_SINK_COUNT(reg) ? connector_status_connected
4385 : connector_status_disconnected;
4388 /* If no HPD, poke DDC gently */
4389 if (drm_probe_ddc(&intel_dp->aux.ddc))
4390 return connector_status_connected;
4392 /* Well we tried, say unknown for unreliable port types */
4393 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11) {
4394 type = intel_dp->downstream_ports[0] & DP_DS_PORT_TYPE_MASK;
4395 if (type == DP_DS_PORT_TYPE_VGA ||
4396 type == DP_DS_PORT_TYPE_NON_EDID)
4397 return connector_status_unknown;
/* DPCD 1.0: only the coarse downstream type field is available */
4399 type = intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
4400 DP_DWN_STRM_PORT_TYPE_MASK;
4401 if (type == DP_DWN_STRM_PORT_TYPE_ANALOG ||
4402 type == DP_DWN_STRM_PORT_TYPE_OTHER)
4403 return connector_status_unknown;
4406 /* Anything else is out of spec, warn and ignore */
4407 DRM_DEBUG_KMS("Broken DP branch device, ignoring\n");
4408 return connector_status_disconnected;
4411 static enum drm_connector_status
4412 edp_detect(struct intel_dp *intel_dp)
4414 struct drm_device *dev = intel_dp_to_dev(intel_dp);
4415 enum drm_connector_status status;
4417 status = intel_panel_detect(dev);
4418 if (status == connector_status_unknown)
4419 status = connector_status_connected;
4424 static bool ibx_digital_port_connected(struct drm_i915_private *dev_priv,
4425 struct intel_digital_port *port)
4429 switch (port->port) {
4433 bit = SDE_PORTB_HOTPLUG;
4436 bit = SDE_PORTC_HOTPLUG;
4439 bit = SDE_PORTD_HOTPLUG;
4442 MISSING_CASE(port->port);
4446 return I915_READ(SDEISR) & bit;
4449 static bool cpt_digital_port_connected(struct drm_i915_private *dev_priv,
4450 struct intel_digital_port *port)
4454 switch (port->port) {
4458 bit = SDE_PORTB_HOTPLUG_CPT;
4461 bit = SDE_PORTC_HOTPLUG_CPT;
4464 bit = SDE_PORTD_HOTPLUG_CPT;
4467 bit = SDE_PORTE_HOTPLUG_SPT;
4470 MISSING_CASE(port->port);
4474 return I915_READ(SDEISR) & bit;
4477 static bool g4x_digital_port_connected(struct drm_i915_private *dev_priv,
4478 struct intel_digital_port *port)
4482 switch (port->port) {
4484 bit = PORTB_HOTPLUG_LIVE_STATUS_G4X;
4487 bit = PORTC_HOTPLUG_LIVE_STATUS_G4X;
4490 bit = PORTD_HOTPLUG_LIVE_STATUS_G4X;
4493 MISSING_CASE(port->port);
4497 return I915_READ(PORT_HOTPLUG_STAT) & bit;
4500 static bool gm45_digital_port_connected(struct drm_i915_private *dev_priv,
4501 struct intel_digital_port *port)
4505 switch (port->port) {
4507 bit = PORTB_HOTPLUG_LIVE_STATUS_GM45;
4510 bit = PORTC_HOTPLUG_LIVE_STATUS_GM45;
4513 bit = PORTD_HOTPLUG_LIVE_STATUS_GM45;
4516 MISSING_CASE(port->port);
4520 return I915_READ(PORT_HOTPLUG_STAT) & bit;
4523 static bool bxt_digital_port_connected(struct drm_i915_private *dev_priv,
4524 struct intel_digital_port *intel_dig_port)
4526 struct intel_encoder *intel_encoder = &intel_dig_port->base;
4530 intel_hpd_pin_to_port(intel_encoder->hpd_pin, &port);
4533 bit = BXT_DE_PORT_HP_DDIA;
4536 bit = BXT_DE_PORT_HP_DDIB;
4539 bit = BXT_DE_PORT_HP_DDIC;
4546 return I915_READ(GEN8_DE_PORT_ISR) & bit;
4550 * intel_digital_port_connected - is the specified port connected?
4551 * @dev_priv: i915 private structure
4552 * @port: the port to test
4554 * Return %true if @port is connected, %false otherwise.
4556 bool intel_digital_port_connected(struct drm_i915_private *dev_priv,
4557 struct intel_digital_port *port)
4559 if (HAS_PCH_IBX(dev_priv))
4560 return ibx_digital_port_connected(dev_priv, port);
4561 else if (HAS_PCH_SPLIT(dev_priv))
4562 return cpt_digital_port_connected(dev_priv, port);
4563 else if (IS_BROXTON(dev_priv))
4564 return bxt_digital_port_connected(dev_priv, port);
4565 else if (IS_GM45(dev_priv))
4566 return gm45_digital_port_connected(dev_priv, port);
4568 return g4x_digital_port_connected(dev_priv, port);
4571 static struct edid *
4572 intel_dp_get_edid(struct intel_dp *intel_dp)
4574 struct intel_connector *intel_connector = intel_dp->attached_connector;
4576 /* use cached edid if we have one */
4577 if (intel_connector->edid) {
4579 if (IS_ERR(intel_connector->edid))
4582 return drm_edid_duplicate(intel_connector->edid);
4584 return drm_get_edid(&intel_connector->base,
4585 &intel_dp->aux.ddc);
4589 intel_dp_set_edid(struct intel_dp *intel_dp)
4591 struct intel_connector *intel_connector = intel_dp->attached_connector;
4594 edid = intel_dp_get_edid(intel_dp);
4595 intel_connector->detect_edid = edid;
4597 if (intel_dp->force_audio != HDMI_AUDIO_AUTO)
4598 intel_dp->has_audio = intel_dp->force_audio == HDMI_AUDIO_ON;
4600 intel_dp->has_audio = drm_detect_monitor_audio(edid);
4604 intel_dp_unset_edid(struct intel_dp *intel_dp)
4606 struct intel_connector *intel_connector = intel_dp->attached_connector;
4608 kfree(intel_connector->detect_edid);
4609 intel_connector->detect_edid = NULL;
4611 intel_dp->has_audio = false;
/*
 * Full connector ->detect() for DP/eDP: determines connection state,
 * probes OUI and MST capability, reads the EDID, and services any
 * pending sink interrupt (including compliance test requests).
 * AUX power is held across the probing section.
 * NOTE(review): several control-flow lines (goto/return targets) are
 * elided in this listing; confirm against the full file.
 */
4614 static enum drm_connector_status
4615 intel_dp_detect(struct drm_connector *connector, bool force)
4617 struct intel_dp *intel_dp = intel_attached_dp(connector);
4618 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
4619 struct intel_encoder *intel_encoder = &intel_dig_port->base;
4620 struct drm_device *dev = connector->dev;
4621 enum drm_connector_status status;
4622 enum intel_display_power_domain power_domain;
4626 DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
4627 connector->base.id, connector->name);
/* Start from a clean slate; EDID is re-read below */
4628 intel_dp_unset_edid(intel_dp);
4630 if (intel_dp->is_mst) {
4631 /* MST devices are disconnected from a monitor POV */
4632 if (intel_encoder->type != INTEL_OUTPUT_EDP)
4633 intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
4634 return connector_status_disconnected;
/* AUX access below needs the port's power domain */
4637 power_domain = intel_display_port_aux_power_domain(intel_encoder);
4638 intel_display_power_get(to_i915(dev), power_domain);
4640 /* Can't disconnect eDP, but you can close the lid... */
4641 if (is_edp(intel_dp))
4642 status = edp_detect(intel_dp);
4643 else if (intel_digital_port_connected(to_i915(dev),
4644 dp_to_dig_port(intel_dp)))
4645 status = intel_dp_detect_dpcd(intel_dp);
4647 status = connector_status_disconnected;
/* Nothing attached: reset any lingering compliance-test state */
4649 if (status != connector_status_connected) {
4650 intel_dp->compliance_test_active = 0;
4651 intel_dp->compliance_test_type = 0;
4652 intel_dp->compliance_test_data = 0;
4657 intel_dp_probe_oui(intel_dp);
4659 ret = intel_dp_probe_mst(intel_dp);
4661 /* if we are in MST mode then this connector
4662 won't appear connected or have anything with EDID on it */
4663 if (intel_encoder->type != INTEL_OUTPUT_EDP)
4664 intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
4665 status = connector_status_disconnected;
4670 * Clearing NACK and defer counts to get their exact values
4671 * while reading EDID which are required by Compliance tests
4672 * 4.2.2.4 and 4.2.2.5
4674 intel_dp->aux.i2c_nack_count = 0;
4675 intel_dp->aux.i2c_defer_count = 0;
4677 intel_dp_set_edid(intel_dp);
4679 if (intel_encoder->type != INTEL_OUTPUT_EDP)
4680 intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
4681 status = connector_status_connected;
4683 /* Try to read the source of the interrupt */
4684 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
4685 intel_dp_get_sink_irq(intel_dp, &sink_irq_vector)) {
4686 /* Clear interrupt source */
4687 drm_dp_dpcd_writeb(&intel_dp->aux,
4688 DP_DEVICE_SERVICE_IRQ_VECTOR,
4691 if (sink_irq_vector & DP_AUTOMATED_TEST_REQUEST)
4692 intel_dp_handle_test_request(intel_dp)
4693 if (sink_irq_vector & (DP_CP_IRQ | DP_SINK_SPECIFIC_IRQ))
4694 DRM_DEBUG_DRIVER("CP or sink specific irq unhandled\n");
4698 intel_display_power_put(to_i915(dev), power_domain);
4703 intel_dp_force(struct drm_connector *connector)
4705 struct intel_dp *intel_dp = intel_attached_dp(connector);
4706 struct intel_encoder *intel_encoder = &dp_to_dig_port(intel_dp)->base;
4707 struct drm_i915_private *dev_priv = to_i915(intel_encoder->base.dev);
4708 enum intel_display_power_domain power_domain;
4710 DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
4711 connector->base.id, connector->name);
4712 intel_dp_unset_edid(intel_dp);
4714 if (connector->status != connector_status_connected)
4717 power_domain = intel_display_port_aux_power_domain(intel_encoder);
4718 intel_display_power_get(dev_priv, power_domain);
4720 intel_dp_set_edid(intel_dp);
4722 intel_display_power_put(dev_priv, power_domain);
4724 if (intel_encoder->type != INTEL_OUTPUT_EDP)
4725 intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
4728 static int intel_dp_get_modes(struct drm_connector *connector)
4730 struct intel_connector *intel_connector = to_intel_connector(connector);
4733 edid = intel_connector->detect_edid;
4735 int ret = intel_connector_update_modes(connector, edid);
4740 /* if eDP has no EDID, fall back to fixed mode */
4741 if (is_edp(intel_attached_dp(connector)) &&
4742 intel_connector->panel.fixed_mode) {
4743 struct drm_display_mode *mode;
4745 mode = drm_mode_duplicate(connector->dev,
4746 intel_connector->panel.fixed_mode);
4748 drm_mode_probed_add(connector, mode);
4757 intel_dp_detect_audio(struct drm_connector *connector)
4759 bool has_audio = false;
4762 edid = to_intel_connector(connector)->detect_edid;
4764 has_audio = drm_detect_monitor_audio(edid);
/*
 * Connector ->set_property(): handles force-audio, broadcast-RGB
 * range, and (eDP only) the panel scaling mode.  When a change takes
 * effect and the encoder has a crtc, the mode is restored to apply it.
 * NOTE(review): early-return/goto lines between cases are elided in
 * this listing; confirm against the full file.
 */
4770 intel_dp_set_property(struct drm_connector *connector,
4771 struct drm_property *property,
4774 struct drm_i915_private *dev_priv = connector->dev->dev_private;
4775 struct intel_connector *intel_connector = to_intel_connector(connector);
4776 struct intel_encoder *intel_encoder = intel_attached_encoder(connector);
4777 struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
/* record the value on the drm property itself first */
4780 ret = drm_object_property_set_value(&connector->base, property, val);
4784 if (property == dev_priv->force_audio_property) {
/* no-op if the requested force-audio state is unchanged */
4788 if (i == intel_dp->force_audio)
4791 intel_dp->force_audio = i;
4793 if (i == HDMI_AUDIO_AUTO)
4794 has_audio = intel_dp_detect_audio(connector);
4796 has_audio = (i == HDMI_AUDIO_ON);
4798 if (has_audio == intel_dp->has_audio)
4801 intel_dp->has_audio = has_audio;
4805 if (property == dev_priv->broadcast_rgb_property) {
4806 bool old_auto = intel_dp->color_range_auto;
4807 bool old_range = intel_dp->limited_color_range;
4810 case INTEL_BROADCAST_RGB_AUTO:
4811 intel_dp->color_range_auto = true;
4813 case INTEL_BROADCAST_RGB_FULL:
4814 intel_dp->color_range_auto = false;
4815 intel_dp->limited_color_range = false;
4817 case INTEL_BROADCAST_RGB_LIMITED:
4818 intel_dp->color_range_auto = false;
4819 intel_dp->limited_color_range = true;
/* nothing changed: skip the modeset below */
4825 if (old_auto == intel_dp->color_range_auto &&
4826 old_range == intel_dp->limited_color_range)
4832 if (is_edp(intel_dp) &&
4833 property == connector->dev->mode_config.scaling_mode_property) {
/* eDP panels must always be scaled to the fixed mode */
4834 if (val == DRM_MODE_SCALE_NONE) {
4835 DRM_DEBUG_KMS("no scaling not supported\n");
4839 if (intel_connector->panel.fitting_mode == val) {
4840 /* the eDP scaling property is not changed */
4843 intel_connector->panel.fitting_mode = val;
/* apply the new property by redoing the mode on the active crtc */
4851 if (intel_encoder->base.crtc)
4852 intel_crtc_restore_mode(intel_encoder->base.crtc);
/*
 * drm_connector_funcs.destroy hook: free cached EDIDs, tear down the eDP
 * panel state if applicable, and release the DRM connector.
 */
4858 intel_dp_connector_destroy(struct drm_connector *connector)
4860 struct intel_connector *intel_connector = to_intel_connector(connector);
4862 kfree(intel_connector->detect_edid);
/* edid may hold an ERR_PTR sentinel rather than a real allocation. */
4864 if (!IS_ERR_OR_NULL(intel_connector->edid))
4865 kfree(intel_connector->edid);
4867 /* Can't call is_edp() since the encoder may have been destroyed
4869 if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
4870 intel_panel_fini(&intel_connector->panel);
4872 drm_connector_cleanup(connector);
/*
 * drm_encoder_funcs.destroy hook: tear down MST state, force panel VDD
 * off for eDP (the delayed VDD-off work may still be pending), unregister
 * the reboot notifier, and free the digital port.
 */
4876 void intel_dp_encoder_destroy(struct drm_encoder *encoder)
4878 struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
4879 struct intel_dp *intel_dp = &intel_dig_port->dp;
4881 intel_dp_mst_encoder_cleanup(intel_dig_port);
4882 if (is_edp(intel_dp)) {
4883 cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
4885 * vdd might still be enabled due to the delayed vdd off.
4886 * Make sure vdd is actually turned off here.
4889 edp_panel_vdd_off_sync(intel_dp);
4890 pps_unlock(intel_dp);
/* Only VLV/CHV register this notifier; clear it so it can't fire again. */
4892 if (intel_dp->edp_notifier.notifier_call) {
4893 unregister_reboot_notifier(&intel_dp->edp_notifier);
4894 intel_dp->edp_notifier.notifier_call = NULL;
4897 drm_encoder_cleanup(encoder);
4898 kfree(intel_dig_port);
/*
 * Suspend hook: for eDP, cancel the pending delayed VDD-off work and
 * synchronously turn panel VDD off so we don't suspend with it enabled.
 * No-op for external DP.
 */
4901 static void intel_dp_encoder_suspend(struct intel_encoder *intel_encoder)
4903 struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
4905 if (!is_edp(intel_dp))
4909 * vdd might still be enabled due to the delayed vdd off.
4910 * Make sure vdd is actually turned off here.
4912 cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
4914 edp_panel_vdd_off_sync(intel_dp);
4915 pps_unlock(intel_dp);
/*
 * Align our software VDD tracking with hardware state at boot/resume.
 * If the BIOS left panel VDD enabled, take the matching power-domain
 * reference and schedule the usual delayed VDD off so the reference is
 * eventually dropped. Caller must hold pps_mutex.
 */
4918 static void intel_edp_panel_vdd_sanitize(struct intel_dp *intel_dp)
4920 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
4921 struct drm_device *dev = intel_dig_port->base.base.dev;
4922 struct drm_i915_private *dev_priv = dev->dev_private;
4923 enum intel_display_power_domain power_domain;
4925 lockdep_assert_held(&dev_priv->pps_mutex);
/* Nothing to sanitize when hardware says VDD is already off. */
4927 if (!edp_have_panel_vdd(intel_dp))
4931 * The VDD bit needs a power domain reference, so if the bit is
4932 * already enabled when we boot or resume, grab this reference and
4933 * schedule a vdd off, so we don't hold on to the reference
4936 DRM_DEBUG_KMS("VDD left on by BIOS, adjusting state tracking\n");
4937 power_domain = intel_display_port_aux_power_domain(&intel_dig_port->base);
4938 intel_display_power_get(dev_priv, power_domain);
4940 edp_panel_vdd_schedule_off(intel_dp);
/*
 * drm_encoder_funcs.reset hook (boot/resume): for eDP, re-read the power
 * sequencer assignment on VLV/CHV (the BIOS may have changed it) and
 * sanitize any BIOS-enabled panel VDD. Plain DP encoders are skipped.
 */
4943 static void intel_dp_encoder_reset(struct drm_encoder *encoder)
4945 struct intel_dp *intel_dp;
4947 if (to_intel_encoder(encoder)->type != INTEL_OUTPUT_EDP)
4950 intel_dp = enc_to_intel_dp(encoder);
4955 * Read out the current power sequencer assignment,
4956 * in case the BIOS did something with it.
4958 if (IS_VALLEYVIEW(encoder->dev) || IS_CHERRYVIEW(encoder->dev))
4959 vlv_initial_power_sequencer_setup(intel_dp);
4961 intel_edp_panel_vdd_sanitize(intel_dp);
4963 pps_unlock(intel_dp);
/* DRM connector vtable for (e)DP connectors; atomic helpers for state. */
4966 static const struct drm_connector_funcs intel_dp_connector_funcs = {
4967 .dpms = drm_atomic_helper_connector_dpms,
4968 .detect = intel_dp_detect,
4969 .force = intel_dp_force,
4970 .fill_modes = drm_helper_probe_single_connector_modes,
4971 .set_property = intel_dp_set_property,
4972 .atomic_get_property = intel_connector_atomic_get_property,
4973 .destroy = intel_dp_connector_destroy,
4974 .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
4975 .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
/* Probe helpers: mode enumeration/validation and encoder selection. */
4978 static const struct drm_connector_helper_funcs intel_dp_connector_helper_funcs = {
4979 .get_modes = intel_dp_get_modes,
4980 .mode_valid = intel_dp_mode_valid,
4981 .best_encoder = intel_best_encoder,
/* DRM encoder vtable for (e)DP encoders. */
4984 static const struct drm_encoder_funcs intel_dp_enc_funcs = {
4985 .reset = intel_dp_encoder_reset,
4986 .destroy = intel_dp_encoder_destroy,
/*
 * Hotplug IRQ handler for a digital port carrying DP. Long pulses mean
 * connect/disconnect (re-read DPCD, probe OUI/MST); short pulses mean the
 * sink requests attention (MST sideband or link status check). Holds the
 * AUX power domain for the duration of the probing.
 */
4990 intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd)
4992 struct intel_dp *intel_dp = &intel_dig_port->dp;
4993 struct intel_encoder *intel_encoder = &intel_dig_port->base;
4994 struct drm_device *dev = intel_dig_port->base.base.dev;
4995 struct drm_i915_private *dev_priv = dev->dev_private;
4996 enum intel_display_power_domain power_domain;
4997 enum irqreturn ret = IRQ_NONE;
/* Reset a stale UNKNOWN/DP-MST type back to plain DISPLAYPORT. */
4999 if (intel_dig_port->base.type != INTEL_OUTPUT_EDP &&
5000 intel_dig_port->base.type != INTEL_OUTPUT_HDMI)
5001 intel_dig_port->base.type = INTEL_OUTPUT_DISPLAYPORT;
5003 if (long_hpd && intel_dig_port->base.type == INTEL_OUTPUT_EDP) {
5005 * vdd off can generate a long pulse on eDP which
5006 * would require vdd on to handle it, and thus we
5007 * would end up in an endless cycle of
5008 * "vdd off -> long hpd -> vdd on -> detect -> vdd off -> ..."
5010 DRM_DEBUG_KMS("ignoring long hpd on eDP port %c\n",
5011 port_name(intel_dig_port->port));
5015 DRM_DEBUG_KMS("got hpd irq on port %c - %s\n",
5016 port_name(intel_dig_port->port),
5017 long_hpd ? "long" : "short");
/* All DPCD/AUX traffic below needs the AUX power domain. */
5019 power_domain = intel_display_port_aux_power_domain(intel_encoder);
5020 intel_display_power_get(dev_priv, power_domain);
5023 /* indicate that we need to restart link training */
5024 intel_dp->train_set_valid = false;
5026 if (!intel_digital_port_connected(dev_priv, intel_dig_port))
5029 if (!intel_dp_get_dpcd(intel_dp)) {
5033 intel_dp_probe_oui(intel_dp);
/* Sink is not MST-capable: check the SST link under the connection lock. */
5035 if (!intel_dp_probe_mst(intel_dp)) {
5036 drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
5037 intel_dp_check_link_status(intel_dp);
5038 drm_modeset_unlock(&dev->mode_config.connection_mutex);
/* Short pulse: service MST ESI, or re-check SST link status. */
5042 if (intel_dp->is_mst) {
5043 if (intel_dp_check_mst_status(intel_dp) == -EINVAL)
5047 if (!intel_dp->is_mst) {
5048 drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
5049 intel_dp_check_link_status(intel_dp);
5050 drm_modeset_unlock(&dev->mode_config.connection_mutex);
5058 /* if we were in MST mode, and device is not there get out of MST mode */
5059 if (intel_dp->is_mst) {
5060 DRM_DEBUG_KMS("MST device may have disappeared %d vs %d\n", intel_dp->is_mst, intel_dp->mst_mgr.mst_state);
5061 intel_dp->is_mst = false;
5062 drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst);
5065 intel_display_power_put(dev_priv, power_domain);
5070 /* check the VBT to see whether the eDP is on another port */
/*
 * Returns true if the VBT child device list marks the given port as eDP.
 * Port A is handled by the callers; this only consults the VBT for B-E.
 */
5071 bool intel_dp_is_edp(struct drm_device *dev, enum port port)
5073 struct drm_i915_private *dev_priv = dev->dev_private;
5074 union child_device_config *p_child;
/* Map DRM port enum to the VBT's DVO port encoding. */
5076 static const short port_mapping[] = {
5077 [PORT_B] = DVO_PORT_DPB,
5078 [PORT_C] = DVO_PORT_DPC,
5079 [PORT_D] = DVO_PORT_DPD,
5080 [PORT_E] = DVO_PORT_DPE,
5084 * eDP not supported on g4x. so bail out early just
5085 * for a bit extra safety in case the VBT is bonkers.
5087 if (INTEL_INFO(dev)->gen < 5)
/* No child device table -> cannot be VBT-declared eDP. */
5093 if (!dev_priv->vbt.child_dev_num)
5096 for (i = 0; i < dev_priv->vbt.child_dev_num; i++) {
5097 p_child = dev_priv->vbt.child_dev + i;
/* Match both the DVO port and the eDP device-type bits. */
5099 if (p_child->common.dvo_port == port_mapping[port] &&
5100 (p_child->common.device_type & DEVICE_TYPE_eDP_BITS) ==
5101 (DEVICE_TYPE_eDP & DEVICE_TYPE_eDP_BITS))
/*
 * Attach the connector properties handled by intel_dp_set_property():
 * force_audio and Broadcast RGB for all DP connectors, plus the scaling
 * mode (defaulting to aspect-preserving) for eDP panels.
 */
5108 intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connector)
5110 struct intel_connector *intel_connector = to_intel_connector(connector);
5112 intel_attach_force_audio_property(connector);
5113 intel_attach_broadcast_rgb_property(connector);
/* Default to automatic RGB range selection. */
5114 intel_dp->color_range_auto = true;
5116 if (is_edp(intel_dp)) {
5117 drm_mode_create_scaling_mode_property(connector->dev);
5118 drm_object_attach_property(
5120 connector->dev->mode_config.scaling_mode_property,
5121 DRM_MODE_SCALE_ASPECT);
5122 intel_connector->panel.fitting_mode = DRM_MODE_SCALE_ASPECT;
/*
 * Seed the panel power-sequencing timestamps so the first power-on does
 * not wait for delays that have already elapsed before driver load.
 */
5126 static void intel_dp_init_panel_power_timestamps(struct intel_dp *intel_dp)
5128 intel_dp->panel_power_off_time = ktime_get_boottime();
5129 intel_dp->last_power_on = jiffies;
5130 intel_dp->last_backlight_off = jiffies;
/*
 * Compute the eDP panel power sequencing delays (T1/T3, T8, T9, T10,
 * T11/T12) by combining what the hardware registers currently report,
 * the VBT values, and the eDP spec upper limits, and cache the result in
 * intel_dp->pps_delays. Caller must hold pps_mutex. Idempotent: returns
 * early if the delays were already computed.
 */
5134 intel_dp_init_panel_power_sequencer(struct drm_device *dev,
5135 struct intel_dp *intel_dp)
5137 struct drm_i915_private *dev_priv = dev->dev_private;
5138 struct edp_power_seq cur, vbt, spec,
5139 *final = &intel_dp->pps_delays;
5140 u32 pp_on, pp_off, pp_div = 0, pp_ctl = 0;
5141 i915_reg_t pp_ctrl_reg, pp_on_reg, pp_off_reg, pp_div_reg;
5143 lockdep_assert_held(&dev_priv->pps_mutex);
5145 /* already initialized? */
5146 if (final->t11_t12 != 0)
/* Select the per-platform PPS register set (BXT has no divisor reg). */
5149 if (IS_BROXTON(dev)) {
5151 * TODO: BXT has 2 sets of PPS registers.
5152 * Correct Register for Broxton need to be identified
5153 * using VBT. hardcoding for now
5155 pp_ctrl_reg = BXT_PP_CONTROL(0);
5156 pp_on_reg = BXT_PP_ON_DELAYS(0);
5157 pp_off_reg = BXT_PP_OFF_DELAYS(0);
5158 } else if (HAS_PCH_SPLIT(dev)) {
5159 pp_ctrl_reg = PCH_PP_CONTROL;
5160 pp_on_reg = PCH_PP_ON_DELAYS;
5161 pp_off_reg = PCH_PP_OFF_DELAYS;
5162 pp_div_reg = PCH_PP_DIVISOR;
5164 enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);
5166 pp_ctrl_reg = VLV_PIPE_PP_CONTROL(pipe);
5167 pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe);
5168 pp_off_reg = VLV_PIPE_PP_OFF_DELAYS(pipe);
5169 pp_div_reg = VLV_PIPE_PP_DIVISOR(pipe);
5172 /* Workaround: Need to write PP_CONTROL with the unlock key as
5173 * the very first thing. */
5174 pp_ctl = ironlake_get_pp_control(intel_dp);
5176 pp_on = I915_READ(pp_on_reg);
5177 pp_off = I915_READ(pp_off_reg);
5178 if (!IS_BROXTON(dev)) {
5179 I915_WRITE(pp_ctrl_reg, pp_ctl);
5180 pp_div = I915_READ(pp_div_reg);
5183 /* Pull timing values out of registers */
5184 cur.t1_t3 = (pp_on & PANEL_POWER_UP_DELAY_MASK) >>
5185 PANEL_POWER_UP_DELAY_SHIFT;
5187 cur.t8 = (pp_on & PANEL_LIGHT_ON_DELAY_MASK) >>
5188 PANEL_LIGHT_ON_DELAY_SHIFT;
5190 cur.t9 = (pp_off & PANEL_LIGHT_OFF_DELAY_MASK) >>
5191 PANEL_LIGHT_OFF_DELAY_SHIFT;
5193 cur.t10 = (pp_off & PANEL_POWER_DOWN_DELAY_MASK) >>
5194 PANEL_POWER_DOWN_DELAY_SHIFT;
/* BXT keeps the power-cycle delay in PP_CONTROL, in 100ms units. */
5196 if (IS_BROXTON(dev)) {
5197 u16 tmp = (pp_ctl & BXT_POWER_CYCLE_DELAY_MASK) >>
5198 BXT_POWER_CYCLE_DELAY_SHIFT;
5200 cur.t11_t12 = (tmp - 1) * 1000;
5204 cur.t11_t12 = ((pp_div & PANEL_POWER_CYCLE_DELAY_MASK) >>
5205 PANEL_POWER_CYCLE_DELAY_SHIFT) * 1000;
5208 DRM_DEBUG_KMS("cur t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
5209 cur.t1_t3, cur.t8, cur.t9, cur.t10, cur.t11_t12);
5211 vbt = dev_priv->vbt.edp_pps;
5213 /* Upper limits from eDP 1.3 spec. Note that we use the clunky units of
5214 * our hw here, which are all in 100usec. */
5215 spec.t1_t3 = 210 * 10;
5216 spec.t8 = 50 * 10; /* no limit for t8, use t7 instead */
5217 spec.t9 = 50 * 10; /* no limit for t9, make it symmetric with t8 */
5218 spec.t10 = 500 * 10;
5219 /* This one is special and actually in units of 100ms, but zero
5220 * based in the hw (so we need to add 100 ms). But the sw vbt
5221 * table multiplies it with 1000 to make it in units of 100usec,
5223 spec.t11_t12 = (510 + 100) * 10;
5225 DRM_DEBUG_KMS("vbt t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
5226 vbt.t1_t3, vbt.t8, vbt.t9, vbt.t10, vbt.t11_t12);
5228 /* Use the max of the register settings and vbt. If both are
5229 * unset, fall back to the spec limits. */
5230 #define assign_final(field) final->field = (max(cur.field, vbt.field) == 0 ? \
5232 max(cur.field, vbt.field))
5233 assign_final(t1_t3);
5237 assign_final(t11_t12);
/* Round the 100us-unit delays up to milliseconds for msleep() use. */
5240 #define get_delay(field) (DIV_ROUND_UP(final->field, 10))
5241 intel_dp->panel_power_up_delay = get_delay(t1_t3);
5242 intel_dp->backlight_on_delay = get_delay(t8);
5243 intel_dp->backlight_off_delay = get_delay(t9);
5244 intel_dp->panel_power_down_delay = get_delay(t10);
5245 intel_dp->panel_power_cycle_delay = get_delay(t11_t12);
5248 DRM_DEBUG_KMS("panel power up delay %d, power down delay %d, power cycle delay %d\n",
5249 intel_dp->panel_power_up_delay, intel_dp->panel_power_down_delay,
5250 intel_dp->panel_power_cycle_delay);
5252 DRM_DEBUG_KMS("backlight on delay %d, off delay %d\n",
5253 intel_dp->backlight_on_delay, intel_dp->backlight_off_delay);
/*
 * Program the panel power sequencer hardware registers from the delays
 * cached in intel_dp->pps_delays, including the reference-clock divisor
 * and, where applicable, the PPS port-select bits. Caller must hold
 * pps_mutex.
 */
5257 intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
5258 struct intel_dp *intel_dp)
5260 struct drm_i915_private *dev_priv = dev->dev_private;
5261 u32 pp_on, pp_off, pp_div, port_sel = 0;
5262 int div = HAS_PCH_SPLIT(dev) ? intel_pch_rawclk(dev) : intel_hrawclk(dev);
5263 i915_reg_t pp_on_reg, pp_off_reg, pp_div_reg, pp_ctrl_reg;
5264 enum port port = dp_to_dig_port(intel_dp)->port;
5265 const struct edp_power_seq *seq = &intel_dp->pps_delays;
5267 lockdep_assert_held(&dev_priv->pps_mutex);
/* Select the per-platform PPS register set (mirrors the read side). */
5269 if (IS_BROXTON(dev)) {
5271 * TODO: BXT has 2 sets of PPS registers.
5272 * Correct Register for Broxton need to be identified
5273 * using VBT. hardcoding for now
5275 pp_ctrl_reg = BXT_PP_CONTROL(0);
5276 pp_on_reg = BXT_PP_ON_DELAYS(0);
5277 pp_off_reg = BXT_PP_OFF_DELAYS(0);
5279 } else if (HAS_PCH_SPLIT(dev)) {
5280 pp_on_reg = PCH_PP_ON_DELAYS;
5281 pp_off_reg = PCH_PP_OFF_DELAYS;
5282 pp_div_reg = PCH_PP_DIVISOR;
5284 enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);
5286 pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe);
5287 pp_off_reg = VLV_PIPE_PP_OFF_DELAYS(pipe);
5288 pp_div_reg = VLV_PIPE_PP_DIVISOR(pipe);
5292 * And finally store the new values in the power sequencer. The
5293 * backlight delays are set to 1 because we do manual waits on them. For
5294 * T8, even BSpec recommends doing it. For T9, if we don't do this,
5295 * we'll end up waiting for the backlight off delay twice: once when we
5296 * do the manual sleep, and once when we disable the panel and wait for
5297 * the PP_STATUS bit to become zero.
5299 pp_on = (seq->t1_t3 << PANEL_POWER_UP_DELAY_SHIFT) |
5300 (1 << PANEL_LIGHT_ON_DELAY_SHIFT);
5301 pp_off = (1 << PANEL_LIGHT_OFF_DELAY_SHIFT) |
5302 (seq->t10 << PANEL_POWER_DOWN_DELAY_SHIFT);
5303 /* Compute the divisor for the pp clock, simply match the Bspec
5305 if (IS_BROXTON(dev)) {
/* BXT: power-cycle delay lives in PP_CONTROL; preserve other bits. */
5306 pp_div = I915_READ(pp_ctrl_reg);
5307 pp_div &= ~BXT_POWER_CYCLE_DELAY_MASK;
5308 pp_div |= (DIV_ROUND_UP((seq->t11_t12 + 1), 1000)
5309 << BXT_POWER_CYCLE_DELAY_SHIFT);
5311 pp_div = ((100 * div)/2 - 1) << PP_REFERENCE_DIVIDER_SHIFT;
5312 pp_div |= (DIV_ROUND_UP(seq->t11_t12, 1000)
5313 << PANEL_POWER_CYCLE_DELAY_SHIFT);
5316 /* Haswell doesn't have any port selection bits for the panel
5317 * power sequencer any more. */
5318 if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
5319 port_sel = PANEL_PORT_SELECT_VLV(port);
5320 } else if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) {
5322 port_sel = PANEL_PORT_SELECT_DPA;
5324 port_sel = PANEL_PORT_SELECT_DPD;
5329 I915_WRITE(pp_on_reg, pp_on);
5330 I915_WRITE(pp_off_reg, pp_off);
5331 if (IS_BROXTON(dev))
5332 I915_WRITE(pp_ctrl_reg, pp_div);
5334 I915_WRITE(pp_div_reg, pp_div);
5336 DRM_DEBUG_KMS("panel power sequencer register settings: PP_ON %#x, PP_OFF %#x, PP_DIV %#x\n",
5337 I915_READ(pp_on_reg),
5338 I915_READ(pp_off_reg),
5340 (I915_READ(pp_ctrl_reg) & BXT_POWER_CYCLE_DELAY_MASK) :
5341 I915_READ(pp_div_reg));
5345 * intel_dp_set_drrs_state - program registers for RR switch to take effect
 * @dev: DRM device
5347 * @refresh_rate: RR to be programmed
5349 * This function gets called when refresh rate (RR) has to be changed from
5350 * one frequency to another. Switches can be between high and low RR
5351 * supported by the panel or to any other RR based on media playback (in
5352 * this case, RR value needs to be passed from user space).
5354 * The caller of this function needs to take a lock on dev_priv->drrs.
5356 static void intel_dp_set_drrs_state(struct drm_device *dev, int refresh_rate)
5358 struct drm_i915_private *dev_priv = dev->dev_private;
5359 struct intel_encoder *encoder;
5360 struct intel_digital_port *dig_port = NULL;
5361 struct intel_dp *intel_dp = dev_priv->drrs.dp;
5362 struct intel_crtc_state *config = NULL;
5363 struct intel_crtc *intel_crtc = NULL;
5364 enum drrs_refresh_rate_type index = DRRS_HIGH_RR;
5366 if (refresh_rate <= 0) {
5367 DRM_DEBUG_KMS("Refresh rate should be positive non-zero.\n");
5371 if (intel_dp == NULL) {
5372 DRM_DEBUG_KMS("DRRS not supported.\n");
5377 * FIXME: This needs proper synchronization with psr state for some
5378 * platforms that cannot have PSR and DRRS enabled at the same time.
5381 dig_port = dp_to_dig_port(intel_dp);
5382 encoder = &dig_port->base;
5383 intel_crtc = to_intel_crtc(encoder->base.crtc);
5386 DRM_DEBUG_KMS("DRRS: intel_crtc not initialized\n");
5390 config = intel_crtc->config;
5392 if (dev_priv->drrs.type < SEAMLESS_DRRS_SUPPORT) {
5393 DRM_DEBUG_KMS("Only Seamless DRRS supported.\n");
/* Requested rate matching the downclock mode selects the low RR. */
5397 if (intel_dp->attached_connector->panel.downclock_mode->vrefresh ==
5399 index = DRRS_LOW_RR;
5401 if (index == dev_priv->drrs.refresh_rate_type) {
5403 "DRRS requested for previously set RR...ignoring\n");
5407 if (!intel_crtc->active) {
5408 DRM_DEBUG_KMS("eDP encoder disabled. CRTC not Active\n");
/* Gen8+ (except CHV): switch RR by selecting the M1/N1 or M2/N2 link. */
5412 if (INTEL_INFO(dev)->gen >= 8 && !IS_CHERRYVIEW(dev)) {
5415 intel_dp_set_m_n(intel_crtc, M1_N1);
5418 intel_dp_set_m_n(intel_crtc, M2_N2);
5422 DRM_ERROR("Unsupported refreshrate type\n");
/* Gen7/CHV path: toggle the PIPECONF eDP RR mode-switch bit. */
5424 } else if (INTEL_INFO(dev)->gen > 6) {
5425 i915_reg_t reg = PIPECONF(intel_crtc->config->cpu_transcoder);
5428 val = I915_READ(reg);
5429 if (index > DRRS_HIGH_RR) {
5430 if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
5431 val |= PIPECONF_EDP_RR_MODE_SWITCH_VLV;
5433 val |= PIPECONF_EDP_RR_MODE_SWITCH;
5435 if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
5436 val &= ~PIPECONF_EDP_RR_MODE_SWITCH_VLV;
5438 val &= ~PIPECONF_EDP_RR_MODE_SWITCH;
5440 I915_WRITE(reg, val);
5443 dev_priv->drrs.refresh_rate_type = index;
5445 DRM_DEBUG_KMS("eDP Refresh Rate set to : %dHz\n", refresh_rate);
5449 * intel_edp_drrs_enable - init drrs struct if supported
5450 * @intel_dp: DP struct
5452 * Initializes frontbuffer_bits and drrs.dp
5454 void intel_edp_drrs_enable(struct intel_dp *intel_dp)
5456 struct drm_device *dev = intel_dp_to_dev(intel_dp);
5457 struct drm_i915_private *dev_priv = dev->dev_private;
5458 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
5459 struct drm_crtc *crtc = dig_port->base.base.crtc;
5460 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5462 if (!intel_crtc->config->has_drrs) {
5463 DRM_DEBUG_KMS("Panel doesn't support DRRS\n");
5467 mutex_lock(&dev_priv->drrs.mutex);
/* Only one DRRS-capable eDP at a time; double-enable is a driver bug. */
5468 if (WARN_ON(dev_priv->drrs.dp)) {
5469 DRM_ERROR("DRRS already enabled\n");
5473 dev_priv->drrs.busy_frontbuffer_bits = 0;
5475 dev_priv->drrs.dp = intel_dp;
5478 mutex_unlock(&dev_priv->drrs.mutex);
5482 * intel_edp_drrs_disable - Disable DRRS
5483 * @intel_dp: DP struct
5486 void intel_edp_drrs_disable(struct intel_dp *intel_dp)
5488 struct drm_device *dev = intel_dp_to_dev(intel_dp);
5489 struct drm_i915_private *dev_priv = dev->dev_private;
5490 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
5491 struct drm_crtc *crtc = dig_port->base.base.crtc;
5492 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5494 if (!intel_crtc->config->has_drrs)
5497 mutex_lock(&dev_priv->drrs.mutex);
5498 if (!dev_priv->drrs.dp) {
5499 mutex_unlock(&dev_priv->drrs.mutex);
/* Restore the high (fixed-mode) refresh rate before tearing down. */
5503 if (dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR)
5504 intel_dp_set_drrs_state(dev_priv->dev,
5505 intel_dp->attached_connector->panel.
5506 fixed_mode->vrefresh);
5508 dev_priv->drrs.dp = NULL;
5509 mutex_unlock(&dev_priv->drrs.mutex);
/* Must not hold drrs.mutex here: the work itself takes it. */
5511 cancel_delayed_work_sync(&dev_priv->drrs.work);
/*
 * Idleness timer: after the flush-scheduled delay expires with no busy
 * frontbuffer bits, switch the panel down to the low refresh rate.
 */
5514 static void intel_edp_drrs_downclock_work(struct work_struct *work)
5516 struct drm_i915_private *dev_priv =
5517 container_of(work, typeof(*dev_priv), drrs.work.work);
5518 struct intel_dp *intel_dp;
5520 mutex_lock(&dev_priv->drrs.mutex);
5522 intel_dp = dev_priv->drrs.dp;
5528 * The delayed work can race with an invalidate hence we need to
/* Screen became busy again in the meantime -> stay at high RR. */
5532 if (dev_priv->drrs.busy_frontbuffer_bits)
5535 if (dev_priv->drrs.refresh_rate_type != DRRS_LOW_RR)
5536 intel_dp_set_drrs_state(dev_priv->dev,
5537 intel_dp->attached_connector->panel.
5538 downclock_mode->vrefresh);
5541 mutex_unlock(&dev_priv->drrs.mutex);
5545 * intel_edp_drrs_invalidate - Disable Idleness DRRS
5547 * @frontbuffer_bits: frontbuffer plane tracking bits
5549 * This function gets called every time rendering on the given planes start.
5550 * Hence DRRS needs to be Upclocked, i.e. (LOW_RR -> HIGH_RR).
5552 * Dirty frontbuffers relevant to DRRS are tracked in busy_frontbuffer_bits.
5554 void intel_edp_drrs_invalidate(struct drm_device *dev,
5555 unsigned frontbuffer_bits)
5557 struct drm_i915_private *dev_priv = dev->dev_private;
5558 struct drm_crtc *crtc;
5561 if (dev_priv->drrs.type == DRRS_NOT_SUPPORTED)
/* Stop any pending downclock; the screen is about to get busy. */
5564 cancel_delayed_work(&dev_priv->drrs.work);
5566 mutex_lock(&dev_priv->drrs.mutex);
5567 if (!dev_priv->drrs.dp) {
5568 mutex_unlock(&dev_priv->drrs.mutex);
5572 crtc = dp_to_dig_port(dev_priv->drrs.dp)->base.base.crtc;
5573 pipe = to_intel_crtc(crtc)->pipe;
/* Only frontbuffer bits on the DRRS pipe are relevant. */
5575 frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);
5576 dev_priv->drrs.busy_frontbuffer_bits |= frontbuffer_bits;
5578 /* invalidate means busy screen hence upclock */
5579 if (frontbuffer_bits && dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR)
5580 intel_dp_set_drrs_state(dev_priv->dev,
5581 dev_priv->drrs.dp->attached_connector->panel.
5582 fixed_mode->vrefresh);
5584 mutex_unlock(&dev_priv->drrs.mutex);
5588 * intel_edp_drrs_flush - Restart Idleness DRRS
5590 * @frontbuffer_bits: frontbuffer plane tracking bits
5592 * This function gets called every time rendering on the given planes has
5593 * completed or flip on a crtc is completed. So DRRS should be upclocked
5594 * (LOW_RR -> HIGH_RR). And also Idleness detection should be started again,
5595 * if no other planes are dirty.
5597 * Dirty frontbuffers relevant to DRRS are tracked in busy_frontbuffer_bits.
5599 void intel_edp_drrs_flush(struct drm_device *dev,
5600 unsigned frontbuffer_bits)
5602 struct drm_i915_private *dev_priv = dev->dev_private;
5603 struct drm_crtc *crtc;
5606 if (dev_priv->drrs.type == DRRS_NOT_SUPPORTED)
/* Re-arm the idleness timer from scratch below. */
5609 cancel_delayed_work(&dev_priv->drrs.work);
5611 mutex_lock(&dev_priv->drrs.mutex);
5612 if (!dev_priv->drrs.dp) {
5613 mutex_unlock(&dev_priv->drrs.mutex);
5617 crtc = dp_to_dig_port(dev_priv->drrs.dp)->base.base.crtc;
5618 pipe = to_intel_crtc(crtc)->pipe;
/* Clear the flushed planes from the busy set for the DRRS pipe. */
5620 frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);
5621 dev_priv->drrs.busy_frontbuffer_bits &= ~frontbuffer_bits;
5623 /* flush means busy screen hence upclock */
5624 if (frontbuffer_bits && dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR)
5625 intel_dp_set_drrs_state(dev_priv->dev,
5626 dev_priv->drrs.dp->attached_connector->panel.
5627 fixed_mode->vrefresh);
5630 * flush also means no more activity hence schedule downclock, if all
5631 * other fbs are quiescent too
5633 if (!dev_priv->drrs.busy_frontbuffer_bits)
5634 schedule_delayed_work(&dev_priv->drrs.work,
5635 msecs_to_jiffies(1000));
5636 mutex_unlock(&dev_priv->drrs.mutex);
5640 * DOC: Display Refresh Rate Switching (DRRS)
5642 * Display Refresh Rate Switching (DRRS) is a power conservation feature
5643 which enables switching between low and high refresh rates,
5644 * dynamically, based on the usage scenario. This feature is applicable
5645 * for internal panels.
5647 * Indication that the panel supports DRRS is given by the panel EDID, which
5648 * would list multiple refresh rates for one resolution.
5650 * DRRS is of 2 types - static and seamless.
5651 * Static DRRS involves changing refresh rate (RR) by doing a full modeset
5652 * (may appear as a blink on screen) and is used in dock-undock scenario.
5653 * Seamless DRRS involves changing RR without any visual effect to the user
5654 * and can be used during normal system usage. This is done by programming
5655 * certain registers.
5657 * Support for static/seamless DRRS may be indicated in the VBT based on
5658 * inputs from the panel spec.
5660 * DRRS saves power by switching to low RR based on usage scenarios.
5663 * The implementation is based on frontbuffer tracking implementation.
5664 * When there is a disturbance on the screen triggered by user activity or a
5665 * periodic system activity, DRRS is disabled (RR is changed to high RR).
5666 * When there is no movement on screen, after a timeout of 1 second, a switch
5667 * to low RR is made.
5668 * For integration with frontbuffer tracking code,
5669 * intel_edp_drrs_invalidate() and intel_edp_drrs_flush() are called.
5671 * DRRS can be further extended to support other internal panels and also
5672 * the scenario of video playback wherein RR is set based on the rate
5673 * requested by userspace.
5677 * intel_dp_drrs_init - Init basic DRRS work and mutex.
5678 * @intel_connector: eDP connector
5679 * @fixed_mode: preferred mode of panel
5681 * This function is called only once at driver load to initialize basic
5685 * Downclock mode if panel supports it, else return NULL.
5686 * DRRS support is determined by the presence of downclock mode (apart
5687 * from VBT setting).
5689 static struct drm_display_mode *
5690 intel_dp_drrs_init(struct intel_connector *intel_connector,
5691 struct drm_display_mode *fixed_mode)
5693 struct drm_connector *connector = &intel_connector->base;
5694 struct drm_device *dev = connector->dev;
5695 struct drm_i915_private *dev_priv = dev->dev_private;
5696 struct drm_display_mode *downclock_mode = NULL;
/* Work and mutex are set up unconditionally, even if DRRS ends up off. */
5698 INIT_DELAYED_WORK(&dev_priv->drrs.work, intel_edp_drrs_downclock_work);
5699 mutex_init(&dev_priv->drrs.mutex);
5701 if (INTEL_INFO(dev)->gen <= 6) {
5702 DRM_DEBUG_KMS("DRRS supported for Gen7 and above\n");
5706 if (dev_priv->vbt.drrs_type != SEAMLESS_DRRS_SUPPORT) {
5707 DRM_DEBUG_KMS("VBT doesn't support DRRS\n");
/* A lower-refresh variant of fixed_mode must exist in the EDID. */
5711 downclock_mode = intel_find_panel_downclock
5712 (dev, fixed_mode, connector);
5714 if (!downclock_mode) {
5715 DRM_DEBUG_KMS("Downclock mode is not found. DRRS not supported\n");
5719 dev_priv->drrs.type = dev_priv->vbt.drrs_type;
5721 dev_priv->drrs.refresh_rate_type = DRRS_HIGH_RR;
5722 DRM_DEBUG_KMS("seamless DRRS supported for eDP panel.\n");
5723 return downclock_mode;
/*
 * One-time eDP connector setup: sanitize BIOS VDD state, cache the DPCD
 * and EDID (treating a sink that won't report link info as a ghost),
 * program the power sequencer, pick the fixed/downclock modes (EDID
 * preferred, VBT fallback), and initialize the panel and backlight.
 * Returns early for non-eDP connectors.
 */
5726 static bool intel_edp_init_connector(struct intel_dp *intel_dp,
5727 struct intel_connector *intel_connector)
5729 struct drm_connector *connector = &intel_connector->base;
5730 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
5731 struct intel_encoder *intel_encoder = &intel_dig_port->base;
5732 struct drm_device *dev = intel_encoder->base.dev;
5733 struct drm_i915_private *dev_priv = dev->dev_private;
5734 struct drm_display_mode *fixed_mode = NULL;
5735 struct drm_display_mode *downclock_mode = NULL;
5737 struct drm_display_mode *scan;
5739 enum pipe pipe = INVALID_PIPE;
5741 if (!is_edp(intel_dp))
5745 intel_edp_panel_vdd_sanitize(intel_dp);
5746 pps_unlock(intel_dp);
5748 /* Cache DPCD and EDID for edp. */
5749 has_dpcd = intel_dp_get_dpcd(intel_dp);
5752 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11)
5753 dev_priv->no_aux_handshake =
5754 intel_dp->dpcd[DP_MAX_DOWNSPREAD] &
5755 DP_NO_AUX_HANDSHAKE_LINK_TRAINING;
5757 /* if this fails, presume the device is a ghost */
5758 DRM_INFO("failed to retrieve link info, disabling eDP\n");
5762 /* We now know it's not a ghost, init power sequence regs. */
5764 intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
5765 pps_unlock(intel_dp);
5767 mutex_lock(&dev->mode_config.mutex);
5768 edid = drm_get_edid(connector, &intel_dp->aux.ddc);
5770 if (drm_add_edid_modes(connector, edid)) {
5771 drm_mode_connector_update_edid_property(connector,
5773 drm_edid_to_eld(connector, edid);
/* ERR_PTR sentinels distinguish "bad EDID" from "no EDID". */
5776 edid = ERR_PTR(-EINVAL);
5779 edid = ERR_PTR(-ENOENT);
5781 intel_connector->edid = edid;
5783 /* prefer fixed mode from EDID if available */
5784 list_for_each_entry(scan, &connector->probed_modes, head) {
5785 if ((scan->type & DRM_MODE_TYPE_PREFERRED)) {
5786 fixed_mode = drm_mode_duplicate(dev, scan);
5787 downclock_mode = intel_dp_drrs_init(
5788 intel_connector, fixed_mode);
5793 /* fallback to VBT if available for eDP */
5794 if (!fixed_mode && dev_priv->vbt.lfp_lvds_vbt_mode) {
5795 fixed_mode = drm_mode_duplicate(dev,
5796 dev_priv->vbt.lfp_lvds_vbt_mode);
5798 fixed_mode->type |= DRM_MODE_TYPE_PREFERRED;
5800 mutex_unlock(&dev->mode_config.mutex);
/* VLV/CHV: turn the panel off cleanly on reboot via a notifier. */
5802 if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
5803 intel_dp->edp_notifier.notifier_call = edp_notify_handler;
5804 register_reboot_notifier(&intel_dp->edp_notifier);
5807 * Figure out the current pipe for the initial backlight setup.
5808 * If the current pipe isn't valid, try the PPS pipe, and if that
5809 * fails just assume pipe A.
5811 if (IS_CHERRYVIEW(dev))
5812 pipe = DP_PORT_TO_PIPE_CHV(intel_dp->DP);
5814 pipe = PORT_TO_PIPE(intel_dp->DP);
5816 if (pipe != PIPE_A && pipe != PIPE_B)
5817 pipe = intel_dp->pps_pipe;
5819 if (pipe != PIPE_A && pipe != PIPE_B)
5822 DRM_DEBUG_KMS("using pipe %c for initial backlight setup\n",
5826 intel_panel_init(&intel_connector->panel, fixed_mode, downclock_mode);
5827 intel_connector->panel.backlight.power = intel_edp_backlight_power;
5828 intel_panel_setup_backlight(connector, pipe);
/*
 * intel_dp_init_connector - initialize the DRM connector for a DP/eDP port.
 *
 * Wires up the connector object, per-platform AUX-channel vfuncs, hotplug
 * pin, panel power sequencing (eDP), and optional MST support for the
 * digital port @intel_dig_port, attaching it to @intel_connector.
 *
 * NOTE(review): this view of the file is a sparse extraction — intervening
 * lines (return statements, else branches, switch labels, error labels) are
 * missing, so comments below only describe what the visible lines establish.
 */
5834 intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
5835 struct intel_connector *intel_connector)
5837 struct drm_connector *connector = &intel_connector->base;
5838 struct intel_dp *intel_dp = &intel_dig_port->dp;
5839 struct intel_encoder *intel_encoder = &intel_dig_port->base;
5840 struct drm_device *dev = intel_encoder->base.dev;
5841 struct drm_i915_private *dev_priv = dev->dev_private;
5842 enum port port = intel_dig_port->port;
/* Sanity check: a DP port must expose at least one lane. */
5845 if (WARN(intel_dig_port->max_lanes < 1,
5846 "Not enough lanes (%d) for DP on port %c\n",
5847 intel_dig_port->max_lanes, port_name(port)))
/* No panel power sequencer pipe assigned yet. */
5850 intel_dp->pps_pipe = INVALID_PIPE;
5852 /* intel_dp vfuncs */
/* Select the AUX clock divider callback per platform generation. */
5853 if (INTEL_INFO(dev)->gen >= 9)
5854 intel_dp->get_aux_clock_divider = skl_get_aux_clock_divider;
5855 else if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
5856 intel_dp->get_aux_clock_divider = vlv_get_aux_clock_divider;
5857 else if (IS_HASWELL(dev) || IS_BROADWELL(dev))
5858 intel_dp->get_aux_clock_divider = hsw_get_aux_clock_divider;
5859 else if (HAS_PCH_SPLIT(dev))
5860 intel_dp->get_aux_clock_divider = ilk_get_aux_clock_divider;
/* (fallback branch; the `else` line is in an extraction gap) */
5862 intel_dp->get_aux_clock_divider = i9xx_get_aux_clock_divider;
/* SKL+ uses a different AUX send-control register layout. */
5864 if (INTEL_INFO(dev)->gen >= 9)
5865 intel_dp->get_aux_send_ctl = skl_get_aux_send_ctl;
5867 intel_dp->get_aux_send_ctl = i9xx_get_aux_send_ctl;
5870 intel_dp->prepare_link_retrain = intel_ddi_prepare_link_retrain;
5872 /* Preserve the current hw state. */
5873 intel_dp->DP = I915_READ(intel_dp->output_reg);
5874 intel_dp->attached_connector = intel_connector;
/* Decide the connector type from VBT/strap information for this port. */
5876 if (intel_dp_is_edp(dev, port))
5877 type = DRM_MODE_CONNECTOR_eDP;
5879 type = DRM_MODE_CONNECTOR_DisplayPort;
5882 * For eDP we always set the encoder type to INTEL_OUTPUT_EDP, but
5883 * for DP the encoder type can be set by the caller to
5884 * INTEL_OUTPUT_UNKNOWN for DDI, so don't rewrite it.
5886 if (type == DRM_MODE_CONNECTOR_eDP)
5887 intel_encoder->type = INTEL_OUTPUT_EDP;
5889 /* eDP only on port B and/or C on vlv/chv */
5890 if (WARN_ON((IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) &&
5891 is_edp(intel_dp) && port != PORT_B && port != PORT_C))
5894 DRM_DEBUG_KMS("Adding %s connector on port %c\n",
5895 type == DRM_MODE_CONNECTOR_eDP ? "eDP" : "DP",
/* Register the connector with the DRM core and its helper vtable. */
5898 drm_connector_init(dev, connector, &intel_dp_connector_funcs, type);
5899 drm_connector_helper_add(connector, &intel_dp_connector_helper_funcs);
5901 connector->interlace_allowed = true;
5902 connector->doublescan_allowed = 0;
/* Deferred work to drop panel VDD after AUX transactions finish. */
5904 INIT_DELAYED_WORK(&intel_dp->panel_vdd_work,
5905 edp_panel_vdd_work);
5907 intel_connector_attach_encoder(intel_connector, intel_encoder);
5908 drm_connector_register(connector);
/* DDI platforms need the DDI-specific hw-state readout; others use the
 * generic one (the selecting `if`/`else` lines are in an extraction gap). */
5911 intel_connector->get_hw_state = intel_ddi_connector_get_hw_state;
5913 intel_connector->get_hw_state = intel_connector_get_hw_state;
5914 intel_connector->unregister = intel_dp_connector_unregister;
5916 /* Set up the hotplug pin. */
/* Per-port HPD pin assignment (the switch/case labels are in gaps). */
5919 intel_encoder->hpd_pin = HPD_PORT_A;
5922 intel_encoder->hpd_pin = HPD_PORT_B;
/* BXT A1 workaround: port B uses the port A hotplug pin. */
5923 if (IS_BXT_REVID(dev, 0, BXT_REVID_A1))
5924 intel_encoder->hpd_pin = HPD_PORT_A;
5927 intel_encoder->hpd_pin = HPD_PORT_C;
5930 intel_encoder->hpd_pin = HPD_PORT_D;
5933 intel_encoder->hpd_pin = HPD_PORT_E;
/* eDP panels need the power sequencer configured before AUX is usable. */
5939 if (is_edp(intel_dp)) {
5941 intel_dp_init_panel_power_timestamps(intel_dp);
5942 if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
5943 vlv_initial_power_sequencer_setup(intel_dp);
5945 intel_dp_init_panel_power_sequencer(dev, intel_dp);
5946 pps_unlock(intel_dp);
5949 ret = intel_dp_aux_init(intel_dp, intel_connector);
5953 /* init MST on ports that can support it */
5954 if (HAS_DP_MST(dev) &&
5955 (port == PORT_B || port == PORT_C || port == PORT_D))
5956 intel_dp_mst_encoder_init(intel_dig_port,
5957 intel_connector->base.base.id);
/* eDP panel probe failed: unwind the AUX and MST setup done above. */
5959 if (!intel_edp_init_connector(intel_dp, intel_connector)) {
5960 intel_dp_aux_fini(intel_dp);
5961 intel_dp_mst_encoder_cleanup(intel_dig_port);
5965 intel_dp_add_properties(intel_dp, connector);
5967 /* For G4X desktop chip, PEG_BAND_GAP_DATA 3:0 must first be written
5968 * 0xd. Failure to do so will result in spurious interrupts being
5969 * generated on the port when a cable is not attached.
5971 if (IS_G4X(dev) && !IS_GM45(dev)) {
5972 u32 temp = I915_READ(PEG_BAND_GAP_DATA);
5973 I915_WRITE(PEG_BAND_GAP_DATA, (temp & ~0xf) | 0xd);
5976 i915_debugfs_connector_add(connector);
/* --- error path: tear down everything set up above --- */
5981 if (is_edp(intel_dp)) {
5982 cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
5984 * vdd might still be enabled due to the delayed vdd off.
5985 * Make sure vdd is actually turned off here.
5988 edp_panel_vdd_off_sync(intel_dp);
5989 pps_unlock(intel_dp);
5991 drm_connector_unregister(connector);
5992 drm_connector_cleanup(connector);
/*
 * intel_dp_init - allocate and register a DP encoder/connector pair.
 * @dev: DRM device
 * @output_reg: the port's DP control register
 * @port: which digital port this output sits on
 *
 * Allocates the digital port and connector, initializes the encoder with
 * per-platform enable/disable hooks, then hands off to
 * intel_dp_init_connector(). On failure, unwinds via the goto labels at
 * the bottom (some label lines fall in extraction gaps in this view).
 */
5998 intel_dp_init(struct drm_device *dev,
5999 i915_reg_t output_reg, enum port port)
6001 struct drm_i915_private *dev_priv = dev->dev_private;
6002 struct intel_digital_port *intel_dig_port;
6003 struct intel_encoder *intel_encoder;
6004 struct drm_encoder *encoder;
6005 struct intel_connector *intel_connector;
6007 intel_dig_port = kzalloc(sizeof(*intel_dig_port), GFP_KERNEL);
6008 if (!intel_dig_port)
6011 intel_connector = intel_connector_alloc();
6012 if (!intel_connector)
6013 goto err_connector_alloc;
6015 intel_encoder = &intel_dig_port->base;
6016 encoder = &intel_encoder->base;
6018 if (drm_encoder_init(dev, &intel_encoder->base, &intel_dp_enc_funcs,
6019 DRM_MODE_ENCODER_TMDS, NULL))
6020 goto err_encoder_init;
6022 intel_encoder->compute_config = intel_dp_compute_config;
6023 intel_encoder->disable = intel_disable_dp;
6024 intel_encoder->get_hw_state = intel_dp_get_hw_state;
6025 intel_encoder->get_config = intel_dp_get_config;
6026 intel_encoder->suspend = intel_dp_encoder_suspend;
/* Per-platform enable/disable hook selection: CHV, VLV, then the
 * g4x/ilk+ fallback (the fallback's `else` line is in a gap). */
6027 if (IS_CHERRYVIEW(dev)) {
6028 intel_encoder->pre_pll_enable = chv_dp_pre_pll_enable;
6029 intel_encoder->pre_enable = chv_pre_enable_dp;
6030 intel_encoder->enable = vlv_enable_dp;
6031 intel_encoder->post_disable = chv_post_disable_dp;
6032 intel_encoder->post_pll_disable = chv_dp_post_pll_disable;
6033 } else if (IS_VALLEYVIEW(dev)) {
6034 intel_encoder->pre_pll_enable = vlv_dp_pre_pll_enable;
6035 intel_encoder->pre_enable = vlv_pre_enable_dp;
6036 intel_encoder->enable = vlv_enable_dp;
6037 intel_encoder->post_disable = vlv_post_disable_dp;
6039 intel_encoder->pre_enable = g4x_pre_enable_dp;
6040 intel_encoder->enable = g4x_enable_dp;
6041 if (INTEL_INFO(dev)->gen >= 5)
6042 intel_encoder->post_disable = ilk_post_disable_dp;
6045 intel_dig_port->port = port;
6046 intel_dig_port->dp.output_reg = output_reg;
6047 intel_dig_port->max_lanes = 4;
6049 intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
/* Which pipes this encoder can drive differs per platform (the
 * port-dependent branch lines are in extraction gaps). */
6050 if (IS_CHERRYVIEW(dev)) {
6052 intel_encoder->crtc_mask = 1 << 2;
6054 intel_encoder->crtc_mask = (1 << 0) | (1 << 1);
6056 intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
6058 intel_encoder->cloneable = 0;
/* Register for long/short HPD pulse handling on this port. */
6060 intel_dig_port->hpd_pulse = intel_dp_hpd_pulse;
6061 dev_priv->hotplug.irq_port[port] = intel_dig_port;
6063 if (!intel_dp_init_connector(intel_dig_port, intel_connector))
6064 goto err_init_connector;
/* --- error unwind (labels partially elided in this extraction) --- */
6069 drm_encoder_cleanup(encoder);
6071 kfree(intel_connector);
6072 err_connector_alloc:
6073 kfree(intel_dig_port);
/*
 * intel_dp_mst_suspend - suspend MST topology managers on all DP ports.
 * @dev: DRM device
 *
 * Walks every hotplug IRQ port and, for DP outputs with an active MST
 * session, suspends the MST topology manager. (Closing braces of the loop
 * and function fall in an extraction gap in this view.)
 */
6078 void intel_dp_mst_suspend(struct drm_device *dev)
6080 struct drm_i915_private *dev_priv = dev->dev_private;
6084 for (i = 0; i < I915_MAX_PORTS; i++) {
6085 struct intel_digital_port *intel_dig_port = dev_priv->hotplug.irq_port[i];
/* Skip ports with no registered digital port. */
6086 if (!intel_dig_port)
6089 if (intel_dig_port->base.type == INTEL_OUTPUT_DISPLAYPORT) {
/* Only ports that negotiated MST capability need suspending. */
6090 if (!intel_dig_port->dp.can_mst)
6092 if (intel_dig_port->dp.is_mst)
6093 drm_dp_mst_topology_mgr_suspend(&intel_dig_port->dp.mst_mgr);
6098 void intel_dp_mst_resume(struct drm_device *dev)
6100 struct drm_i915_private *dev_priv = dev->dev_private;
6103 for (i = 0; i < I915_MAX_PORTS; i++) {
6104 struct intel_digital_port *intel_dig_port = dev_priv->hotplug.irq_port[i];
6105 if (!intel_dig_port)
6107 if (intel_dig_port->base.type == INTEL_OUTPUT_DISPLAYPORT) {
6110 if (!intel_dig_port->dp.can_mst)
6113 ret = drm_dp_mst_topology_mgr_resume(&intel_dig_port->dp.mst_mgr);
6115 intel_dp_check_mst_status(&intel_dig_port->dp);