/*
 * Copyright © 2008 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
#include <linux/i2c.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/notifier.h>
#include <linux/reboot.h>
#include <drm/drmP.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_edid.h>
#include "intel_drv.h"
#include <drm/i915_drm.h>
#include "i915_drv.h"
#define DP_LINK_CHECK_TIMEOUT	(10 * 1000)

/* Compliance test status bits */
#define INTEL_DP_RESOLUTION_SHIFT_MASK	0
#define INTEL_DP_RESOLUTION_PREFERRED	(1 << INTEL_DP_RESOLUTION_SHIFT_MASK)
#define INTEL_DP_RESOLUTION_STANDARD	(2 << INTEL_DP_RESOLUTION_SHIFT_MASK)
#define INTEL_DP_RESOLUTION_FAILSAFE	(3 << INTEL_DP_RESOLUTION_SHIFT_MASK)
struct dp_link_dpll {
	int link_bw;
	struct dpll dpll;
};

static const struct dp_link_dpll gen4_dpll[] = {
	{ DP_LINK_BW_1_62,
		{ .p1 = 2, .p2 = 10, .n = 2, .m1 = 23, .m2 = 8 } },
	{ DP_LINK_BW_2_7,
		{ .p1 = 1, .p2 = 10, .n = 1, .m1 = 14, .m2 = 2 } }
};

static const struct dp_link_dpll pch_dpll[] = {
	{ DP_LINK_BW_1_62,
		{ .p1 = 2, .p2 = 10, .n = 1, .m1 = 12, .m2 = 9 } },
	{ DP_LINK_BW_2_7,
		{ .p1 = 1, .p2 = 10, .n = 2, .m1 = 14, .m2 = 8 } }
};

static const struct dp_link_dpll vlv_dpll[] = {
	{ DP_LINK_BW_1_62,
		{ .p1 = 3, .p2 = 2, .n = 5, .m1 = 3, .m2 = 81 } },
	{ DP_LINK_BW_2_7,
		{ .p1 = 2, .p2 = 2, .n = 1, .m1 = 2, .m2 = 27 } }
};
/*
 * CHV supports eDP 1.4, which has more link rates.
 * Below we list only the fixed rates and exclude the variable rates.
 */
static const struct dp_link_dpll chv_dpll[] = {
	/*
	 * CHV requires programming fractional division for m2.
	 * m2 is stored in fixed-point format using the formula below:
	 * (m2_int << 22) | m2_fraction
	 */
	{ DP_LINK_BW_1_62,	/* m2_int = 32, m2_fraction = 1677722 */
		{ .p1 = 4, .p2 = 2, .n = 1, .m1 = 2, .m2 = 0x819999a } },
	{ DP_LINK_BW_2_7,	/* m2_int = 27, m2_fraction = 0 */
		{ .p1 = 4, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 } },
	{ DP_LINK_BW_5_4,	/* m2_int = 27, m2_fraction = 0 */
		{ .p1 = 2, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 } }
};
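/*
 * Worked example of the fixed-point m2 encoding above (illustration only):
 * for the 1.62 GHz entry, m2_int = 32 and m2_fraction = 1677722, so
 *
 *     m2 = (32 << 22) | 1677722 = 0x8000000 | 0x19999a = 0x819999a,
 *
 * which matches the .m2 value in the table. Since 1677722 / 2^22 is
 * roughly 0.4, this encodes an effective m2 of about 32.4.
 */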
static const int skl_rates[] = { 162000, 216000, 270000,
				 324000, 432000, 540000 };
static const int chv_rates[] = { 162000, 202500, 210000, 216000,
				 243000, 270000, 324000, 405000,
				 420000, 432000, 540000 };
static const int default_rates[] = { 162000, 270000, 540000 };
/**
 * is_edp - is the given port attached to an eDP panel (either CPU or PCH)
 * @intel_dp: DP struct
 *
 * If a CPU or PCH DP output is attached to an eDP panel, this function
 * will return true, and false otherwise.
 */
static bool is_edp(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);

	return intel_dig_port->base.type == INTEL_OUTPUT_EDP;
}
static struct drm_device *intel_dp_to_dev(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);

	return intel_dig_port->base.base.dev;
}

static struct intel_dp *intel_attached_dp(struct drm_connector *connector)
{
	return enc_to_intel_dp(&intel_attached_encoder(connector)->base);
}

static void intel_dp_link_down(struct intel_dp *intel_dp);
static bool edp_panel_vdd_on(struct intel_dp *intel_dp);
static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync);
static void vlv_init_panel_power_sequencer(struct intel_dp *intel_dp);
static void vlv_steal_power_sequencer(struct drm_device *dev,
				      enum pipe pipe);
static int
intel_dp_max_link_bw(struct intel_dp *intel_dp)
{
	int max_link_bw = intel_dp->dpcd[DP_MAX_LINK_RATE];

	switch (max_link_bw) {
	case DP_LINK_BW_1_62:
	case DP_LINK_BW_2_7:
	case DP_LINK_BW_5_4:
		break;
	default:
		WARN(1, "invalid max DP link bw val %x, using 1.62Gbps\n",
		     max_link_bw);
		max_link_bw = DP_LINK_BW_1_62;
		break;
	}
	return max_link_bw;
}
static u8 intel_dp_max_lane_count(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	u8 source_max, sink_max;

	source_max = 4;
	if (HAS_DDI(dev) && intel_dig_port->port == PORT_A &&
	    (intel_dig_port->saved_port_bits & DDI_A_4_LANES) == 0)
		source_max = 2;

	sink_max = drm_dp_max_lane_count(intel_dp->dpcd);

	return min(source_max, sink_max);
}
/*
 * The units on the numbers in the next two are... bizarre. Examples will
 * make it clearer; this one parallels an example in the eDP spec.
 *
 * intel_dp_max_data_rate for one lane of 2.7GHz evaluates as:
 *
 *     270000 * 1 * 8 / 10 == 216000
 *
 * The actual data capacity of that configuration is 2.16Gbit/s, so the
 * units are decakilobits. ->clock in a drm_display_mode is in kilohertz -
 * or equivalently, kilopixels per second - so for 1680x1050R it'd be
 * 119000. At 18bpp that's 2142000 kilobits per second.
 *
 * Thus the strange-looking division by 10 in intel_dp_link_required, to
 * get the result in decakilobits instead of kilobits.
 */
static int
intel_dp_link_required(int pixel_clock, int bpp)
{
	return (pixel_clock * bpp + 9) / 10;
}

static int
intel_dp_max_data_rate(int max_link_clock, int max_lanes)
{
	return (max_link_clock * max_lanes * 8) / 10;
}
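/*
 * Worked example for the two helpers above (illustration only):
 * 1680x1050R has a 119000 kHz dotclock, so at 18bpp
 *
 *     intel_dp_link_required(119000, 18) == (119000 * 18 + 9) / 10
 *                                        == 214200 decakilobits,
 *
 * while a single 2.7GHz lane provides
 *
 *     intel_dp_max_data_rate(270000, 1) == 270000 * 1 * 8 / 10
 *                                       == 216000 decakilobits,
 *
 * so that mode just fits on one lane (214200 <= 216000).
 */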
static enum drm_mode_status
intel_dp_mode_valid(struct drm_connector *connector,
		    struct drm_display_mode *mode)
{
	struct intel_dp *intel_dp = intel_attached_dp(connector);
	struct intel_connector *intel_connector = to_intel_connector(connector);
	struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode;
	int target_clock = mode->clock;
	int max_rate, mode_rate, max_lanes, max_link_clock;

	if (is_edp(intel_dp) && fixed_mode) {
		if (mode->hdisplay > fixed_mode->hdisplay)
			return MODE_PANEL;

		if (mode->vdisplay > fixed_mode->vdisplay)
			return MODE_PANEL;

		target_clock = fixed_mode->clock;
	}

	max_link_clock = intel_dp_max_link_rate(intel_dp);
	max_lanes = intel_dp_max_lane_count(intel_dp);

	max_rate = intel_dp_max_data_rate(max_link_clock, max_lanes);
	mode_rate = intel_dp_link_required(target_clock, 18);

	if (mode_rate > max_rate)
		return MODE_CLOCK_HIGH;

	if (mode->clock < 10000)
		return MODE_CLOCK_LOW;

	if (mode->flags & DRM_MODE_FLAG_DBLCLK)
		return MODE_H_ILLEGAL;

	return MODE_OK;
}
uint32_t intel_dp_pack_aux(const uint8_t *src, int src_bytes)
{
	int i;
	uint32_t v = 0;

	if (src_bytes > 4)
		src_bytes = 4;
	for (i = 0; i < src_bytes; i++)
		v |= ((uint32_t) src[i]) << ((3-i) * 8);
	return v;
}

static void intel_dp_unpack_aux(uint32_t src, uint8_t *dst, int dst_bytes)
{
	int i;

	if (dst_bytes > 4)
		dst_bytes = 4;
	for (i = 0; i < dst_bytes; i++)
		dst[i] = src >> ((3-i) * 8);
}
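/*
 * Example of the big-endian packing above (illustration only):
 * intel_dp_pack_aux() on the bytes { 0x12, 0x34, 0x56 } returns
 * 0x12345600 -- byte 0 lands in the most significant byte -- and
 * intel_dp_unpack_aux(0x12345600, dst, 3) recovers { 0x12, 0x34, 0x56 }.
 */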
/* hrawclock is 1/4 the FSB frequency */
static int
intel_hrawclk(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t clkcfg;

	/* There is no CLKCFG reg in Valleyview. VLV hrawclk is 200 MHz */
	if (IS_VALLEYVIEW(dev))
		return 200;

	clkcfg = I915_READ(CLKCFG);
	switch (clkcfg & CLKCFG_FSB_MASK) {
	case CLKCFG_FSB_1067:
		return 266;
	case CLKCFG_FSB_1333:
		return 333;
	/* these two are just a guess; one of them might be right */
	case CLKCFG_FSB_1600:
	case CLKCFG_FSB_1600_ALT:
		return 400;
	default:
		return 133;
	}
}
static void
intel_dp_init_panel_power_sequencer(struct drm_device *dev,
				    struct intel_dp *intel_dp);
static void
intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
					      struct intel_dp *intel_dp);
static void pps_lock(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct intel_encoder *encoder = &intel_dig_port->base;
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum intel_display_power_domain power_domain;

	/*
	 * See vlv_power_sequencer_reset() for why we need
	 * a power domain reference here.
	 */
	power_domain = intel_display_port_power_domain(encoder);
	intel_display_power_get(dev_priv, power_domain);

	mutex_lock(&dev_priv->pps_mutex);
}

static void pps_unlock(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct intel_encoder *encoder = &intel_dig_port->base;
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum intel_display_power_domain power_domain;

	mutex_unlock(&dev_priv->pps_mutex);

	power_domain = intel_display_port_power_domain(encoder);
	intel_display_power_put(dev_priv, power_domain);
}
static void
vlv_power_sequencer_kick(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum pipe pipe = intel_dp->pps_pipe;
	bool pll_enabled;
	uint32_t DP;

	if (WARN(I915_READ(intel_dp->output_reg) & DP_PORT_EN,
		 "skipping pipe %c power sequencer kick due to port %c being active\n",
		 pipe_name(pipe), port_name(intel_dig_port->port)))
		return;

	DRM_DEBUG_KMS("kicking pipe %c power sequencer for port %c\n",
		      pipe_name(pipe), port_name(intel_dig_port->port));

	/* Preserve the BIOS-computed detected bit. This is
	 * supposed to be read-only.
	 */
	DP = I915_READ(intel_dp->output_reg) & DP_DETECTED;
	DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
	DP |= DP_PORT_WIDTH(1);
	DP |= DP_LINK_TRAIN_PAT_1;

	if (IS_CHERRYVIEW(dev))
		DP |= DP_PIPE_SELECT_CHV(pipe);
	else if (pipe == PIPE_B)
		DP |= DP_PIPEB_SELECT;

	pll_enabled = I915_READ(DPLL(pipe)) & DPLL_VCO_ENABLE;

	/*
	 * The DPLL for the pipe must be enabled for this to work.
	 * So enable it temporarily if it's not already enabled.
	 */
	if (!pll_enabled)
		vlv_force_pll_on(dev, pipe, IS_CHERRYVIEW(dev) ?
				 &chv_dpll[0].dpll : &vlv_dpll[0].dpll);

	/*
	 * Similar magic as in intel_dp_enable_port().
	 * We _must_ do this port enable + disable trick
	 * to make this power sequencer lock onto the port.
	 * Otherwise even VDD force bit won't work.
	 */
	I915_WRITE(intel_dp->output_reg, DP);
	POSTING_READ(intel_dp->output_reg);

	I915_WRITE(intel_dp->output_reg, DP | DP_PORT_EN);
	POSTING_READ(intel_dp->output_reg);

	I915_WRITE(intel_dp->output_reg, DP & ~DP_PORT_EN);
	POSTING_READ(intel_dp->output_reg);

	if (!pll_enabled)
		vlv_force_pll_off(dev, pipe);
}
static enum pipe
vlv_power_sequencer_pipe(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_encoder *encoder;
	unsigned int pipes = (1 << PIPE_A) | (1 << PIPE_B);
	enum pipe pipe;

	lockdep_assert_held(&dev_priv->pps_mutex);

	/* We should never land here with regular DP ports */
	WARN_ON(!is_edp(intel_dp));

	if (intel_dp->pps_pipe != INVALID_PIPE)
		return intel_dp->pps_pipe;

	/*
	 * We don't have a power sequencer currently.
	 * Pick one that's not used by other ports.
	 */
	list_for_each_entry(encoder, &dev->mode_config.encoder_list,
			    base.head) {
		struct intel_dp *tmp;

		if (encoder->type != INTEL_OUTPUT_EDP)
			continue;

		tmp = enc_to_intel_dp(&encoder->base);

		if (tmp->pps_pipe != INVALID_PIPE)
			pipes &= ~(1 << tmp->pps_pipe);
	}

	/*
	 * Didn't find one. This should not happen since there
	 * are two power sequencers and up to two eDP ports.
	 */
	if (WARN_ON(pipes == 0))
		pipe = PIPE_A;
	else
		pipe = ffs(pipes) - 1;

	vlv_steal_power_sequencer(dev, pipe);
	intel_dp->pps_pipe = pipe;

	DRM_DEBUG_KMS("picked pipe %c power sequencer for port %c\n",
		      pipe_name(intel_dp->pps_pipe),
		      port_name(intel_dig_port->port));

	/* init power sequencer on this pipe and port */
	intel_dp_init_panel_power_sequencer(dev, intel_dp);
	intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);

	/*
	 * Even vdd force doesn't work until we've made
	 * the power sequencer lock in on the port.
	 */
	vlv_power_sequencer_kick(intel_dp);

	return intel_dp->pps_pipe;
}
typedef bool (*vlv_pipe_check)(struct drm_i915_private *dev_priv,
			       enum pipe pipe);

static bool vlv_pipe_has_pp_on(struct drm_i915_private *dev_priv,
			       enum pipe pipe)
{
	return I915_READ(VLV_PIPE_PP_STATUS(pipe)) & PP_ON;
}

static bool vlv_pipe_has_vdd_on(struct drm_i915_private *dev_priv,
				enum pipe pipe)
{
	return I915_READ(VLV_PIPE_PP_CONTROL(pipe)) & EDP_FORCE_VDD;
}

static bool vlv_pipe_any(struct drm_i915_private *dev_priv,
			 enum pipe pipe)
{
	return true;
}

static enum pipe
vlv_initial_pps_pipe(struct drm_i915_private *dev_priv,
		     enum port port,
		     vlv_pipe_check pipe_check)
{
	enum pipe pipe;

	for (pipe = PIPE_A; pipe <= PIPE_B; pipe++) {
		u32 port_sel = I915_READ(VLV_PIPE_PP_ON_DELAYS(pipe)) &
			PANEL_PORT_SELECT_MASK;

		if (port_sel != PANEL_PORT_SELECT_VLV(port))
			continue;

		if (!pipe_check(dev_priv, pipe))
			continue;

		return pipe;
	}

	return INVALID_PIPE;
}
static void
vlv_initial_power_sequencer_setup(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum port port = intel_dig_port->port;

	lockdep_assert_held(&dev_priv->pps_mutex);

	/* try to find a pipe with this port selected */
	/* first pick one where the panel is on */
	intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
						  vlv_pipe_has_pp_on);
	/* didn't find one? pick one where vdd is on */
	if (intel_dp->pps_pipe == INVALID_PIPE)
		intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
							  vlv_pipe_has_vdd_on);
	/* didn't find one? pick one with just the correct port */
	if (intel_dp->pps_pipe == INVALID_PIPE)
		intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
							  vlv_pipe_any);

	/* didn't find one? just let vlv_power_sequencer_pipe() pick one when needed */
	if (intel_dp->pps_pipe == INVALID_PIPE) {
		DRM_DEBUG_KMS("no initial power sequencer for port %c\n",
			      port_name(port));
		return;
	}

	DRM_DEBUG_KMS("initial power sequencer for port %c: pipe %c\n",
		      port_name(port), pipe_name(intel_dp->pps_pipe));

	intel_dp_init_panel_power_sequencer(dev, intel_dp);
	intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
}
void vlv_power_sequencer_reset(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;
	struct intel_encoder *encoder;

	if (WARN_ON(!IS_VALLEYVIEW(dev)))
		return;

	/*
	 * We can't grab pps_mutex here due to deadlock with power_domain
	 * mutex when power_domain functions are called while holding pps_mutex.
	 * That also means that in order to use pps_pipe the code needs to
	 * hold both a power domain reference and pps_mutex, and the power domain
	 * reference get/put must be done while _not_ holding pps_mutex.
	 * pps_{lock,unlock}() do these steps in the correct order, so one
	 * should use them always.
	 */

	list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.head) {
		struct intel_dp *intel_dp;

		if (encoder->type != INTEL_OUTPUT_EDP)
			continue;

		intel_dp = enc_to_intel_dp(&encoder->base);
		intel_dp->pps_pipe = INVALID_PIPE;
	}
}
static u32 _pp_ctrl_reg(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);

	if (HAS_PCH_SPLIT(dev))
		return PCH_PP_CONTROL;
	else
		return VLV_PIPE_PP_CONTROL(vlv_power_sequencer_pipe(intel_dp));
}

static u32 _pp_stat_reg(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);

	if (HAS_PCH_SPLIT(dev))
		return PCH_PP_STATUS;
	else
		return VLV_PIPE_PP_STATUS(vlv_power_sequencer_pipe(intel_dp));
}
/* Reboot notifier handler to shut down panel power, to guarantee T12 timing.
   This function is only applicable when the panel PM state is not to be
   tracked. */
static int edp_notify_handler(struct notifier_block *this, unsigned long code,
			      void *unused)
{
	struct intel_dp *intel_dp = container_of(this, typeof(* intel_dp),
						 edp_notifier);
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 pp_div;
	u32 pp_ctrl_reg, pp_div_reg;

	if (!is_edp(intel_dp) || code != SYS_RESTART)
		return 0;

	pps_lock(intel_dp);

	if (IS_VALLEYVIEW(dev)) {
		enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);

		pp_ctrl_reg = VLV_PIPE_PP_CONTROL(pipe);
		pp_div_reg  = VLV_PIPE_PP_DIVISOR(pipe);
		pp_div = I915_READ(pp_div_reg);
		pp_div &= PP_REFERENCE_DIVIDER_MASK;

		/* 0x1F write to PP_DIV_REG sets max cycle delay */
		I915_WRITE(pp_div_reg, pp_div | 0x1F);
		I915_WRITE(pp_ctrl_reg, PANEL_UNLOCK_REGS | PANEL_POWER_OFF);
		msleep(intel_dp->panel_power_cycle_delay);
	}

	pps_unlock(intel_dp);

	return 0;
}
static bool edp_have_panel_power(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;

	lockdep_assert_held(&dev_priv->pps_mutex);

	if (IS_VALLEYVIEW(dev) &&
	    intel_dp->pps_pipe == INVALID_PIPE)
		return false;

	return (I915_READ(_pp_stat_reg(intel_dp)) & PP_ON) != 0;
}

static bool edp_have_panel_vdd(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;

	lockdep_assert_held(&dev_priv->pps_mutex);

	if (IS_VALLEYVIEW(dev) &&
	    intel_dp->pps_pipe == INVALID_PIPE)
		return false;

	return I915_READ(_pp_ctrl_reg(intel_dp)) & EDP_FORCE_VDD;
}
static void
intel_dp_check_edp(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!is_edp(intel_dp))
		return;

	if (!edp_have_panel_power(intel_dp) && !edp_have_panel_vdd(intel_dp)) {
		WARN(1, "eDP powered off while attempting aux channel communication.\n");
		DRM_DEBUG_KMS("Status 0x%08x Control 0x%08x\n",
			      I915_READ(_pp_stat_reg(intel_dp)),
			      I915_READ(_pp_ctrl_reg(intel_dp)));
	}
}
static uint32_t
intel_dp_aux_wait_done(struct intel_dp *intel_dp, bool has_aux_irq)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t ch_ctl = intel_dp->aux_ch_ctl_reg;
	uint32_t status;
	bool done;

#define C (((status = I915_READ_NOTRACE(ch_ctl)) & DP_AUX_CH_CTL_SEND_BUSY) == 0)
	if (has_aux_irq)
		done = wait_event_timeout(dev_priv->gmbus_wait_queue, C,
					  msecs_to_jiffies_timeout(10));
	else
		done = wait_for_atomic(C, 10) == 0;
	if (!done)
		DRM_ERROR("dp aux hw did not signal timeout (has irq: %i)!\n",
			  has_aux_irq);
#undef C

	return status;
}
static uint32_t i9xx_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;

	/*
	 * The clock divider is based on the hrawclk, and the AUX clock wants
	 * to run at 2MHz. So, take the hrawclk value and divide by 2 and use
	 * that.
	 */
	return index ? 0 : intel_hrawclk(dev) / 2;
}
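/*
 * Example (illustration only): with a 200 MHz hrawclk (e.g. an 800 MHz
 * FSB), the divider above is 200 / 2 == 100, which yields the desired
 * 200 MHz / 100 == 2 MHz AUX clock.
 */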
static uint32_t ilk_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (index)
		return 0;

	if (intel_dig_port->port == PORT_A) {
		return DIV_ROUND_UP(dev_priv->display.get_display_clock_speed(dev), 2000);
	} else {
		return DIV_ROUND_UP(intel_pch_rawclk(dev), 2);
	}
}
static uint32_t hsw_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (intel_dig_port->port == PORT_A) {
		if (index)
			return 0;
		return DIV_ROUND_CLOSEST(dev_priv->display.get_display_clock_speed(dev), 2000);
	} else if (dev_priv->pch_id == INTEL_PCH_LPT_DEVICE_ID_TYPE) {
		/* Workaround for non-ULT HSW */
		switch (index) {
		case 0: return 63;
		case 1: return 72;
		default: return 0;
		}
	} else {
		return index ? 0 : DIV_ROUND_UP(intel_pch_rawclk(dev), 2);
	}
}
static uint32_t vlv_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
	return index ? 0 : 100;
}

static uint32_t skl_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
	/*
	 * SKL doesn't need us to program the AUX clock divider (Hardware will
	 * derive the clock from CDCLK automatically). We still implement the
	 * get_aux_clock_divider vfunc to plug-in into the existing code.
	 */
	return index ? 0 : 1;
}
static uint32_t i9xx_get_aux_send_ctl(struct intel_dp *intel_dp,
				      bool has_aux_irq,
				      int send_bytes,
				      uint32_t aux_clock_divider)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	uint32_t precharge, timeout;

	if (IS_GEN6(dev))
		precharge = 3;
	else
		precharge = 5;

	if (IS_BROADWELL(dev) && intel_dp->aux_ch_ctl_reg == DPA_AUX_CH_CTL)
		timeout = DP_AUX_CH_CTL_TIME_OUT_600us;
	else
		timeout = DP_AUX_CH_CTL_TIME_OUT_400us;

	return DP_AUX_CH_CTL_SEND_BUSY |
	       DP_AUX_CH_CTL_DONE |
	       (has_aux_irq ? DP_AUX_CH_CTL_INTERRUPT : 0) |
	       DP_AUX_CH_CTL_TIME_OUT_ERROR |
	       timeout |
	       DP_AUX_CH_CTL_RECEIVE_ERROR |
	       (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
	       (precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) |
	       (aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT);
}
static uint32_t skl_get_aux_send_ctl(struct intel_dp *intel_dp,
				     bool has_aux_irq,
				     int send_bytes,
				     uint32_t unused)
{
	return DP_AUX_CH_CTL_SEND_BUSY |
	       DP_AUX_CH_CTL_DONE |
	       (has_aux_irq ? DP_AUX_CH_CTL_INTERRUPT : 0) |
	       DP_AUX_CH_CTL_TIME_OUT_ERROR |
	       DP_AUX_CH_CTL_TIME_OUT_1600us |
	       DP_AUX_CH_CTL_RECEIVE_ERROR |
	       (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
	       DP_AUX_CH_CTL_SYNC_PULSE_SKL(32);
}
static int
intel_dp_aux_ch(struct intel_dp *intel_dp,
		const uint8_t *send, int send_bytes,
		uint8_t *recv, int recv_size)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t ch_ctl = intel_dp->aux_ch_ctl_reg;
	uint32_t ch_data = ch_ctl + 4;
	uint32_t aux_clock_divider;
	int i, ret, recv_bytes;
	uint32_t status;
	int try, clock = 0;
	bool has_aux_irq = HAS_AUX_IRQ(dev);
	bool vdd;

	pps_lock(intel_dp);

	/*
	 * We will be called with VDD already enabled for dpcd/edid/oui reads.
	 * In such cases we want to leave VDD enabled and it's up to upper layers
	 * to turn it off. But for eg. i2c-dev access we need to turn it on/off
	 * ourselves.
	 */
	vdd = edp_panel_vdd_on(intel_dp);

	/* dp aux is extremely sensitive to irq latency, hence request the
	 * lowest possible wakeup latency and so prevent the cpu from going into
	 * deep sleep states.
	 */
	pm_qos_update_request(&dev_priv->pm_qos, 0);

	intel_dp_check_edp(intel_dp);

	intel_aux_display_runtime_get(dev_priv);

	/* Try to wait for any previous AUX channel activity */
	for (try = 0; try < 3; try++) {
		status = I915_READ_NOTRACE(ch_ctl);
		if ((status & DP_AUX_CH_CTL_SEND_BUSY) == 0)
			break;
		msleep(1);
	}

	if (try == 3) {
		WARN(1, "dp_aux_ch not started status 0x%08x\n",
		     I915_READ(ch_ctl));
		ret = -EBUSY;
		goto out;
	}

	/* Only 5 data registers! */
	if (WARN_ON(send_bytes > 20 || recv_size > 20)) {
		ret = -E2BIG;
		goto out;
	}

	while ((aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, clock++))) {
		u32 send_ctl = intel_dp->get_aux_send_ctl(intel_dp,
							  has_aux_irq,
							  send_bytes,
							  aux_clock_divider);

		/* Must try at least 3 times according to DP spec */
		for (try = 0; try < 5; try++) {
			/* Load the send data into the aux channel data registers */
			for (i = 0; i < send_bytes; i += 4)
				I915_WRITE(ch_data + i,
					   intel_dp_pack_aux(send + i,
							     send_bytes - i));

			/* Send the command and wait for it to complete */
			I915_WRITE(ch_ctl, send_ctl);

			status = intel_dp_aux_wait_done(intel_dp, has_aux_irq);

			/* Clear done status and any errors */
			I915_WRITE(ch_ctl,
				   status |
				   DP_AUX_CH_CTL_DONE |
				   DP_AUX_CH_CTL_TIME_OUT_ERROR |
				   DP_AUX_CH_CTL_RECEIVE_ERROR);

			if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR)
				continue;

			/* DP CTS 1.2 Core Rev 1.1, 4.2.1.1 & 4.2.1.2
			 *   400us delay required for errors and timeouts
			 *   Timeout errors from the HW already meet this
			 *   requirement so skip to next iteration
			 */
			if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) {
				usleep_range(400, 500);
				continue;
			}
			if (status & DP_AUX_CH_CTL_DONE)
				goto done;
		}
	}

	if ((status & DP_AUX_CH_CTL_DONE) == 0) {
		DRM_ERROR("dp_aux_ch not done status 0x%08x\n", status);
		ret = -EBUSY;
		goto out;
	}

done:
	/* Check for timeout or receive error.
	 * Timeouts occur when the sink is not connected
	 */
	if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) {
		DRM_ERROR("dp_aux_ch receive error status 0x%08x\n", status);
		ret = -EIO;
		goto out;
	}

	/* Timeouts occur when the device isn't connected, so they're
	 * "normal" -- don't fill the kernel log with these */
	if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR) {
		DRM_DEBUG_KMS("dp_aux_ch timeout status 0x%08x\n", status);
		ret = -ETIMEDOUT;
		goto out;
	}

	/* Unload any bytes sent back from the other side */
	recv_bytes = ((status & DP_AUX_CH_CTL_MESSAGE_SIZE_MASK) >>
		      DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT);
	if (recv_bytes > recv_size)
		recv_bytes = recv_size;

	for (i = 0; i < recv_bytes; i += 4)
		intel_dp_unpack_aux(I915_READ(ch_data + i),
				    recv + i, recv_bytes - i);

	ret = recv_bytes;
out:
	pm_qos_update_request(&dev_priv->pm_qos, PM_QOS_DEFAULT_VALUE);
	intel_aux_display_runtime_put(dev_priv);

	if (vdd)
		edp_panel_vdd_off(intel_dp, false);

	pps_unlock(intel_dp);

	return ret;
}
#define BARE_ADDRESS_SIZE	3
#define HEADER_SIZE		(BARE_ADDRESS_SIZE + 1)
static ssize_t
intel_dp_aux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
{
	struct intel_dp *intel_dp = container_of(aux, struct intel_dp, aux);
	uint8_t txbuf[20], rxbuf[20];
	size_t txsize, rxsize;
	int ret;

	txbuf[0] = (msg->request << 4) |
		((msg->address >> 16) & 0xf);
	txbuf[1] = (msg->address >> 8) & 0xff;
	txbuf[2] = msg->address & 0xff;
	txbuf[3] = msg->size - 1;
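	/*
	 * Example of the header layout above (illustration only): a native
	 * AUX read of 16 bytes at DPCD address 0x00000 packs as
	 * txbuf[] = { 0x90, 0x00, 0x00, 0x0f } -- the request code in the
	 * high nibble of byte 0, the 20-bit address split across bytes 0-2,
	 * and length - 1 in byte 3.
	 */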
	switch (msg->request & ~DP_AUX_I2C_MOT) {
	case DP_AUX_NATIVE_WRITE:
	case DP_AUX_I2C_WRITE:
		txsize = msg->size ? HEADER_SIZE + msg->size : BARE_ADDRESS_SIZE;
		rxsize = 2; /* 0 or 1 data bytes */

		if (WARN_ON(txsize > 20))
			return -E2BIG;

		memcpy(txbuf + HEADER_SIZE, msg->buffer, msg->size);

		ret = intel_dp_aux_ch(intel_dp, txbuf, txsize, rxbuf, rxsize);
		if (ret > 0) {
			msg->reply = rxbuf[0] >> 4;

			if (ret > 1) {
				/* Number of bytes written in a short write. */
				ret = clamp_t(int, rxbuf[1], 0, msg->size);
			} else {
				/* Return payload size. */
				ret = msg->size;
			}
		}
		break;

	case DP_AUX_NATIVE_READ:
	case DP_AUX_I2C_READ:
		txsize = msg->size ? HEADER_SIZE : BARE_ADDRESS_SIZE;
		rxsize = msg->size + 1;

		if (WARN_ON(rxsize > 20))
			return -E2BIG;

		ret = intel_dp_aux_ch(intel_dp, txbuf, txsize, rxbuf, rxsize);
		if (ret > 0) {
			msg->reply = rxbuf[0] >> 4;
			/*
			 * Assume happy day, and copy the data. The caller is
			 * expected to check msg->reply before touching it.
			 *
			 * Return payload size.
			 */
			ret--;
			memcpy(msg->buffer, rxbuf + 1, ret);
		}
		break;

	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}
static void
intel_dp_aux_init(struct intel_dp *intel_dp, struct intel_connector *connector)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	enum port port = intel_dig_port->port;
	const char *name = NULL;
	int ret;

	switch (port) {
	case PORT_A:
		intel_dp->aux_ch_ctl_reg = DPA_AUX_CH_CTL;
		name = "DPDDC-A";
		break;
	case PORT_B:
		intel_dp->aux_ch_ctl_reg = PCH_DPB_AUX_CH_CTL;
		name = "DPDDC-B";
		break;
	case PORT_C:
		intel_dp->aux_ch_ctl_reg = PCH_DPC_AUX_CH_CTL;
		name = "DPDDC-C";
		break;
	case PORT_D:
		intel_dp->aux_ch_ctl_reg = PCH_DPD_AUX_CH_CTL;
		name = "DPDDC-D";
		break;
	default:
		BUG();
	}

	/*
	 * The AUX_CTL register is usually DP_CTL + 0x10.
	 *
	 * On Haswell and Broadwell though:
	 *   - Both port A DDI_BUF_CTL and DDI_AUX_CTL are on the CPU
	 *   - Port B/C/D AUX channels are on the PCH, DDI_BUF_CTL on the CPU
	 *
	 * Skylake moves AUX_CTL back next to DDI_BUF_CTL, on the CPU.
	 */
	if (!IS_HASWELL(dev) && !IS_BROADWELL(dev))
		intel_dp->aux_ch_ctl_reg = intel_dp->output_reg + 0x10;

	intel_dp->aux.name = name;
	intel_dp->aux.dev = dev->dev;
	intel_dp->aux.transfer = intel_dp_aux_transfer;

	DRM_DEBUG_KMS("registering %s bus for %s\n", name,
		      connector->base.kdev->kobj.name);

	ret = drm_dp_aux_register(&intel_dp->aux);
	if (ret < 0) {
		DRM_ERROR("drm_dp_aux_register() for %s failed (%d)\n",
			  name, ret);
		return;
	}

	ret = sysfs_create_link(&connector->base.kdev->kobj,
				&intel_dp->aux.ddc.dev.kobj,
				intel_dp->aux.ddc.dev.kobj.name);
	if (ret < 0) {
		DRM_ERROR("sysfs_create_link() for %s failed (%d)\n", name, ret);
		drm_dp_aux_unregister(&intel_dp->aux);
	}
}
static void
intel_dp_connector_unregister(struct intel_connector *intel_connector)
{
	struct intel_dp *intel_dp = intel_attached_dp(&intel_connector->base);

	if (!intel_connector->mst_port)
		sysfs_remove_link(&intel_connector->base.kdev->kobj,
				  intel_dp->aux.ddc.dev.kobj.name);
	intel_connector_unregister(intel_connector);
}
static void
skl_edp_set_pll_config(struct intel_crtc_state *pipe_config, int link_clock)
{
	u32 ctrl1;

	memset(&pipe_config->dpll_hw_state, 0,
	       sizeof(pipe_config->dpll_hw_state));

	pipe_config->ddi_pll_sel = SKL_DPLL0;
	pipe_config->dpll_hw_state.cfgcr1 = 0;
	pipe_config->dpll_hw_state.cfgcr2 = 0;

	ctrl1 = DPLL_CTRL1_OVERRIDE(SKL_DPLL0);
	switch (link_clock / 2) {
	case 81000:
		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_810,
					      SKL_DPLL0);
		break;
	case 135000:
		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1350,
					      SKL_DPLL0);
		break;
	case 270000:
		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2700,
					      SKL_DPLL0);
		break;
	case 162000:
		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1620,
					      SKL_DPLL0);
		break;
	/* TBD: For DP link rates 2.16 GHz and 4.32 GHz, VCO is 8640 which
	   results in CDCLK change. Need to handle the change of CDCLK by
	   disabling pipes and re-enabling them */
	case 108000:
		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1080,
					      SKL_DPLL0);
		break;
	case 216000:
		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2160,
					      SKL_DPLL0);
		break;
	}
	pipe_config->dpll_hw_state.ctrl1 = ctrl1;
}
static void
hsw_dp_set_ddi_pll_sel(struct intel_crtc_state *pipe_config, int link_bw)
{
	memset(&pipe_config->dpll_hw_state, 0,
	       sizeof(pipe_config->dpll_hw_state));

	switch (link_bw) {
	case DP_LINK_BW_1_62:
		pipe_config->ddi_pll_sel = PORT_CLK_SEL_LCPLL_810;
		break;
	case DP_LINK_BW_2_7:
		pipe_config->ddi_pll_sel = PORT_CLK_SEL_LCPLL_1350;
		break;
	case DP_LINK_BW_5_4:
		pipe_config->ddi_pll_sel = PORT_CLK_SEL_LCPLL_2700;
		break;
	}
}
static int
intel_dp_sink_rates(struct intel_dp *intel_dp, const int **sink_rates)
{
	if (intel_dp->num_sink_rates) {
		*sink_rates = intel_dp->sink_rates;
		return intel_dp->num_sink_rates;
	}

	*sink_rates = default_rates;

	return (intel_dp_max_link_bw(intel_dp) >> 3) + 1;
}
static int
intel_dp_source_rates(struct drm_device *dev, const int **source_rates)
{
	if (IS_SKYLAKE(dev)) {
		*source_rates = skl_rates;
		return ARRAY_SIZE(skl_rates);
	} else if (IS_CHERRYVIEW(dev)) {
		*source_rates = chv_rates;
		return ARRAY_SIZE(chv_rates);
	}

	*source_rates = default_rates;

	if (IS_SKYLAKE(dev) && INTEL_REVID(dev) <= SKL_REVID_B0)
		/* WaDisableHBR2:skl */
		return (DP_LINK_BW_2_7 >> 3) + 1;
	else if (INTEL_INFO(dev)->gen >= 8 ||
		 (IS_HASWELL(dev) && !IS_HSW_ULX(dev)))
		return (DP_LINK_BW_5_4 >> 3) + 1;
	else
		return (DP_LINK_BW_2_7 >> 3) + 1;
}
static void
intel_dp_set_clock(struct intel_encoder *encoder,
		   struct intel_crtc_state *pipe_config, int link_bw)
{
	struct drm_device *dev = encoder->base.dev;
	const struct dp_link_dpll *divisor = NULL;
	int i, count = 0;

	if (IS_G4X(dev)) {
		divisor = gen4_dpll;
		count = ARRAY_SIZE(gen4_dpll);
	} else if (HAS_PCH_SPLIT(dev)) {
		divisor = pch_dpll;
		count = ARRAY_SIZE(pch_dpll);
	} else if (IS_CHERRYVIEW(dev)) {
		divisor = chv_dpll;
		count = ARRAY_SIZE(chv_dpll);
	} else if (IS_VALLEYVIEW(dev)) {
		divisor = vlv_dpll;
		count = ARRAY_SIZE(vlv_dpll);
	}

	if (divisor && count) {
		for (i = 0; i < count; i++) {
			if (link_bw == divisor[i].link_bw) {
				pipe_config->dpll = divisor[i].dpll;
				pipe_config->clock_set = true;
				break;
			}
		}
	}
}
static int intersect_rates(const int *source_rates, int source_len,
			   const int *sink_rates, int sink_len,
			   int *common_rates)
{
	int i = 0, j = 0, k = 0;

	while (i < source_len && j < sink_len) {
		if (source_rates[i] == sink_rates[j]) {
			if (WARN_ON(k >= DP_MAX_SUPPORTED_RATES))
				return k;
			common_rates[k] = source_rates[i];
			++k;
			++i;
			++j;
		} else if (source_rates[i] < sink_rates[j]) {
			++i;
		} else {
			++j;
		}
	}
	return k;
}
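/*
 * Example of the merge-style intersection above (illustration only):
 * intersecting the sorted, ascending arrays { 162000, 270000, 540000 }
 * and { 162000, 216000, 270000 } advances whichever index points at the
 * smaller value, and yields common_rates[] = { 162000, 270000 }, k == 2.
 */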
static int intel_dp_common_rates(struct intel_dp *intel_dp,
				 int *common_rates)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	const int *source_rates, *sink_rates;
	int source_len, sink_len;

	sink_len = intel_dp_sink_rates(intel_dp, &sink_rates);
	source_len = intel_dp_source_rates(dev, &source_rates);

	return intersect_rates(source_rates, source_len,
			       sink_rates, sink_len,
			       common_rates);
}
static void snprintf_int_array(char *str, size_t len,
			       const int *array, int nelem)
{
	int i;

	str[0] = '\0';

	for (i = 0; i < nelem; i++) {
		int r = snprintf(str, len, "%s%d", i ? ", " : "", array[i]);
		if (r >= len)
			return;
		str += r;
		len -= r;
	}
}
static void intel_dp_print_rates(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	const int *source_rates, *sink_rates;
	int source_len, sink_len, common_len;
	int common_rates[DP_MAX_SUPPORTED_RATES];
	char str[128]; /* FIXME: too big for stack? */

	if ((drm_debug & DRM_UT_KMS) == 0)
		return;

	source_len = intel_dp_source_rates(dev, &source_rates);
	snprintf_int_array(str, sizeof(str), source_rates, source_len);
	DRM_DEBUG_KMS("source rates: %s\n", str);

	sink_len = intel_dp_sink_rates(intel_dp, &sink_rates);
	snprintf_int_array(str, sizeof(str), sink_rates, sink_len);
	DRM_DEBUG_KMS("sink rates: %s\n", str);

	common_len = intel_dp_common_rates(intel_dp, common_rates);
	snprintf_int_array(str, sizeof(str), common_rates, common_len);
	DRM_DEBUG_KMS("common rates: %s\n", str);
}
static int rate_to_index(int find, const int *rates)
{
	int i = 0;

	for (i = 0; i < DP_MAX_SUPPORTED_RATES; ++i)
		if (find == rates[i])
			break;

	return i;
}

int
intel_dp_max_link_rate(struct intel_dp *intel_dp)
{
	int rates[DP_MAX_SUPPORTED_RATES] = {};
	int len;

	len = intel_dp_common_rates(intel_dp, rates);
	if (WARN_ON(len <= 0))
		return 162000;

	return rates[rate_to_index(0, rates) - 1];
}
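/*
 * Example (illustration only): with common rates { 162000, 270000,
 * 540000 } and the remaining rates[] entries zero-initialized,
 * rate_to_index(0, rates) finds the first zero at index 3, so the
 * function above returns rates[3 - 1] == 540000, the highest common rate.
 */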
int intel_dp_rate_select(struct intel_dp *intel_dp, int rate)
{
	return rate_to_index(rate, intel_dp->sink_rates);
}
bool
intel_dp_compute_config(struct intel_encoder *encoder,
			struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	enum port port = dp_to_dig_port(intel_dp)->port;
	struct intel_crtc *intel_crtc = to_intel_crtc(pipe_config->base.crtc);
	struct intel_connector *intel_connector = intel_dp->attached_connector;
	int lane_count, clock;
	int min_lane_count = 1;
	int max_lane_count = intel_dp_max_lane_count(intel_dp);
	/* Conveniently, the link BW constants become indices with a shift...*/
	int min_clock = 0;
	int max_clock;
	int bpp, mode_rate;
	int link_avail, link_clock;
	int common_rates[DP_MAX_SUPPORTED_RATES] = {};
	int common_len;

	common_len = intel_dp_common_rates(intel_dp, common_rates);

	/* No common link rates between source and sink */
	WARN_ON(common_len <= 0);

	max_clock = common_len - 1;

	if (HAS_PCH_SPLIT(dev) && !HAS_DDI(dev) && port != PORT_A)
		pipe_config->has_pch_encoder = true;

	pipe_config->has_dp_encoder = true;
	pipe_config->has_drrs = false;
	pipe_config->has_audio = intel_dp->has_audio && port != PORT_A;

	if (is_edp(intel_dp) && intel_connector->panel.fixed_mode) {
		intel_fixed_panel_mode(intel_connector->panel.fixed_mode,
				       adjusted_mode);

		if (INTEL_INFO(dev)->gen >= 9) {
			int ret;
			ret = skl_update_scaler_users(intel_crtc, pipe_config, NULL, NULL, 0);
			if (ret)
				return ret;
		}

		if (!HAS_PCH_SPLIT(dev))
			intel_gmch_panel_fitting(intel_crtc, pipe_config,
						 intel_connector->panel.fitting_mode);
		else
			intel_pch_panel_fitting(intel_crtc, pipe_config,
						intel_connector->panel.fitting_mode);
	}

	if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK)
		return false;

	DRM_DEBUG_KMS("DP link computation with max lane count %i "
		      "max bw %d pixel clock %iKHz\n",
		      max_lane_count, common_rates[max_clock],
		      adjusted_mode->crtc_clock);

	/* Walk through all bpp values. Luckily they're all nicely spaced with 2
	 * bpc in between. */
	bpp = pipe_config->pipe_bpp;
	if (is_edp(intel_dp)) {
		if (dev_priv->vbt.edp_bpp && dev_priv->vbt.edp_bpp < bpp) {
			DRM_DEBUG_KMS("clamping bpp for eDP panel to BIOS-provided %i\n",
				      dev_priv->vbt.edp_bpp);
			bpp = dev_priv->vbt.edp_bpp;
		}

		/*
		 * Use the maximum clock and number of lanes the eDP panel
		 * advertises being capable of. The panels are generally
		 * designed to support only a single clock and lane
		 * configuration, and typically these values correspond to the
		 * native resolution of the panel.
		 */
		min_lane_count = max_lane_count;
		min_clock = max_clock;
	}

	for (; bpp >= 6*3; bpp -= 2*3) {
		mode_rate = intel_dp_link_required(adjusted_mode->crtc_clock,
						   bpp);

		for (clock = min_clock; clock <= max_clock; clock++) {
			for (lane_count = min_lane_count;
			     lane_count <= max_lane_count;
			     lane_count <<= 1) {

				link_clock = common_rates[clock];
				link_avail = intel_dp_max_data_rate(link_clock,
								    lane_count);

				if (mode_rate <= link_avail) {
					goto found;
				}
			}
		}
	}

	return false;

found:
	if (intel_dp->color_range_auto) {
		/*
		 * See:
		 * CEA-861-E - 5.1 Default Encoding Parameters
		 * VESA DisplayPort Ver.1.2a - 5.1.1.1 Video Colorimetry
		 */
		if (bpp != 18 && drm_match_cea_mode(adjusted_mode) > 1)
			intel_dp->color_range = DP_COLOR_RANGE_16_235;
		else
			intel_dp->color_range = 0;
	}

	if (intel_dp->color_range)
		pipe_config->limited_color_range = true;

	intel_dp->lane_count = lane_count;

	if (intel_dp->num_sink_rates) {
		intel_dp->link_bw = 0;
		intel_dp->rate_select =
			intel_dp_rate_select(intel_dp, common_rates[clock]);
	} else {
		intel_dp->link_bw =
			drm_dp_link_rate_to_bw_code(common_rates[clock]);
		intel_dp->rate_select = 0;
	}

	pipe_config->pipe_bpp = bpp;
	pipe_config->port_clock = common_rates[clock];

	DRM_DEBUG_KMS("DP link bw %02x lane count %d clock %d bpp %d\n",
		      intel_dp->link_bw, intel_dp->lane_count,
		      pipe_config->port_clock, bpp);
	DRM_DEBUG_KMS("DP link bw required %i available %i\n",
		      mode_rate, link_avail);

	intel_link_compute_m_n(bpp, lane_count,
			       adjusted_mode->crtc_clock,
			       pipe_config->port_clock,
			       &pipe_config->dp_m_n);

	if (intel_connector->panel.downclock_mode != NULL &&
	    dev_priv->drrs.type == SEAMLESS_DRRS_SUPPORT) {
		pipe_config->has_drrs = true;
		intel_link_compute_m_n(bpp, lane_count,
				       intel_connector->panel.downclock_mode->clock,
				       pipe_config->port_clock,
				       &pipe_config->dp_m2_n2);
	}

	if (IS_SKYLAKE(dev) && is_edp(intel_dp))
		skl_edp_set_pll_config(pipe_config, common_rates[clock]);
	else if (IS_BROXTON(dev))
		/* handled in ddi */;
	else if (IS_HASWELL(dev) || IS_BROADWELL(dev))
		hsw_dp_set_ddi_pll_sel(pipe_config, intel_dp->link_bw);
	else
		intel_dp_set_clock(encoder, pipe_config, intel_dp->link_bw);

	return true;
}
static void ironlake_set_pll_cpu_edp(struct intel_dp *intel_dp)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct intel_crtc *crtc = to_intel_crtc(dig_port->base.base.crtc);
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 dpa_ctl;

	DRM_DEBUG_KMS("eDP PLL enable for clock %d\n",
		      crtc->config->port_clock);
	dpa_ctl = I915_READ(DP_A);
	dpa_ctl &= ~DP_PLL_FREQ_MASK;

	if (crtc->config->port_clock == 162000) {
		/* For a long time we've carried around an ILK-DevA w/a for the
		 * 160MHz clock. If we're really unlucky, it's still required.
		 */
		DRM_DEBUG_KMS("160MHz cpu eDP clock, might need ilk devA w/a\n");
		dpa_ctl |= DP_PLL_FREQ_160MHZ;
		intel_dp->DP |= DP_PLL_FREQ_160MHZ;
	} else {
		dpa_ctl |= DP_PLL_FREQ_270MHZ;
		intel_dp->DP |= DP_PLL_FREQ_270MHZ;
	}

	I915_WRITE(DP_A, dpa_ctl);

	POSTING_READ(DP_A);
	udelay(500);
}
static void intel_dp_prepare(struct intel_encoder *encoder)
{
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	enum port port = dp_to_dig_port(intel_dp)->port;
	struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
	struct drm_display_mode *adjusted_mode = &crtc->config->base.adjusted_mode;

	/*
	 * There are four kinds of DP registers:
	 *
	 *	IBX PCH
	 *	SNB CPU
	 *	IVB CPU
	 *	CPT PCH
	 *
	 * IBX PCH and CPU are the same for almost everything,
	 * except that the CPU DP PLL is configured in this
	 * register
	 *
	 * CPT PCH is quite different, having many bits moved
	 * to the TRANS_DP_CTL register instead. That
	 * configuration happens (oddly) in ironlake_pch_enable
	 */

	/* Preserve the BIOS-computed detected bit. This is
	 * supposed to be read-only.
	 */
	intel_dp->DP = I915_READ(intel_dp->output_reg) & DP_DETECTED;

	/* Handle DP bits in common between all three register formats */
	intel_dp->DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
	intel_dp->DP |= DP_PORT_WIDTH(intel_dp->lane_count);

	if (crtc->config->has_audio)
		intel_dp->DP |= DP_AUDIO_OUTPUT_ENABLE;

	/* Split out the IBX/CPU vs CPT settings */

	if (IS_GEN7(dev) && port == PORT_A) {
		if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
			intel_dp->DP |= DP_SYNC_HS_HIGH;
		if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
			intel_dp->DP |= DP_SYNC_VS_HIGH;
		intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;

		if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
			intel_dp->DP |= DP_ENHANCED_FRAMING;

		intel_dp->DP |= crtc->pipe << 29;
	} else if (HAS_PCH_CPT(dev) && port != PORT_A) {
		u32 trans_dp;

		intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;

		trans_dp = I915_READ(TRANS_DP_CTL(crtc->pipe));
		if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
			trans_dp |= TRANS_DP_ENH_FRAMING;
		else
			trans_dp &= ~TRANS_DP_ENH_FRAMING;
		I915_WRITE(TRANS_DP_CTL(crtc->pipe), trans_dp);
	} else {
		if (!HAS_PCH_SPLIT(dev) && !IS_VALLEYVIEW(dev))
			intel_dp->DP |= intel_dp->color_range;

		if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
			intel_dp->DP |= DP_SYNC_HS_HIGH;
		if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
			intel_dp->DP |= DP_SYNC_VS_HIGH;
		intel_dp->DP |= DP_LINK_TRAIN_OFF;

		if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
			intel_dp->DP |= DP_ENHANCED_FRAMING;

		if (IS_CHERRYVIEW(dev))
			intel_dp->DP |= DP_PIPE_SELECT_CHV(crtc->pipe);
		else if (crtc->pipe == PIPE_B)
			intel_dp->DP |= DP_PIPEB_SELECT;
	}
}
#define IDLE_ON_MASK		(PP_ON | PP_SEQUENCE_MASK | 0 | PP_SEQUENCE_STATE_MASK)
#define IDLE_ON_VALUE		(PP_ON | PP_SEQUENCE_NONE | 0 | PP_SEQUENCE_STATE_ON_IDLE)

#define IDLE_OFF_MASK		(PP_ON | PP_SEQUENCE_MASK | 0 | 0)
#define IDLE_OFF_VALUE		(0     | PP_SEQUENCE_NONE | 0 | 0)

#define IDLE_CYCLE_MASK		(PP_ON | PP_SEQUENCE_MASK | PP_CYCLE_DELAY_ACTIVE | PP_SEQUENCE_STATE_MASK)
#define IDLE_CYCLE_VALUE	(0     | PP_SEQUENCE_NONE | 0 | PP_SEQUENCE_STATE_OFF_IDLE)
static void wait_panel_status(struct intel_dp *intel_dp,
			      u32 mask,
			      u32 value)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 pp_stat_reg, pp_ctrl_reg;

	lockdep_assert_held(&dev_priv->pps_mutex);

	pp_stat_reg = _pp_stat_reg(intel_dp);
	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);

	DRM_DEBUG_KMS("mask %08x value %08x status %08x control %08x\n",
		      mask, value,
		      I915_READ(pp_stat_reg),
		      I915_READ(pp_ctrl_reg));

	if (_wait_for((I915_READ(pp_stat_reg) & mask) == value, 5000, 10)) {
		DRM_ERROR("Panel status timeout: status %08x control %08x\n",
			  I915_READ(pp_stat_reg),
			  I915_READ(pp_ctrl_reg));
	}

	DRM_DEBUG_KMS("Wait complete\n");
}
static void wait_panel_on(struct intel_dp *intel_dp)
{
	DRM_DEBUG_KMS("Wait for panel power on\n");
	wait_panel_status(intel_dp, IDLE_ON_MASK, IDLE_ON_VALUE);
}

static void wait_panel_off(struct intel_dp *intel_dp)
{
	DRM_DEBUG_KMS("Wait for panel power off time\n");
	wait_panel_status(intel_dp, IDLE_OFF_MASK, IDLE_OFF_VALUE);
}

static void wait_panel_power_cycle(struct intel_dp *intel_dp)
{
	DRM_DEBUG_KMS("Wait for panel power cycle\n");

	/* When we disable the VDD override bit last we have to do the manual
	 * wait. */
	wait_remaining_ms_from_jiffies(intel_dp->last_power_cycle,
				       intel_dp->panel_power_cycle_delay);

	wait_panel_status(intel_dp, IDLE_CYCLE_MASK, IDLE_CYCLE_VALUE);
}

static void wait_backlight_on(struct intel_dp *intel_dp)
{
	wait_remaining_ms_from_jiffies(intel_dp->last_power_on,
				       intel_dp->backlight_on_delay);
}

static void edp_wait_backlight_off(struct intel_dp *intel_dp)
{
	wait_remaining_ms_from_jiffies(intel_dp->last_backlight_off,
				       intel_dp->backlight_off_delay);
}
/* Read the current pp_control value, unlocking the register if it
 * is locked
 */
static u32 ironlake_get_pp_control(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 control;

	lockdep_assert_held(&dev_priv->pps_mutex);

	control = I915_READ(_pp_ctrl_reg(intel_dp));
	control &= ~PANEL_UNLOCK_MASK;
	control |= PANEL_UNLOCK_REGS;
	return control;
}
/*
 * Must be paired with edp_panel_vdd_off().
 * Must hold pps_mutex around the whole on/off sequence.
 * Can be nested with intel_edp_panel_vdd_{on,off}() calls.
 */
static bool edp_panel_vdd_on(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct intel_encoder *intel_encoder = &intel_dig_port->base;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum intel_display_power_domain power_domain;
	u32 pp;
	u32 pp_stat_reg, pp_ctrl_reg;
	bool need_to_disable = !intel_dp->want_panel_vdd;

	lockdep_assert_held(&dev_priv->pps_mutex);

	if (!is_edp(intel_dp))
		return false;

	cancel_delayed_work(&intel_dp->panel_vdd_work);
	intel_dp->want_panel_vdd = true;

	if (edp_have_panel_vdd(intel_dp))
		return need_to_disable;

	power_domain = intel_display_port_power_domain(intel_encoder);
	intel_display_power_get(dev_priv, power_domain);

	DRM_DEBUG_KMS("Turning eDP port %c VDD on\n",
		      port_name(intel_dig_port->port));

	if (!edp_have_panel_power(intel_dp))
		wait_panel_power_cycle(intel_dp);

	pp = ironlake_get_pp_control(intel_dp);
	pp |= EDP_FORCE_VDD;

	pp_stat_reg = _pp_stat_reg(intel_dp);
	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);

	I915_WRITE(pp_ctrl_reg, pp);
	POSTING_READ(pp_ctrl_reg);
	DRM_DEBUG_KMS("PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
		      I915_READ(pp_stat_reg), I915_READ(pp_ctrl_reg));
	/*
	 * If the panel wasn't on, delay before accessing aux channel
	 */
	if (!edp_have_panel_power(intel_dp)) {
		DRM_DEBUG_KMS("eDP port %c panel power wasn't enabled\n",
			      port_name(intel_dig_port->port));
		msleep(intel_dp->panel_power_up_delay);
	}

	return need_to_disable;
}

/*
 * Must be paired with intel_edp_panel_vdd_off() or
 * intel_edp_panel_off().
 * Nested calls to these functions are not allowed since
 * we drop the lock. Caller must use some higher level
 * locking to prevent nested calls from other threads.
 */
void intel_edp_panel_vdd_on(struct intel_dp *intel_dp)
{
	bool vdd;

	if (!is_edp(intel_dp))
		return;

	pps_lock(intel_dp);
	vdd = edp_panel_vdd_on(intel_dp);
	pps_unlock(intel_dp);

	I915_STATE_WARN(!vdd, "eDP port %c VDD already requested on\n",
			port_name(dp_to_dig_port(intel_dp)->port));
}
static void edp_panel_vdd_off_sync(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_digital_port *intel_dig_port =
		dp_to_dig_port(intel_dp);
	struct intel_encoder *intel_encoder = &intel_dig_port->base;
	enum intel_display_power_domain power_domain;
	u32 pp;
	u32 pp_stat_reg, pp_ctrl_reg;

	lockdep_assert_held(&dev_priv->pps_mutex);

	WARN_ON(intel_dp->want_panel_vdd);

	if (!edp_have_panel_vdd(intel_dp))
		return;

	DRM_DEBUG_KMS("Turning eDP port %c VDD off\n",
		      port_name(intel_dig_port->port));

	pp = ironlake_get_pp_control(intel_dp);
	pp &= ~EDP_FORCE_VDD;

	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
	pp_stat_reg = _pp_stat_reg(intel_dp);

	I915_WRITE(pp_ctrl_reg, pp);
	POSTING_READ(pp_ctrl_reg);

	/* Make sure sequencer is idle before allowing subsequent activity */
	DRM_DEBUG_KMS("PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
		      I915_READ(pp_stat_reg), I915_READ(pp_ctrl_reg));

	if ((pp & POWER_TARGET_ON) == 0)
		intel_dp->last_power_cycle = jiffies;

	power_domain = intel_display_port_power_domain(intel_encoder);
	intel_display_power_put(dev_priv, power_domain);
}

static void edp_panel_vdd_work(struct work_struct *__work)
{
	struct intel_dp *intel_dp = container_of(to_delayed_work(__work),
						 struct intel_dp, panel_vdd_work);

	pps_lock(intel_dp);
	if (!intel_dp->want_panel_vdd)
		edp_panel_vdd_off_sync(intel_dp);
	pps_unlock(intel_dp);
}
static void edp_panel_vdd_schedule_off(struct intel_dp *intel_dp)
{
	unsigned long delay;

	/*
	 * Queue the timer to fire a long time from now (relative to the power
	 * down delay) to keep the panel power up across a sequence of
	 * operations.
	 */
	delay = msecs_to_jiffies(intel_dp->panel_power_cycle_delay * 5);
	schedule_delayed_work(&intel_dp->panel_vdd_work, delay);
}

/*
 * Must be paired with edp_panel_vdd_on().
 * Must hold pps_mutex around the whole on/off sequence.
 * Can be nested with intel_edp_panel_vdd_{on,off}() calls.
 */
static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync)
{
	struct drm_i915_private *dev_priv =
		intel_dp_to_dev(intel_dp)->dev_private;

	lockdep_assert_held(&dev_priv->pps_mutex);

	if (!is_edp(intel_dp))
		return;

	I915_STATE_WARN(!intel_dp->want_panel_vdd, "eDP port %c VDD not forced on",
			port_name(dp_to_dig_port(intel_dp)->port));

	intel_dp->want_panel_vdd = false;

	if (sync)
		edp_panel_vdd_off_sync(intel_dp);
	else
		edp_panel_vdd_schedule_off(intel_dp);
}
static void edp_panel_on(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 pp;
	u32 pp_ctrl_reg;

	lockdep_assert_held(&dev_priv->pps_mutex);

	if (!is_edp(intel_dp))
		return;

	DRM_DEBUG_KMS("Turn eDP port %c panel power on\n",
		      port_name(dp_to_dig_port(intel_dp)->port));

	if (WARN(edp_have_panel_power(intel_dp),
		 "eDP port %c panel power already on\n",
		 port_name(dp_to_dig_port(intel_dp)->port)))
		return;

	wait_panel_power_cycle(intel_dp);

	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
	pp = ironlake_get_pp_control(intel_dp);
	if (IS_GEN5(dev)) {
		/* ILK workaround: disable reset around power sequence */
		pp &= ~PANEL_POWER_RESET;
		I915_WRITE(pp_ctrl_reg, pp);
		POSTING_READ(pp_ctrl_reg);
	}

	pp |= POWER_TARGET_ON;
	if (!IS_GEN5(dev))
		pp |= PANEL_POWER_RESET;

	I915_WRITE(pp_ctrl_reg, pp);
	POSTING_READ(pp_ctrl_reg);

	wait_panel_on(intel_dp);
	intel_dp->last_power_on = jiffies;

	if (IS_GEN5(dev)) {
		pp |= PANEL_POWER_RESET; /* restore panel reset bit */
		I915_WRITE(pp_ctrl_reg, pp);
		POSTING_READ(pp_ctrl_reg);
	}
}

void intel_edp_panel_on(struct intel_dp *intel_dp)
{
	if (!is_edp(intel_dp))
		return;

	pps_lock(intel_dp);
	edp_panel_on(intel_dp);
	pps_unlock(intel_dp);
}
static void edp_panel_off(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct intel_encoder *intel_encoder = &intel_dig_port->base;
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum intel_display_power_domain power_domain;
	u32 pp;
	u32 pp_ctrl_reg;

	lockdep_assert_held(&dev_priv->pps_mutex);

	if (!is_edp(intel_dp))
		return;

	DRM_DEBUG_KMS("Turn eDP port %c panel power off\n",
		      port_name(dp_to_dig_port(intel_dp)->port));

	WARN(!intel_dp->want_panel_vdd, "Need eDP port %c VDD to turn off panel\n",
	     port_name(dp_to_dig_port(intel_dp)->port));

	pp = ironlake_get_pp_control(intel_dp);
	/* We need to switch off panel power _and_ force vdd, for otherwise some
	 * panels get very unhappy and cease to work. */
	pp &= ~(POWER_TARGET_ON | PANEL_POWER_RESET | EDP_FORCE_VDD |
		EDP_BLC_ENABLE);

	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);

	intel_dp->want_panel_vdd = false;

	I915_WRITE(pp_ctrl_reg, pp);
	POSTING_READ(pp_ctrl_reg);

	intel_dp->last_power_cycle = jiffies;
	wait_panel_off(intel_dp);

	/* We got a reference when we enabled the VDD. */
	power_domain = intel_display_port_power_domain(intel_encoder);
	intel_display_power_put(dev_priv, power_domain);
}

void intel_edp_panel_off(struct intel_dp *intel_dp)
{
	if (!is_edp(intel_dp))
		return;

	pps_lock(intel_dp);
	edp_panel_off(intel_dp);
	pps_unlock(intel_dp);
}
/* Enable backlight in the panel power control. */
static void _intel_edp_backlight_on(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 pp;
	u32 pp_ctrl_reg;

	/*
	 * If we enable the backlight right away following a panel power
	 * on, we may see slight flicker as the panel syncs with the eDP
	 * link. So delay a bit to make sure the image is solid before
	 * allowing it to appear.
	 */
	wait_backlight_on(intel_dp);

	pps_lock(intel_dp);

	pp = ironlake_get_pp_control(intel_dp);
	pp |= EDP_BLC_ENABLE;

	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);

	I915_WRITE(pp_ctrl_reg, pp);
	POSTING_READ(pp_ctrl_reg);

	pps_unlock(intel_dp);
}
/* Enable backlight PWM and backlight PP control. */
void intel_edp_backlight_on(struct intel_dp *intel_dp)
{
	if (!is_edp(intel_dp))
		return;

	DRM_DEBUG_KMS("\n");

	intel_panel_enable_backlight(intel_dp->attached_connector);
	_intel_edp_backlight_on(intel_dp);
}
/* Disable backlight in the panel power control. */
static void _intel_edp_backlight_off(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 pp;
	u32 pp_ctrl_reg;

	if (!is_edp(intel_dp))
		return;

	pps_lock(intel_dp);

	pp = ironlake_get_pp_control(intel_dp);
	pp &= ~EDP_BLC_ENABLE;

	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);

	I915_WRITE(pp_ctrl_reg, pp);
	POSTING_READ(pp_ctrl_reg);

	pps_unlock(intel_dp);

	intel_dp->last_backlight_off = jiffies;
	edp_wait_backlight_off(intel_dp);
}

/* Disable backlight PP control and backlight PWM. */
void intel_edp_backlight_off(struct intel_dp *intel_dp)
{
	if (!is_edp(intel_dp))
		return;

	DRM_DEBUG_KMS("\n");

	_intel_edp_backlight_off(intel_dp);
	intel_panel_disable_backlight(intel_dp->attached_connector);
}
/*
 * Hook for controlling the panel power control backlight through the bl_power
 * sysfs attribute. Take care to handle multiple calls.
 */
static void intel_edp_backlight_power(struct intel_connector *connector,
				      bool enable)
{
	struct intel_dp *intel_dp = intel_attached_dp(&connector->base);
	bool is_enabled;

	pps_lock(intel_dp);
	is_enabled = ironlake_get_pp_control(intel_dp) & EDP_BLC_ENABLE;
	pps_unlock(intel_dp);

	if (is_enabled == enable)
		return;

	DRM_DEBUG_KMS("panel power control backlight %s\n",
		      enable ? "enable" : "disable");

	if (enable)
		_intel_edp_backlight_on(intel_dp);
	else
		_intel_edp_backlight_off(intel_dp);
}
static void ironlake_edp_pll_on(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_crtc *crtc = intel_dig_port->base.base.crtc;
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 dpa_ctl;

	assert_pipe_disabled(dev_priv,
			     to_intel_crtc(crtc)->pipe);

	DRM_DEBUG_KMS("\n");
	dpa_ctl = I915_READ(DP_A);
	WARN(dpa_ctl & DP_PLL_ENABLE, "dp pll on, should be off\n");
	WARN(dpa_ctl & DP_PORT_EN, "dp port still on, should be off\n");

	/* We don't adjust intel_dp->DP while tearing down the link, to
	 * facilitate link retraining (e.g. after hotplug). Hence clear all
	 * enable bits here to ensure that we don't enable too much. */
	intel_dp->DP &= ~(DP_PORT_EN | DP_AUDIO_OUTPUT_ENABLE);
	intel_dp->DP |= DP_PLL_ENABLE;
	I915_WRITE(DP_A, intel_dp->DP);
	POSTING_READ(DP_A);
	udelay(200);
}
static void ironlake_edp_pll_off(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_crtc *crtc = intel_dig_port->base.base.crtc;
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 dpa_ctl;

	assert_pipe_disabled(dev_priv,
			     to_intel_crtc(crtc)->pipe);

	dpa_ctl = I915_READ(DP_A);
	WARN((dpa_ctl & DP_PLL_ENABLE) == 0,
	     "dp pll off, should be on\n");
	WARN(dpa_ctl & DP_PORT_EN, "dp port still on, should be off\n");

	/* We can't rely on the value tracked for the DP register in
	 * intel_dp->DP because link_down must not change that (otherwise link
	 * re-training will fail). */
	dpa_ctl &= ~DP_PLL_ENABLE;
	I915_WRITE(DP_A, dpa_ctl);
	POSTING_READ(DP_A);
	udelay(200);
}
/* If the sink supports it, try to set the power state appropriately */
void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode)
{
	int ret, i;

	/* Should have a valid DPCD by this point */
	if (intel_dp->dpcd[DP_DPCD_REV] < 0x11)
		return;

	if (mode != DRM_MODE_DPMS_ON) {
		ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER,
					 DP_SET_POWER_D3);
	} else {
		/*
		 * When turning on, we need to retry for 1ms to give the sink
		 * time to wake up.
		 */
		for (i = 0; i < 3; i++) {
			ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER,
						 DP_SET_POWER_D0);
			if (ret == 1)
				break;
			msleep(1);
		}
	}

	if (ret != 1)
		DRM_DEBUG_KMS("failed to %s sink power state\n",
			      mode == DRM_MODE_DPMS_ON ? "enable" : "disable");
}
2178 static bool intel_dp_get_hw_state(struct intel_encoder *encoder,
2181 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2182 enum port port = dp_to_dig_port(intel_dp)->port;
2183 struct drm_device *dev = encoder->base.dev;
2184 struct drm_i915_private *dev_priv = dev->dev_private;
2185 enum intel_display_power_domain power_domain;
2188 power_domain = intel_display_port_power_domain(encoder);
2189 if (!intel_display_power_is_enabled(dev_priv, power_domain))
2192 tmp = I915_READ(intel_dp->output_reg);
2194 if (!(tmp & DP_PORT_EN))
2197 if (IS_GEN7(dev) && port == PORT_A) {
2198 *pipe = PORT_TO_PIPE_CPT(tmp);
2199 } else if (HAS_PCH_CPT(dev) && port != PORT_A) {
2202 for_each_pipe(dev_priv, p) {
2203 u32 trans_dp = I915_READ(TRANS_DP_CTL(p));
2204 if (TRANS_DP_PIPE_TO_PORT(trans_dp) == port) {
2210 DRM_DEBUG_KMS("No pipe for dp port 0x%x found\n",
2211 intel_dp->output_reg);
2212 } else if (IS_CHERRYVIEW(dev)) {
2213 *pipe = DP_PORT_TO_PIPE_CHV(tmp);
2215 *pipe = PORT_TO_PIPE(tmp);
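/*
 * Read the pipe configuration (sync polarities, audio, color range,
 * port clock and dotclock) back from the hardware for state checking.
 */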
2221 static void intel_dp_get_config(struct intel_encoder *encoder,
2222 struct intel_crtc_state *pipe_config)
2224 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2226 struct drm_device *dev = encoder->base.dev;
2227 struct drm_i915_private *dev_priv = dev->dev_private;
2228 enum port port = dp_to_dig_port(intel_dp)->port;
2229 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
2232 tmp = I915_READ(intel_dp->output_reg);
2234 pipe_config->has_audio = tmp & DP_AUDIO_OUTPUT_ENABLE && port != PORT_A;
2236 if (HAS_PCH_CPT(dev) && port != PORT_A) {
2237 tmp = I915_READ(TRANS_DP_CTL(crtc->pipe));
2238 if (tmp & TRANS_DP_HSYNC_ACTIVE_HIGH)
2239 flags |= DRM_MODE_FLAG_PHSYNC;
2241 flags |= DRM_MODE_FLAG_NHSYNC;
2243 if (tmp & TRANS_DP_VSYNC_ACTIVE_HIGH)
2244 flags |= DRM_MODE_FLAG_PVSYNC;
2246 flags |= DRM_MODE_FLAG_NVSYNC;
2248 if (tmp & DP_SYNC_HS_HIGH)
2249 flags |= DRM_MODE_FLAG_PHSYNC;
2251 flags |= DRM_MODE_FLAG_NHSYNC;
2253 if (tmp & DP_SYNC_VS_HIGH)
2254 flags |= DRM_MODE_FLAG_PVSYNC;
2256 flags |= DRM_MODE_FLAG_NVSYNC;
2259 pipe_config->base.adjusted_mode.flags |= flags;
2261 if (!HAS_PCH_SPLIT(dev) && !IS_VALLEYVIEW(dev) &&
2262 tmp & DP_COLOR_RANGE_16_235)
2263 pipe_config->limited_color_range = true;
2265 pipe_config->has_dp_encoder = true;
2267 intel_dp_get_m_n(crtc, pipe_config);
2269 if (port == PORT_A) {
2270 if ((I915_READ(DP_A) & DP_PLL_FREQ_MASK) == DP_PLL_FREQ_160MHZ)
2271 pipe_config->port_clock = 162000;
2273 pipe_config->port_clock = 270000;
2276 dotclock = intel_dotclock_calculate(pipe_config->port_clock,
2277 &pipe_config->dp_m_n);
2279 if (HAS_PCH_SPLIT(dev_priv->dev) && port != PORT_A)
2280 ironlake_check_encoder_dotclock(pipe_config, dotclock);
2282 pipe_config->base.adjusted_mode.crtc_clock = dotclock;
2284 if (is_edp(intel_dp) && dev_priv->vbt.edp_bpp &&
2285 pipe_config->pipe_bpp > dev_priv->vbt.edp_bpp) {
2287 * This is a big fat ugly hack.
2289 * Some machines in UEFI boot mode provide us a VBT that has 18
2290 * bpp and 1.62 GHz link bandwidth for eDP, which for reasons
2291 * unknown we fail to light up. Yet the same BIOS boots up with
2292 * 24 bpp and 2.7 GHz link. Use the same bpp as the BIOS uses as
2293 * max, not what it tells us to use.
2295 * Note: This will still be broken if the eDP panel is not lit
2296 * up by the BIOS, and thus we can't get the mode at module
2297 * load.
2299 DRM_DEBUG_KMS("pipe has %d bpp for eDP panel, overriding BIOS-provided max %d bpp\n",
2300 pipe_config->pipe_bpp, dev_priv->vbt.edp_bpp);
2301 dev_priv->vbt.edp_bpp = pipe_config->pipe_bpp;
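/*
 * Disable sequence: audio and PSR first, then backlight, sink DPMS off
 * and panel power, keeping vdd up while the panel is switched off.
 */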
2305 static void intel_disable_dp(struct intel_encoder *encoder)
2307 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2308 struct drm_device *dev = encoder->base.dev;
2309 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
2311 if (crtc->config->has_audio)
2312 intel_audio_codec_disable(encoder);
2314 if (HAS_PSR(dev) && !HAS_DDI(dev))
2315 intel_psr_disable(intel_dp);
2317 /* Make sure the panel is off before trying to change the mode. But also
2318 * ensure that we have vdd while we switch off the panel. */
2319 intel_edp_panel_vdd_on(intel_dp);
2320 intel_edp_backlight_off(intel_dp);
2321 intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_OFF);
2322 intel_edp_panel_off(intel_dp);
2324 /* disable the port before the pipe on g4x */
2325 if (INTEL_INFO(dev)->gen < 5)
2326 intel_dp_link_down(intel_dp);
2329 static void ilk_post_disable_dp(struct intel_encoder *encoder)
2331 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2332 enum port port = dp_to_dig_port(intel_dp)->port;
2334 intel_dp_link_down(intel_dp);
2336 ironlake_edp_pll_off(intel_dp);
2339 static void vlv_post_disable_dp(struct intel_encoder *encoder)
2341 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2343 intel_dp_link_down(intel_dp);
2346 static void chv_post_disable_dp(struct intel_encoder *encoder)
2348 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2349 struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
2350 struct drm_device *dev = encoder->base.dev;
2351 struct drm_i915_private *dev_priv = dev->dev_private;
2352 struct intel_crtc *intel_crtc =
2353 to_intel_crtc(encoder->base.crtc);
2354 enum dpio_channel ch = vlv_dport_to_channel(dport);
2355 enum pipe pipe = intel_crtc->pipe;
2358 intel_dp_link_down(intel_dp);
2360 mutex_lock(&dev_priv->sb_lock);
2362 /* Propagate soft reset to data lane reset */
2363 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW1(ch));
2364 val |= CHV_PCS_REQ_SOFTRESET_EN;
2365 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW1(ch), val);
2367 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW1(ch));
2368 val |= CHV_PCS_REQ_SOFTRESET_EN;
2369 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW1(ch), val);
2371 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW0(ch));
2372 val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
2373 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW0(ch), val);
2375 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW0(ch));
2376 val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
2377 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW0(ch), val);
2379 mutex_unlock(&dev_priv->sb_lock);
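/*
 * Program the requested training pattern into the hardware. DDI
 * platforms use DP_TP_CTL, gen7 port A and CPT PCH ports use the CPT
 * link-train bits, and everything else (including CHV) uses the
 * classic DP register layout.
 */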
2383 _intel_dp_set_link_train(struct intel_dp *intel_dp,
2385 uint8_t dp_train_pat)
2387 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2388 struct drm_device *dev = intel_dig_port->base.base.dev;
2389 struct drm_i915_private *dev_priv = dev->dev_private;
2390 enum port port = intel_dig_port->port;
2393 uint32_t temp = I915_READ(DP_TP_CTL(port));
2395 if (dp_train_pat & DP_LINK_SCRAMBLING_DISABLE)
2396 temp |= DP_TP_CTL_SCRAMBLE_DISABLE;
2398 temp &= ~DP_TP_CTL_SCRAMBLE_DISABLE;
2400 temp &= ~DP_TP_CTL_LINK_TRAIN_MASK;
2401 switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
2402 case DP_TRAINING_PATTERN_DISABLE:
2403 temp |= DP_TP_CTL_LINK_TRAIN_NORMAL;
2406 case DP_TRAINING_PATTERN_1:
2407 temp |= DP_TP_CTL_LINK_TRAIN_PAT1;
2409 case DP_TRAINING_PATTERN_2:
2410 temp |= DP_TP_CTL_LINK_TRAIN_PAT2;
2412 case DP_TRAINING_PATTERN_3:
2413 temp |= DP_TP_CTL_LINK_TRAIN_PAT3;
2416 I915_WRITE(DP_TP_CTL(port), temp);
2418 } else if ((IS_GEN7(dev) && port == PORT_A) ||
2419 (HAS_PCH_CPT(dev) && port != PORT_A)) {
2420 *DP &= ~DP_LINK_TRAIN_MASK_CPT;
2422 switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
2423 case DP_TRAINING_PATTERN_DISABLE:
2424 *DP |= DP_LINK_TRAIN_OFF_CPT;
2426 case DP_TRAINING_PATTERN_1:
2427 *DP |= DP_LINK_TRAIN_PAT_1_CPT;
2429 case DP_TRAINING_PATTERN_2:
2430 *DP |= DP_LINK_TRAIN_PAT_2_CPT;
2432 case DP_TRAINING_PATTERN_3:
2433 DRM_ERROR("DP training pattern 3 not supported\n");
2434 *DP |= DP_LINK_TRAIN_PAT_2_CPT;
2439 if (IS_CHERRYVIEW(dev))
2440 *DP &= ~DP_LINK_TRAIN_MASK_CHV;
2442 *DP &= ~DP_LINK_TRAIN_MASK;
2444 switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
2445 case DP_TRAINING_PATTERN_DISABLE:
2446 *DP |= DP_LINK_TRAIN_OFF;
2448 case DP_TRAINING_PATTERN_1:
2449 *DP |= DP_LINK_TRAIN_PAT_1;
2451 case DP_TRAINING_PATTERN_2:
2452 *DP |= DP_LINK_TRAIN_PAT_2;
2454 case DP_TRAINING_PATTERN_3:
2455 if (IS_CHERRYVIEW(dev)) {
2456 *DP |= DP_LINK_TRAIN_PAT_3_CHV;
2458 DRM_ERROR("DP training pattern 3 not supported\n");
2459 *DP |= DP_LINK_TRAIN_PAT_2;
2466 static void intel_dp_enable_port(struct intel_dp *intel_dp)
2468 struct drm_device *dev = intel_dp_to_dev(intel_dp);
2469 struct drm_i915_private *dev_priv = dev->dev_private;
2471 /* enable with pattern 1 (as per spec) */
2472 _intel_dp_set_link_train(intel_dp, &intel_dp->DP,
2473 DP_TRAINING_PATTERN_1);
2475 I915_WRITE(intel_dp->output_reg, intel_dp->DP);
2476 POSTING_READ(intel_dp->output_reg);
2479 * Magic for VLV/CHV. We _must_ first set up the register
2480 * without actually enabling the port, and then do another
2481 * write to enable the port. Otherwise link training will
2482 * fail when the power sequencer is freshly used for this port.
2484 intel_dp->DP |= DP_PORT_EN;
2486 I915_WRITE(intel_dp->output_reg, intel_dp->DP);
2487 POSTING_READ(intel_dp->output_reg);
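/*
 * Full enable sequence: program the port, run the panel power-up
 * sequence under vdd, then link train and start audio if needed.
 */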
2490 static void intel_enable_dp(struct intel_encoder *encoder)
2492 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2493 struct drm_device *dev = encoder->base.dev;
2494 struct drm_i915_private *dev_priv = dev->dev_private;
2495 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
2496 uint32_t dp_reg = I915_READ(intel_dp->output_reg);
2497 unsigned int lane_mask = 0x0;
2499 if (WARN_ON(dp_reg & DP_PORT_EN))
2504 if (IS_VALLEYVIEW(dev))
2505 vlv_init_panel_power_sequencer(intel_dp);
2507 intel_dp_enable_port(intel_dp);
2509 edp_panel_vdd_on(intel_dp);
2510 edp_panel_on(intel_dp);
2511 edp_panel_vdd_off(intel_dp, true);
2513 pps_unlock(intel_dp);
2515 if (IS_VALLEYVIEW(dev))
2516 vlv_wait_port_ready(dev_priv, dp_to_dig_port(intel_dp),
2519 intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
2520 intel_dp_start_link_train(intel_dp);
2521 intel_dp_complete_link_train(intel_dp);
2522 intel_dp_stop_link_train(intel_dp);
2524 if (crtc->config->has_audio) {
2525 DRM_DEBUG_DRIVER("Enabling DP audio on pipe %c\n",
2526 pipe_name(crtc->pipe));
2527 intel_audio_codec_enable(encoder);
2531 static void g4x_enable_dp(struct intel_encoder *encoder)
2533 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2535 intel_enable_dp(encoder);
2536 intel_edp_backlight_on(intel_dp);
2539 static void vlv_enable_dp(struct intel_encoder *encoder)
2541 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2543 intel_edp_backlight_on(intel_dp);
2544 intel_psr_enable(intel_dp);
2547 static void g4x_pre_enable_dp(struct intel_encoder *encoder)
2549 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2550 struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
2552 intel_dp_prepare(encoder);
2554 /* Only ilk+ has port A */
2555 if (dport->port == PORT_A) {
2556 ironlake_set_pll_cpu_edp(intel_dp);
2557 ironlake_edp_pll_on(intel_dp);
2561 static void vlv_detach_power_sequencer(struct intel_dp *intel_dp)
2563 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2564 struct drm_i915_private *dev_priv = intel_dig_port->base.base.dev->dev_private;
2565 enum pipe pipe = intel_dp->pps_pipe;
2566 int pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe);
2568 edp_panel_vdd_off_sync(intel_dp);
2571 * VLV seems to get confused when multiple power sequencers
2572 * have the same port selected (even if only one has power/vdd
2573 * enabled). The failure manifests as vlv_wait_port_ready() failing.
2574 * CHV on the other hand doesn't seem to mind having the same port
2575 * selected in multiple power sequencers, but let's clear the
2576 * port select always when logically disconnecting a power sequencer
2577 * from a port.
2579 DRM_DEBUG_KMS("detaching pipe %c power sequencer from port %c\n",
2580 pipe_name(pipe), port_name(intel_dig_port->port));
2581 I915_WRITE(pp_on_reg, 0);
2582 POSTING_READ(pp_on_reg);
2584 intel_dp->pps_pipe = INVALID_PIPE;
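/*
 * Walk all eDP encoders and detach the given pipe's power sequencer
 * from whichever port currently holds it, so it can be reused.
 */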
2587 static void vlv_steal_power_sequencer(struct drm_device *dev,
2590 struct drm_i915_private *dev_priv = dev->dev_private;
2591 struct intel_encoder *encoder;
2593 lockdep_assert_held(&dev_priv->pps_mutex);
2595 if (WARN_ON(pipe != PIPE_A && pipe != PIPE_B))
2598 list_for_each_entry(encoder, &dev->mode_config.encoder_list,
2600 struct intel_dp *intel_dp;
2603 if (encoder->type != INTEL_OUTPUT_EDP)
2606 intel_dp = enc_to_intel_dp(&encoder->base);
2607 port = dp_to_dig_port(intel_dp)->port;
2609 if (intel_dp->pps_pipe != pipe)
2612 DRM_DEBUG_KMS("stealing pipe %c power sequencer from port %c\n",
2613 pipe_name(pipe), port_name(port));
2615 WARN(encoder->connectors_active,
2616 "stealing pipe %c power sequencer from active eDP port %c\n",
2617 pipe_name(pipe), port_name(port));
2619 /* make sure vdd is off before we steal it */
2620 vlv_detach_power_sequencer(intel_dp);
2624 static void vlv_init_panel_power_sequencer(struct intel_dp *intel_dp)
2626 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2627 struct intel_encoder *encoder = &intel_dig_port->base;
2628 struct drm_device *dev = encoder->base.dev;
2629 struct drm_i915_private *dev_priv = dev->dev_private;
2630 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
2632 lockdep_assert_held(&dev_priv->pps_mutex);
2634 if (!is_edp(intel_dp))
2637 if (intel_dp->pps_pipe == crtc->pipe)
2641 * If another power sequencer was being used on this
2642 * port previously make sure to turn off vdd there while
2643 * we still have control of it.
2645 if (intel_dp->pps_pipe != INVALID_PIPE)
2646 vlv_detach_power_sequencer(intel_dp);
2649 * We may be stealing the power
2650 * sequencer from another port.
2652 vlv_steal_power_sequencer(dev, crtc->pipe);
2654 /* now it's all ours */
2655 intel_dp->pps_pipe = crtc->pipe;
2657 DRM_DEBUG_KMS("initializing pipe %c power sequencer for port %c\n",
2658 pipe_name(intel_dp->pps_pipe), port_name(intel_dig_port->port));
2660 /* init power sequencer on this pipe and port */
2661 intel_dp_init_panel_power_sequencer(dev, intel_dp);
2662 intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
2665 static void vlv_pre_enable_dp(struct intel_encoder *encoder)
2667 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2668 struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
2669 struct drm_device *dev = encoder->base.dev;
2670 struct drm_i915_private *dev_priv = dev->dev_private;
2671 struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
2672 enum dpio_channel port = vlv_dport_to_channel(dport);
2673 int pipe = intel_crtc->pipe;
2676 mutex_lock(&dev_priv->sb_lock);
2678 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW8(port));
2685 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW8(port), val);
2686 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW14(port), 0x00760018);
2687 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW23(port), 0x00400888);
2689 mutex_unlock(&dev_priv->sb_lock);
2691 intel_enable_dp(encoder);
2694 static void vlv_dp_pre_pll_enable(struct intel_encoder *encoder)
2696 struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
2697 struct drm_device *dev = encoder->base.dev;
2698 struct drm_i915_private *dev_priv = dev->dev_private;
2699 struct intel_crtc *intel_crtc =
2700 to_intel_crtc(encoder->base.crtc);
2701 enum dpio_channel port = vlv_dport_to_channel(dport);
2702 int pipe = intel_crtc->pipe;
2704 intel_dp_prepare(encoder);
2706 /* Program Tx lane resets to default */
2707 mutex_lock(&dev_priv->sb_lock);
2708 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW0(port),
2709 DPIO_PCS_TX_LANE2_RESET |
2710 DPIO_PCS_TX_LANE1_RESET);
2711 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW1(port),
2712 DPIO_PCS_CLK_CRI_RXEB_EIOS_EN |
2713 DPIO_PCS_CLK_CRI_RXDIGFILTSG_EN |
2714 (1<<DPIO_PCS_CLK_DATAWIDTH_SHIFT) |
2715 DPIO_PCS_CLK_SOFT_RESET);
2717 /* Fix up inter-pair skew failure */
2718 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW12(port), 0x00750f00);
2719 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW11(port), 0x00001500);
2720 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW14(port), 0x40400000);
2721 mutex_unlock(&dev_priv->sb_lock);
2724 static void chv_pre_enable_dp(struct intel_encoder *encoder)
2726 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2727 struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
2728 struct drm_device *dev = encoder->base.dev;
2729 struct drm_i915_private *dev_priv = dev->dev_private;
2730 struct intel_crtc *intel_crtc =
2731 to_intel_crtc(encoder->base.crtc);
2732 enum dpio_channel ch = vlv_dport_to_channel(dport);
2733 int pipe = intel_crtc->pipe;
2734 int data, i, stagger;
2737 mutex_lock(&dev_priv->sb_lock);
2739 /* allow hardware to manage TX FIFO reset source */
2740 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW11(ch));
2741 val &= ~DPIO_LANEDESKEW_STRAP_OVRD;
2742 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW11(ch), val);
2744 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW11(ch));
2745 val &= ~DPIO_LANEDESKEW_STRAP_OVRD;
2746 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW11(ch), val);
2748 /* Deassert soft data lane reset */
2749 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW1(ch));
2750 val |= CHV_PCS_REQ_SOFTRESET_EN;
2751 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW1(ch), val);
2753 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW1(ch));
2754 val |= CHV_PCS_REQ_SOFTRESET_EN;
2755 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW1(ch), val);
2757 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW0(ch));
2758 val |= (DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
2759 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW0(ch), val);
2761 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW0(ch));
2762 val |= (DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
2763 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW0(ch), val);
2765 /* Program Tx lane latency optimal setting */
2766 for (i = 0; i < 4; i++) {
2767 /* Set the upar bit */
2768 data = (i == 1) ? 0x0 : 0x1;
2769 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW14(ch, i),
2770 data << DPIO_UPAR_SHIFT);
2773 /* Data lane stagger programming */
2774 if (intel_crtc->config->port_clock > 270000)
2776 else if (intel_crtc->config->port_clock > 135000)
2778 else if (intel_crtc->config->port_clock > 67500)
2780 else if (intel_crtc->config->port_clock > 33750)
2785 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW11(ch));
2786 val |= DPIO_TX2_STAGGER_MASK(0x1f);
2787 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW11(ch), val);
2789 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW11(ch));
2790 val |= DPIO_TX2_STAGGER_MASK(0x1f);
2791 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW11(ch), val);
2793 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW12(ch),
2794 DPIO_LANESTAGGER_STRAP(stagger) |
2795 DPIO_LANESTAGGER_STRAP_OVRD |
2796 DPIO_TX1_STAGGER_MASK(0x1f) |
2797 DPIO_TX1_STAGGER_MULT(6) |
2798 DPIO_TX2_STAGGER_MULT(0));
2800 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW12(ch),
2801 DPIO_LANESTAGGER_STRAP(stagger) |
2802 DPIO_LANESTAGGER_STRAP_OVRD |
2803 DPIO_TX1_STAGGER_MASK(0x1f) |
2804 DPIO_TX1_STAGGER_MULT(7) |
2805 DPIO_TX2_STAGGER_MULT(5));
2807 mutex_unlock(&dev_priv->sb_lock);
2809 intel_enable_dp(encoder);
2812 static void chv_dp_pre_pll_enable(struct intel_encoder *encoder)
2814 struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
2815 struct drm_device *dev = encoder->base.dev;
2816 struct drm_i915_private *dev_priv = dev->dev_private;
2817 struct intel_crtc *intel_crtc =
2818 to_intel_crtc(encoder->base.crtc);
2819 enum dpio_channel ch = vlv_dport_to_channel(dport);
2820 enum pipe pipe = intel_crtc->pipe;
2823 intel_dp_prepare(encoder);
2825 mutex_lock(&dev_priv->sb_lock);
2827 /* program left/right clock distribution */
2828 if (pipe != PIPE_B) {
2829 val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW5_CH0);
2830 val &= ~(CHV_BUFLEFTENA1_MASK | CHV_BUFRIGHTENA1_MASK);
2832 val |= CHV_BUFLEFTENA1_FORCE;
2834 val |= CHV_BUFRIGHTENA1_FORCE;
2835 vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW5_CH0, val);
2837 val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW1_CH1);
2838 val &= ~(CHV_BUFLEFTENA2_MASK | CHV_BUFRIGHTENA2_MASK);
2840 val |= CHV_BUFLEFTENA2_FORCE;
2842 val |= CHV_BUFRIGHTENA2_FORCE;
2843 vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW1_CH1, val);
2846 /* program clock channel usage */
2847 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW8(ch));
2848 val |= CHV_PCS_USEDCLKCHANNEL_OVRRIDE;
2850 val &= ~CHV_PCS_USEDCLKCHANNEL;
2852 val |= CHV_PCS_USEDCLKCHANNEL;
2853 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW8(ch), val);
2855 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW8(ch));
2856 val |= CHV_PCS_USEDCLKCHANNEL_OVRRIDE;
2858 val &= ~CHV_PCS_USEDCLKCHANNEL;
2860 val |= CHV_PCS_USEDCLKCHANNEL;
2861 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW8(ch), val);
2864 * This is a bit weird since generally CL
2865 * matches the pipe, but here we need to
2866 * pick the CL based on the port.
2868 val = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW19(ch));
2870 val &= ~CHV_CMN_USEDCLKCHANNEL;
2872 val |= CHV_CMN_USEDCLKCHANNEL;
2873 vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW19(ch), val);
2875 mutex_unlock(&dev_priv->sb_lock);
2879 * Native read with retry for link status and receiver capability reads for
2880 * cases where the sink may still be asleep.
2882 * Sinks are *supposed* to come up within 1ms from an off state, but we're also
2883 * supposed to retry 3 times per the spec.
2886 intel_dp_dpcd_read_wake(struct drm_dp_aux *aux, unsigned int offset,
2887 void *buffer, size_t size)
2893 * Sometimes we just get the same incorrect byte repeated
2894 * over the entire buffer. Doing just one throw-away read
2895 * initially seems to "solve" it.
2897 drm_dp_dpcd_read(aux, DP_DPCD_REV, buffer, 1);
2899 for (i = 0; i < 3; i++) {
2900 ret = drm_dp_dpcd_read(aux, offset, buffer, size);
2910 * Fetch AUX CH registers 0x202 - 0x207 which contain
2911 * link status information
2914 intel_dp_get_link_status(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_STATUS_SIZE])
2916 return intel_dp_dpcd_read_wake(&intel_dp->aux,
2919 DP_LINK_STATUS_SIZE) == DP_LINK_STATUS_SIZE;
2922 /* These are source-specific values. */
2924 intel_dp_voltage_max(struct intel_dp *intel_dp)
2926 struct drm_device *dev = intel_dp_to_dev(intel_dp);
2927 struct drm_i915_private *dev_priv = dev->dev_private;
2928 enum port port = dp_to_dig_port(intel_dp)->port;
2930 if (IS_BROXTON(dev))
2931 return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
2932 else if (INTEL_INFO(dev)->gen >= 9) {
2933 if (dev_priv->edp_low_vswing && port == PORT_A)
2934 return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
2935 return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
2936 } else if (IS_VALLEYVIEW(dev))
2937 return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
2938 else if (IS_GEN7(dev) && port == PORT_A)
2939 return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
2940 else if (HAS_PCH_CPT(dev) && port != PORT_A)
2941 return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
2943 return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
2947 intel_dp_pre_emphasis_max(struct intel_dp *intel_dp, uint8_t voltage_swing)
2949 struct drm_device *dev = intel_dp_to_dev(intel_dp);
2950 enum port port = dp_to_dig_port(intel_dp)->port;
2952 if (INTEL_INFO(dev)->gen >= 9) {
2953 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
2954 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
2955 return DP_TRAIN_PRE_EMPH_LEVEL_3;
2956 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
2957 return DP_TRAIN_PRE_EMPH_LEVEL_2;
2958 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
2959 return DP_TRAIN_PRE_EMPH_LEVEL_1;
2960 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
2961 return DP_TRAIN_PRE_EMPH_LEVEL_0;
2963 return DP_TRAIN_PRE_EMPH_LEVEL_0;
2965 } else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
2966 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
2967 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
2968 return DP_TRAIN_PRE_EMPH_LEVEL_3;
2969 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
2970 return DP_TRAIN_PRE_EMPH_LEVEL_2;
2971 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
2972 return DP_TRAIN_PRE_EMPH_LEVEL_1;
2973 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
2975 return DP_TRAIN_PRE_EMPH_LEVEL_0;
2977 } else if (IS_VALLEYVIEW(dev)) {
2978 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
2979 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
2980 return DP_TRAIN_PRE_EMPH_LEVEL_3;
2981 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
2982 return DP_TRAIN_PRE_EMPH_LEVEL_2;
2983 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
2984 return DP_TRAIN_PRE_EMPH_LEVEL_1;
2985 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
2987 return DP_TRAIN_PRE_EMPH_LEVEL_0;
2989 } else if (IS_GEN7(dev) && port == PORT_A) {
2990 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
2991 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
2992 return DP_TRAIN_PRE_EMPH_LEVEL_2;
2993 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
2994 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
2995 return DP_TRAIN_PRE_EMPH_LEVEL_1;
2997 return DP_TRAIN_PRE_EMPH_LEVEL_0;
3000 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
3001 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3002 return DP_TRAIN_PRE_EMPH_LEVEL_2;
3003 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3004 return DP_TRAIN_PRE_EMPH_LEVEL_2;
3005 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3006 return DP_TRAIN_PRE_EMPH_LEVEL_1;
3007 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
3009 return DP_TRAIN_PRE_EMPH_LEVEL_0;
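/*
 * Translate the negotiated voltage swing / pre-emphasis levels into
 * VLV DPIO demph, pre-emphasis and unique transition scale register
 * values and program them through the sideband.
 */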
3014 static uint32_t vlv_signal_levels(struct intel_dp *intel_dp)
3016 struct drm_device *dev = intel_dp_to_dev(intel_dp);
3017 struct drm_i915_private *dev_priv = dev->dev_private;
3018 struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
3019 struct intel_crtc *intel_crtc =
3020 to_intel_crtc(dport->base.base.crtc);
3021 unsigned long demph_reg_value, preemph_reg_value,
3022 uniqtranscale_reg_value;
3023 uint8_t train_set = intel_dp->train_set[0];
3024 enum dpio_channel port = vlv_dport_to_channel(dport);
3025 int pipe = intel_crtc->pipe;
3027 switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
3028 case DP_TRAIN_PRE_EMPH_LEVEL_0:
3029 preemph_reg_value = 0x0004000;
3030 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3031 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3032 demph_reg_value = 0x2B405555;
3033 uniqtranscale_reg_value = 0x552AB83A;
3035 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3036 demph_reg_value = 0x2B404040;
3037 uniqtranscale_reg_value = 0x5548B83A;
3039 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3040 demph_reg_value = 0x2B245555;
3041 uniqtranscale_reg_value = 0x5560B83A;
3043 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
3044 demph_reg_value = 0x2B405555;
3045 uniqtranscale_reg_value = 0x5598DA3A;
3051 case DP_TRAIN_PRE_EMPH_LEVEL_1:
3052 preemph_reg_value = 0x0002000;
3053 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3054 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3055 demph_reg_value = 0x2B404040;
3056 uniqtranscale_reg_value = 0x5552B83A;
3058 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3059 demph_reg_value = 0x2B404848;
3060 uniqtranscale_reg_value = 0x5580B83A;
3062 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3063 demph_reg_value = 0x2B404040;
3064 uniqtranscale_reg_value = 0x55ADDA3A;
3070 case DP_TRAIN_PRE_EMPH_LEVEL_2:
3071 preemph_reg_value = 0x0000000;
3072 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3073 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3074 demph_reg_value = 0x2B305555;
3075 uniqtranscale_reg_value = 0x5570B83A;
3077 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3078 demph_reg_value = 0x2B2B4040;
3079 uniqtranscale_reg_value = 0x55ADDA3A;
3085 case DP_TRAIN_PRE_EMPH_LEVEL_3:
3086 preemph_reg_value = 0x0006000;
3087 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3088 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3089 demph_reg_value = 0x1B405555;
3090 uniqtranscale_reg_value = 0x55ADDA3A;
3100 mutex_lock(&dev_priv->sb_lock);
3101 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW5(port), 0x00000000);
3102 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW4(port), demph_reg_value);
3103 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW2(port),
3104 uniqtranscale_reg_value);
3105 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW3(port), 0x0C782040);
3106 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW11(port), 0x00030000);
3107 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW9(port), preemph_reg_value);
3108 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW5(port), 0x80000000);
3109 mutex_unlock(&dev_priv->sb_lock);
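/*
 * CHV variant: program de-emphasis and swing margin per TX lane via
 * DPIO, then kick off the hardware swing calculation.
 */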
3114 static uint32_t chv_signal_levels(struct intel_dp *intel_dp)
3116 struct drm_device *dev = intel_dp_to_dev(intel_dp);
3117 struct drm_i915_private *dev_priv = dev->dev_private;
3118 struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
3119 struct intel_crtc *intel_crtc = to_intel_crtc(dport->base.base.crtc);
3120 u32 deemph_reg_value, margin_reg_value, val;
3121 uint8_t train_set = intel_dp->train_set[0];
3122 enum dpio_channel ch = vlv_dport_to_channel(dport);
3123 enum pipe pipe = intel_crtc->pipe;
3126 switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
3127 case DP_TRAIN_PRE_EMPH_LEVEL_0:
3128 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3129 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3130 deemph_reg_value = 128;
3131 margin_reg_value = 52;
3133 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3134 deemph_reg_value = 128;
3135 margin_reg_value = 77;
3137 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3138 deemph_reg_value = 128;
3139 margin_reg_value = 102;
3141 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
3142 deemph_reg_value = 128;
3143 margin_reg_value = 154;
3144 /* FIXME extra to set for 1200 */
3150 case DP_TRAIN_PRE_EMPH_LEVEL_1:
3151 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3152 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3153 deemph_reg_value = 85;
3154 margin_reg_value = 78;
3156 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3157 deemph_reg_value = 85;
3158 margin_reg_value = 116;
3160 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3161 deemph_reg_value = 85;
3162 margin_reg_value = 154;
3168 case DP_TRAIN_PRE_EMPH_LEVEL_2:
3169 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3170 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3171 deemph_reg_value = 64;
3172 margin_reg_value = 104;
3174 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3175 deemph_reg_value = 64;
3176 margin_reg_value = 154;
3182 case DP_TRAIN_PRE_EMPH_LEVEL_3:
3183 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3184 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3185 deemph_reg_value = 43;
3186 margin_reg_value = 154;
3196 mutex_lock(&dev_priv->sb_lock);
3198 /* Clear calc init */
3199 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW10(ch));
3200 val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3);
3201 val &= ~(DPIO_PCS_TX1DEEMP_MASK | DPIO_PCS_TX2DEEMP_MASK);
3202 val |= DPIO_PCS_TX1DEEMP_9P5 | DPIO_PCS_TX2DEEMP_9P5;
3203 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW10(ch), val);
3205 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW10(ch));
3206 val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3);
3207 val &= ~(DPIO_PCS_TX1DEEMP_MASK | DPIO_PCS_TX2DEEMP_MASK);
3208 val |= DPIO_PCS_TX1DEEMP_9P5 | DPIO_PCS_TX2DEEMP_9P5;
3209 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW10(ch), val);
3211 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW9(ch));
3212 val &= ~(DPIO_PCS_TX1MARGIN_MASK | DPIO_PCS_TX2MARGIN_MASK);
3213 val |= DPIO_PCS_TX1MARGIN_000 | DPIO_PCS_TX2MARGIN_000;
3214 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW9(ch), val);
3216 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW9(ch));
3217 val &= ~(DPIO_PCS_TX1MARGIN_MASK | DPIO_PCS_TX2MARGIN_MASK);
3218 val |= DPIO_PCS_TX1MARGIN_000 | DPIO_PCS_TX2MARGIN_000;
3219 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW9(ch), val);
3221 /* Program swing deemph */
3222 for (i = 0; i < 4; i++) {
3223 val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW4(ch, i));
3224 val &= ~DPIO_SWING_DEEMPH9P5_MASK;
3225 val |= deemph_reg_value << DPIO_SWING_DEEMPH9P5_SHIFT;
3226 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW4(ch, i), val);
3229 /* Program swing margin */
3230 for (i = 0; i < 4; i++) {
3231 val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW2(ch, i));
3232 val &= ~DPIO_SWING_MARGIN000_MASK;
3233 val |= margin_reg_value << DPIO_SWING_MARGIN000_SHIFT;
3234 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW2(ch, i), val);
3237 /* Disable unique transition scale */
3238 for (i = 0; i < 4; i++) {
3239 val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW3(ch, i));
3240 val &= ~DPIO_TX_UNIQ_TRANS_SCALE_EN;
3241 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW3(ch, i), val);
3244 if (((train_set & DP_TRAIN_PRE_EMPHASIS_MASK)
3245 == DP_TRAIN_PRE_EMPH_LEVEL_0) &&
3246 ((train_set & DP_TRAIN_VOLTAGE_SWING_MASK)
3247 == DP_TRAIN_VOLTAGE_SWING_LEVEL_3)) {
3250 * The document said it needs to set bit 27 for ch0 and bit 26
3251 * for ch1. Might be a typo in the doc.
3252 * For now, for this unique transition scale selection, set bit
3253 * 27 for ch0 and ch1.
3255 for (i = 0; i < 4; i++) {
3256 val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW3(ch, i));
3257 val |= DPIO_TX_UNIQ_TRANS_SCALE_EN;
3258 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW3(ch, i), val);
3261 for (i = 0; i < 4; i++) {
3262 val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW2(ch, i));
3263 val &= ~(0xff << DPIO_UNIQ_TRANS_SCALE_SHIFT);
3264 val |= (0x9a << DPIO_UNIQ_TRANS_SCALE_SHIFT);
3265 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW2(ch, i), val);
3269 /* Start swing calculation */
3270 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW10(ch));
3271 val |= DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3;
3272 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW10(ch), val);
3274 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW10(ch));
3275 val |= DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3;
3276 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW10(ch), val);
3279 val = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW30);
3280 val |= DPIO_LRC_BYPASS;
3281 vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW30, val);
3283 mutex_unlock(&dev_priv->sb_lock);
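/*
 * Build the next train_set from the per-lane voltage swing and
 * pre-emphasis adjustments requested by the sink, clamped to the
 * source's maxima and flagged once the maximum has been reached.
 */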
3289 intel_get_adjust_train(struct intel_dp *intel_dp,
3290 const uint8_t link_status[DP_LINK_STATUS_SIZE])
3295 uint8_t voltage_max;
3296 uint8_t preemph_max;
3298 for (lane = 0; lane < intel_dp->lane_count; lane++) {
3299 uint8_t this_v = drm_dp_get_adjust_request_voltage(link_status, lane);
3300 uint8_t this_p = drm_dp_get_adjust_request_pre_emphasis(link_status, lane);
3308 voltage_max = intel_dp_voltage_max(intel_dp);
3309 if (v >= voltage_max)
3310 v = voltage_max | DP_TRAIN_MAX_SWING_REACHED;
3312 preemph_max = intel_dp_pre_emphasis_max(intel_dp, v);
3313 if (p >= preemph_max)
3314 p = preemph_max | DP_TRAIN_MAX_PRE_EMPHASIS_REACHED;
3316 for (lane = 0; lane < 4; lane++)
3317 intel_dp->train_set[lane] = v | p;
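/* Gen4's DP voltage swing and pre-emphasis control */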
3321 gen4_signal_levels(uint8_t train_set)
3323 uint32_t signal_levels = 0;
3325 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3326 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3328 signal_levels |= DP_VOLTAGE_0_4;
3330 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3331 signal_levels |= DP_VOLTAGE_0_6;
3333 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3334 signal_levels |= DP_VOLTAGE_0_8;
3336 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
3337 signal_levels |= DP_VOLTAGE_1_2;
3340 switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
3341 case DP_TRAIN_PRE_EMPH_LEVEL_0:
3343 signal_levels |= DP_PRE_EMPHASIS_0;
3345 case DP_TRAIN_PRE_EMPH_LEVEL_1:
3346 signal_levels |= DP_PRE_EMPHASIS_3_5;
3348 case DP_TRAIN_PRE_EMPH_LEVEL_2:
3349 signal_levels |= DP_PRE_EMPHASIS_6;
3351 case DP_TRAIN_PRE_EMPH_LEVEL_3:
3352 signal_levels |= DP_PRE_EMPHASIS_9_5;
3355 return signal_levels;
3358 /* Gen6's DP voltage swing and pre-emphasis control */
3360 gen6_edp_signal_levels(uint8_t train_set)
3362 int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
3363 DP_TRAIN_PRE_EMPHASIS_MASK);
3364 switch (signal_levels) {
3365 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3366 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3367 return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B;
3368 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3369 return EDP_LINK_TRAIN_400MV_3_5DB_SNB_B;
3370 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2:
3371 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_2:
3372 return EDP_LINK_TRAIN_400_600MV_6DB_SNB_B;
3373 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3374 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3375 return EDP_LINK_TRAIN_600_800MV_3_5DB_SNB_B;
3376 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3377 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3378 return EDP_LINK_TRAIN_800_1200MV_0DB_SNB_B;
3380 DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
3381 "0x%x\n", signal_levels);
3382 return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B;
3386 /* Gen7's DP voltage swing and pre-emphasis control */
3388 gen7_edp_signal_levels(uint8_t train_set)
3390 int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
3391 DP_TRAIN_PRE_EMPHASIS_MASK);
3392 switch (signal_levels) {
3393 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3394 return EDP_LINK_TRAIN_400MV_0DB_IVB;
3395 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3396 return EDP_LINK_TRAIN_400MV_3_5DB_IVB;
3397 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2:
3398 return EDP_LINK_TRAIN_400MV_6DB_IVB;
3400 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3401 return EDP_LINK_TRAIN_600MV_0DB_IVB;
3402 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3403 return EDP_LINK_TRAIN_600MV_3_5DB_IVB;
3405 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3406 return EDP_LINK_TRAIN_800MV_0DB_IVB;
3407 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3408 return EDP_LINK_TRAIN_800MV_3_5DB_IVB;
3411 DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
3412 "0x%x\n", signal_levels);
3413 return EDP_LINK_TRAIN_500MV_0DB_IVB;
3417 /* Gen7.5's (HSW) DP voltage swing and pre-emphasis control */
3419 hsw_signal_levels(uint8_t train_set)
3421 int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
3422 DP_TRAIN_PRE_EMPHASIS_MASK);
3423 switch (signal_levels) {
3424 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3425 return DDI_BUF_TRANS_SELECT(0);
3426 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3427 return DDI_BUF_TRANS_SELECT(1);
3428 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2:
3429 return DDI_BUF_TRANS_SELECT(2);
3430 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_3:
3431 return DDI_BUF_TRANS_SELECT(3);
3433 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3434 return DDI_BUF_TRANS_SELECT(4);
3435 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3436 return DDI_BUF_TRANS_SELECT(5);
3437 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_2:
3438 return DDI_BUF_TRANS_SELECT(6);
3440 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3441 return DDI_BUF_TRANS_SELECT(7);
3442 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3443 return DDI_BUF_TRANS_SELECT(8);
3445 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3446 return DDI_BUF_TRANS_SELECT(9);
3448 DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
3449 "0x%x\n", signal_levels);
3450 return DDI_BUF_TRANS_SELECT(0);
3454 static void bxt_signal_levels(struct intel_dp *intel_dp)
3456 struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
3457 enum port port = dport->port;
3458 struct drm_device *dev = dport->base.base.dev;
3459 struct intel_encoder *encoder = &dport->base;
3460 uint8_t train_set = intel_dp->train_set[0];
3463 int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
3464 DP_TRAIN_PRE_EMPHASIS_MASK);
3465 switch (signal_levels) {
3467 DRM_DEBUG_KMS("Unsupported voltage swing/pre-emph level\n");
3468 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3471 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3474 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2:
3477 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_3:
3480 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3483 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3486 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_2:
3489 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3492 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3495 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3500 bxt_ddi_vswing_sequence(dev, level, port, encoder->type);
3503 /* Properly updates "DP" with the correct signal levels. */
3505 intel_dp_set_signal_levels(struct intel_dp *intel_dp, uint32_t *DP)
3507 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3508 enum port port = intel_dig_port->port;
3509 struct drm_device *dev = intel_dig_port->base.base.dev;
3510 uint32_t signal_levels, mask;
3511 uint8_t train_set = intel_dp->train_set[0];
3513 if (IS_BROXTON(dev)) {
3515 bxt_signal_levels(intel_dp);
3517 } else if (HAS_DDI(dev)) {
3518 signal_levels = hsw_signal_levels(train_set);
3519 mask = DDI_BUF_EMP_MASK;
3520 } else if (IS_CHERRYVIEW(dev)) {
3521 signal_levels = chv_signal_levels(intel_dp);
3523 } else if (IS_VALLEYVIEW(dev)) {
3524 signal_levels = vlv_signal_levels(intel_dp);
3526 } else if (IS_GEN7(dev) && port == PORT_A) {
3527 signal_levels = gen7_edp_signal_levels(train_set);
3528 mask = EDP_LINK_TRAIN_VOL_EMP_MASK_IVB;
3529 } else if (IS_GEN6(dev) && port == PORT_A) {
3530 signal_levels = gen6_edp_signal_levels(train_set);
3531 mask = EDP_LINK_TRAIN_VOL_EMP_MASK_SNB;
3533 signal_levels = gen4_signal_levels(train_set);
3534 mask = DP_VOLTAGE_MASK | DP_PRE_EMPHASIS_MASK;
3538 DRM_DEBUG_KMS("Using signal levels %08x\n", signal_levels);
3540 DRM_DEBUG_KMS("Using vswing level %d\n",
3541 train_set & DP_TRAIN_VOLTAGE_SWING_MASK);
3542 DRM_DEBUG_KMS("Using pre-emphasis level %d\n",
3543 (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) >>
3544 DP_TRAIN_PRE_EMPHASIS_SHIFT);
3546 *DP = (*DP & ~mask) | signal_levels;
3550 intel_dp_set_link_train(struct intel_dp *intel_dp,
3552 uint8_t dp_train_pat)
3554 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3555 struct drm_device *dev = intel_dig_port->base.base.dev;
3556 struct drm_i915_private *dev_priv = dev->dev_private;
3557 uint8_t buf[sizeof(intel_dp->train_set) + 1];
3560 _intel_dp_set_link_train(intel_dp, DP, dp_train_pat);
3562 I915_WRITE(intel_dp->output_reg, *DP);
3563 POSTING_READ(intel_dp->output_reg);
3565 buf[0] = dp_train_pat;
3566 if ((dp_train_pat & DP_TRAINING_PATTERN_MASK) ==
3567 DP_TRAINING_PATTERN_DISABLE) {
3568 /* don't write DP_TRAINING_LANEx_SET on disable */
3571 /* DP_TRAINING_LANEx_SET follows DP_TRAINING_PATTERN_SET */
3572 memcpy(buf + 1, intel_dp->train_set, intel_dp->lane_count);
3573 len = intel_dp->lane_count + 1;
3576 ret = drm_dp_dpcd_write(&intel_dp->aux, DP_TRAINING_PATTERN_SET,
3583 intel_dp_reset_link_train(struct intel_dp *intel_dp, uint32_t *DP,
3584 uint8_t dp_train_pat)
3586 if (!intel_dp->train_set_valid)
3587 memset(intel_dp->train_set, 0, sizeof(intel_dp->train_set));
3588 intel_dp_set_signal_levels(intel_dp, DP);
3589 return intel_dp_set_link_train(intel_dp, DP, dp_train_pat);
3593 intel_dp_update_link_train(struct intel_dp *intel_dp, uint32_t *DP,
3594 const uint8_t link_status[DP_LINK_STATUS_SIZE])
3596 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3597 struct drm_device *dev = intel_dig_port->base.base.dev;
3598 struct drm_i915_private *dev_priv = dev->dev_private;
3601 intel_get_adjust_train(intel_dp, link_status);
3602 intel_dp_set_signal_levels(intel_dp, DP);
3604 I915_WRITE(intel_dp->output_reg, *DP);
3605 POSTING_READ(intel_dp->output_reg);
3607 ret = drm_dp_dpcd_write(&intel_dp->aux, DP_TRAINING_LANE0_SET,
3608 intel_dp->train_set, intel_dp->lane_count);
3610 return ret == intel_dp->lane_count;
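/*
 * Switch a DDI port to transmitting the idle pattern between link
 * training and normal output, waiting for idle completion except on
 * port A.
 */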
3613 static void intel_dp_set_idle_link_train(struct intel_dp *intel_dp)
3615 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3616 struct drm_device *dev = intel_dig_port->base.base.dev;
3617 struct drm_i915_private *dev_priv = dev->dev_private;
3618 enum port port = intel_dig_port->port;
3624 val = I915_READ(DP_TP_CTL(port));
3625 val &= ~DP_TP_CTL_LINK_TRAIN_MASK;
3626 val |= DP_TP_CTL_LINK_TRAIN_IDLE;
3627 I915_WRITE(DP_TP_CTL(port), val);
3630 * On PORT_A we can have only eDP in SST mode. There, the only reason
3631 * we need to set idle transmission mode is to work around a HW issue
3632 * where we enable the pipe while not in idle link-training mode.
3633 * In this case there is a requirement to wait for a minimum number of
3634 * idle patterns to be sent.
3639 if (wait_for((I915_READ(DP_TP_STATUS(port)) & DP_TP_STATUS_IDLE_DONE),
3641 DRM_ERROR("Timed out waiting for DP idle patterns\n");
3644 /* Enable corresponding port and start training pattern 1 */
3646 intel_dp_start_link_train(struct intel_dp *intel_dp)
3648 struct drm_encoder *encoder = &dp_to_dig_port(intel_dp)->base.base;
3649 struct drm_device *dev = encoder->dev;
3652 int voltage_tries, loop_tries;
3653 uint32_t DP = intel_dp->DP;
3654 uint8_t link_config[2];
3657 intel_ddi_prepare_link_retrain(encoder);
3659 /* Write the link configuration data */
3660 link_config[0] = intel_dp->link_bw;
3661 link_config[1] = intel_dp->lane_count;
3662 if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
3663 link_config[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
3664 drm_dp_dpcd_write(&intel_dp->aux, DP_LINK_BW_SET, link_config, 2);
3665 if (intel_dp->num_sink_rates)
3666 drm_dp_dpcd_write(&intel_dp->aux, DP_LINK_RATE_SET,
3667 &intel_dp->rate_select, 1);
3670 link_config[1] = DP_SET_ANSI_8B10B;
3671 drm_dp_dpcd_write(&intel_dp->aux, DP_DOWNSPREAD_CTRL, link_config, 2);
3675 /* clock recovery */
3676 if (!intel_dp_reset_link_train(intel_dp, &DP,
3677 DP_TRAINING_PATTERN_1 |
3678 DP_LINK_SCRAMBLING_DISABLE)) {
3679 DRM_ERROR("failed to enable link training\n");
3687 uint8_t link_status[DP_LINK_STATUS_SIZE];
3689 drm_dp_link_train_clock_recovery_delay(intel_dp->dpcd);
3690 if (!intel_dp_get_link_status(intel_dp, link_status)) {
3691 DRM_ERROR("failed to get link status\n");
3695 if (drm_dp_clock_recovery_ok(link_status, intel_dp->lane_count)) {
3696 DRM_DEBUG_KMS("clock recovery OK\n");
3701 * if we used previously trained voltage and pre-emphasis values
3702 * and we don't get clock recovery, reset link training values
3704 if (intel_dp->train_set_valid) {
3705 DRM_DEBUG_KMS("clock recovery not ok, reset");
3706 /* clear the flag as we are not reusing train set */
3707 intel_dp->train_set_valid = false;
3708 if (!intel_dp_reset_link_train(intel_dp, &DP,
3709 DP_TRAINING_PATTERN_1 |
3710 DP_LINK_SCRAMBLING_DISABLE)) {
3711 DRM_ERROR("failed to enable link training\n");
3717 /* Check to see if we've tried the max voltage */
3718 for (i = 0; i < intel_dp->lane_count; i++)
3719 if ((intel_dp->train_set[i] & DP_TRAIN_MAX_SWING_REACHED) == 0)
3721 if (i == intel_dp->lane_count) {
3723 if (loop_tries == 5) {
3724 DRM_ERROR("too many full retries, give up\n");
3727 intel_dp_reset_link_train(intel_dp, &DP,
3728 DP_TRAINING_PATTERN_1 |
3729 DP_LINK_SCRAMBLING_DISABLE);
3734 /* Check to see if we've tried the same voltage 5 times */
3735 if ((intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK) == voltage) {
3737 if (voltage_tries == 5) {
3738 DRM_ERROR("too many voltage retries, give up\n");
3743 voltage = intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK;
3745 /* Update training set as requested by target */
3746 if (!intel_dp_update_link_train(intel_dp, &DP, link_status)) {
3747 DRM_ERROR("failed to update link training\n");
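/*
 * Channel equalization phase: transmit pattern 2 (or 3 for HBR2/TPS3
 * sinks) until the sink reports EQ done, restarting clock recovery
 * if the link drops out of lock.
 */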
3756 intel_dp_complete_link_train(struct intel_dp *intel_dp)
3758 bool channel_eq = false;
3759 int tries, cr_tries;
3760 uint32_t DP = intel_dp->DP;
3761 uint32_t training_pattern = DP_TRAINING_PATTERN_2;
3763 /* Training Pattern 3 for HBR2 or 1.2 devices that support it */
3764 if (intel_dp->link_bw == DP_LINK_BW_5_4 || intel_dp->use_tps3)
3765 training_pattern = DP_TRAINING_PATTERN_3;
3767 /* channel equalization */
3768 if (!intel_dp_set_link_train(intel_dp, &DP,
3770 DP_LINK_SCRAMBLING_DISABLE)) {
3771 DRM_ERROR("failed to start channel equalization\n");
3779 uint8_t link_status[DP_LINK_STATUS_SIZE];
3782 DRM_ERROR("failed to train DP, aborting\n");
3786 drm_dp_link_train_channel_eq_delay(intel_dp->dpcd);
3787 if (!intel_dp_get_link_status(intel_dp, link_status)) {
3788 DRM_ERROR("failed to get link status\n");
3792 /* Make sure clock is still ok */
3793 if (!drm_dp_clock_recovery_ok(link_status, intel_dp->lane_count)) {
3794 intel_dp->train_set_valid = false;
3795 intel_dp_start_link_train(intel_dp);
3796 intel_dp_set_link_train(intel_dp, &DP,
3798 DP_LINK_SCRAMBLING_DISABLE);
3803 if (drm_dp_channel_eq_ok(link_status, intel_dp->lane_count)) {
3808 /* Try 5 times, then try clock recovery if that fails */
3810 intel_dp->train_set_valid = false;
3811 intel_dp_start_link_train(intel_dp);
3812 intel_dp_set_link_train(intel_dp, &DP,
3814 DP_LINK_SCRAMBLING_DISABLE);
3820 /* Update training set as requested by target */
3821 if (!intel_dp_update_link_train(intel_dp, &DP, link_status)) {
3822 DRM_ERROR("failed to update link training\n");
3828 intel_dp_set_idle_link_train(intel_dp);
3833 intel_dp->train_set_valid = true;
3834 DRM_DEBUG_KMS("Channel EQ done. DP Training successful\n");
3838 void intel_dp_stop_link_train(struct intel_dp *intel_dp)
3840 intel_dp_set_link_train(intel_dp, &intel_dp->DP,
3841 DP_TRAINING_PATTERN_DISABLE);
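/*
 * Take the link down on non-DDI platforms: switch to the idle/off
 * training pattern, then clear the port enable and audio bits.
 */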
3845 intel_dp_link_down(struct intel_dp *intel_dp)
3847 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3848 struct intel_crtc *crtc = to_intel_crtc(intel_dig_port->base.base.crtc);
3849 enum port port = intel_dig_port->port;
3850 struct drm_device *dev = intel_dig_port->base.base.dev;
3851 struct drm_i915_private *dev_priv = dev->dev_private;
3852 uint32_t DP = intel_dp->DP;
3854 if (WARN_ON(HAS_DDI(dev)))
3857 if (WARN_ON((I915_READ(intel_dp->output_reg) & DP_PORT_EN) == 0))
3860 DRM_DEBUG_KMS("\n");
3862 if ((IS_GEN7(dev) && port == PORT_A) ||
3863 (HAS_PCH_CPT(dev) && port != PORT_A)) {
3864 DP &= ~DP_LINK_TRAIN_MASK_CPT;
3865 DP |= DP_LINK_TRAIN_PAT_IDLE_CPT;
3867 if (IS_CHERRYVIEW(dev))
3868 DP &= ~DP_LINK_TRAIN_MASK_CHV;
3870 DP &= ~DP_LINK_TRAIN_MASK;
3871 DP |= DP_LINK_TRAIN_PAT_IDLE;
3873 I915_WRITE(intel_dp->output_reg, DP);
3874 POSTING_READ(intel_dp->output_reg);
3876 DP &= ~(DP_PORT_EN | DP_AUDIO_OUTPUT_ENABLE);
3877 I915_WRITE(intel_dp->output_reg, DP);
3878 POSTING_READ(intel_dp->output_reg);
3881 * HW workaround for IBX, we need to move the port
3882 * to transcoder A after disabling it to allow the
3883 * matching HDMI port to be enabled on transcoder A.
3885 if (HAS_PCH_IBX(dev) && crtc->pipe == PIPE_B && port != PORT_A) {
3886 /* always enable with pattern 1 (as per spec) */
3887 DP &= ~(DP_PIPEB_SELECT | DP_LINK_TRAIN_MASK);
3888 DP |= DP_PORT_EN | DP_LINK_TRAIN_PAT_1;
3889 I915_WRITE(intel_dp->output_reg, DP);
3890 POSTING_READ(intel_dp->output_reg);
3893 I915_WRITE(intel_dp->output_reg, DP);
3894 POSTING_READ(intel_dp->output_reg);
3897 msleep(intel_dp->panel_power_down_delay);
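/*
 * Read and cache the sink's DPCD, probing for PSR/PSR2, TPS3 and
 * eDP 1.4 intermediate link rate support along the way. Returns
 * false if the sink looks unusable.
 */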
3901 intel_dp_get_dpcd(struct intel_dp *intel_dp)
3903 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
3904 struct drm_device *dev = dig_port->base.base.dev;
3905 struct drm_i915_private *dev_priv = dev->dev_private;
3908 if (intel_dp_dpcd_read_wake(&intel_dp->aux, 0x000, intel_dp->dpcd,
3909 sizeof(intel_dp->dpcd)) < 0)
3910 return false; /* aux transfer failed */
3912 DRM_DEBUG_KMS("DPCD: %*ph\n", (int) sizeof(intel_dp->dpcd), intel_dp->dpcd);
3914 if (intel_dp->dpcd[DP_DPCD_REV] == 0)
3915 return false; /* DPCD not present */
3917 /* Check if the panel supports PSR */
3918 memset(intel_dp->psr_dpcd, 0, sizeof(intel_dp->psr_dpcd));
3919 if (is_edp(intel_dp)) {
3920 intel_dp_dpcd_read_wake(&intel_dp->aux, DP_PSR_SUPPORT,
3922 sizeof(intel_dp->psr_dpcd));
3923 if (intel_dp->psr_dpcd[0] & DP_PSR_IS_SUPPORTED) {
3924 dev_priv->psr.sink_support = true;
3925 DRM_DEBUG_KMS("Detected EDP PSR Panel.\n");
3928 if (INTEL_INFO(dev)->gen >= 9 &&
3929 (intel_dp->psr_dpcd[0] & DP_PSR2_IS_SUPPORTED)) {
3930 uint8_t frame_sync_cap;
3932 dev_priv->psr.sink_support = true;
3933 intel_dp_dpcd_read_wake(&intel_dp->aux,
3934 DP_SINK_DEVICE_AUX_FRAME_SYNC_CAP,
3935 &frame_sync_cap, 1);
3936 dev_priv->psr.aux_frame_sync = frame_sync_cap ? true : false;
3937 /* PSR2 needs frame sync as well */
3938 dev_priv->psr.psr2_support = dev_priv->psr.aux_frame_sync;
3939 DRM_DEBUG_KMS("PSR2 %s on sink",
3940 dev_priv->psr.psr2_support ? "supported" : "not supported");
3944 /* Training Pattern 3 support, both source and sink */
3945 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x12 &&
3946 intel_dp->dpcd[DP_MAX_LANE_COUNT] & DP_TPS3_SUPPORTED &&
3947 (IS_HASWELL(dev_priv) || INTEL_INFO(dev_priv)->gen >= 8)) {
3948 intel_dp->use_tps3 = true;
3949 DRM_DEBUG_KMS("Displayport TPS3 supported\n");
3951 intel_dp->use_tps3 = false;
3953 /* Intermediate frequency support */
3954 if (is_edp(intel_dp) &&
3955 (intel_dp->dpcd[DP_EDP_CONFIGURATION_CAP] & DP_DPCD_DISPLAY_CONTROL_CAPABLE) &&
3956 (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_EDP_DPCD_REV, &rev, 1) == 1) &&
3957 (rev >= 0x03)) { /* eDP v1.4 or higher */
3958 __le16 sink_rates[DP_MAX_SUPPORTED_RATES];
3961 intel_dp_dpcd_read_wake(&intel_dp->aux,
3962 DP_SUPPORTED_LINK_RATES,
3964 sizeof(sink_rates));
3966 for (i = 0; i < ARRAY_SIZE(sink_rates); i++) {
3967 int val = le16_to_cpu(sink_rates[i]);
3972 /* Value read is in kHz while drm clock is saved in deca-kHz */
3973 intel_dp->sink_rates[i] = (val * 200) / 10;
3975 intel_dp->num_sink_rates = i;
3978 intel_dp_print_rates(intel_dp);
3980 if (!(intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
3981 DP_DWN_STRM_PORT_PRESENT))
3982 return true; /* native DP sink */
3984 if (intel_dp->dpcd[DP_DPCD_REV] == 0x10)
3985 return true; /* no per-port downstream info */
3987 if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_DOWNSTREAM_PORT_0,
3988 intel_dp->downstream_ports,
3989 DP_MAX_DOWNSTREAM_PORTS) < 0)
3990 return false; /* downstream port status fetch failed */
3996 intel_dp_probe_oui(struct intel_dp *intel_dp)
4000 if (!(intel_dp->dpcd[DP_DOWN_STREAM_PORT_COUNT] & DP_OUI_SUPPORT))
4003 if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_SINK_OUI, buf, 3) == 3)
4004 DRM_DEBUG_KMS("Sink OUI: %02hx%02hx%02hx\n",
4005 buf[0], buf[1], buf[2]);
4007 if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_BRANCH_OUI, buf, 3) == 3)
4008 DRM_DEBUG_KMS("Branch OUI: %02hx%02hx%02hx\n",
4009 buf[0], buf[1], buf[2]);
4013 intel_dp_probe_mst(struct intel_dp *intel_dp)
4017 if (!intel_dp->can_mst)
4020 if (intel_dp->dpcd[DP_DPCD_REV] < 0x12)
4023 if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_MSTM_CAP, buf, 1)) {
4024 if (buf[0] & DP_MST_CAP) {
4025 DRM_DEBUG_KMS("Sink is MST capable\n");
4026 intel_dp->is_mst = true;
4028 DRM_DEBUG_KMS("Sink is not MST capable\n");
4029 intel_dp->is_mst = false;
4033 drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst);
4034 return intel_dp->is_mst;
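/*
 * Ask the sink to compute a test CRC over the transmitted frames:
 * start the sink CRC test, wait for the test count to advance, then
 * read the six CRC bytes back out of the DPCD.
 */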
4037 int intel_dp_sink_crc(struct intel_dp *intel_dp, u8 *crc)
4039 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
4040 struct drm_device *dev = intel_dig_port->base.base.dev;
4041 struct intel_crtc *intel_crtc =
4042 to_intel_crtc(intel_dig_port->base.base.crtc);
4048 hsw_disable_ips(intel_crtc);
4050 if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK_MISC, &buf) < 0) {
4055 if (!(buf & DP_TEST_CRC_SUPPORTED)) {
4060 if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK, &buf) < 0) {
4065 if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_SINK,
4066 buf | DP_TEST_SINK_START) < 0) {
4071 if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK_MISC, &buf) < 0) {
4076 test_crc_count = buf & DP_TEST_COUNT_MASK;
4079 if (drm_dp_dpcd_readb(&intel_dp->aux,
4080 DP_TEST_SINK_MISC, &buf) < 0) {
4084 intel_wait_for_vblank(dev, intel_crtc->pipe);
4085 } while (--attempts && (buf & DP_TEST_COUNT_MASK) == test_crc_count);
4087 if (attempts == 0) {
4088 DRM_DEBUG_KMS("Panel is unable to calculate CRC after 6 vblanks\n");
4093 if (drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_CRC_R_CR, crc, 6) < 0) {
4098 if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK, &buf) < 0) {
4102 if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_SINK,
4103 buf & ~DP_TEST_SINK_START) < 0) {
4108 hsw_enable_ips(intel_crtc);
4113 intel_dp_get_sink_irq(struct intel_dp *intel_dp, u8 *sink_irq_vector)
4115 return intel_dp_dpcd_read_wake(&intel_dp->aux,
4116 DP_DEVICE_SERVICE_IRQ_VECTOR,
4117 sink_irq_vector, 1) == 1;
4121 intel_dp_get_sink_irq_esi(struct intel_dp *intel_dp, u8 *sink_irq_vector)
4125 ret = intel_dp_dpcd_read_wake(&intel_dp->aux,
4127 sink_irq_vector, 14);
static uint8_t intel_dp_autotest_link_training(struct intel_dp *intel_dp)
{
	/* Stub: link training requests are simply acked for now. */
	uint8_t test_result = DP_TEST_ACK;
	return test_result;
}

static uint8_t intel_dp_autotest_video_pattern(struct intel_dp *intel_dp)
{
	/* Stub: video pattern tests are not implemented, nak them. */
	uint8_t test_result = DP_TEST_NAK;
	return test_result;
}

static uint8_t intel_dp_autotest_edid(struct intel_dp *intel_dp)
{
	uint8_t test_result = DP_TEST_NAK;
	struct intel_connector *intel_connector = intel_dp->attached_connector;
	struct drm_connector *connector = &intel_connector->base;

	if (intel_connector->detect_edid == NULL ||
	    connector->edid_corrupt ||
	    intel_dp->aux.i2c_defer_count > 6) {
		/* Check EDID read for NACKs, DEFERs and corruption
		 * (DP CTS 1.2 Core r1.1)
		 *    4.2.2.4 : Failed EDID read, I2C_NAK
		 *    4.2.2.5 : Failed EDID read, I2C_DEFER
		 *    4.2.2.6 : EDID corruption detected
		 * Use failsafe mode for all cases
		 */
		if (intel_dp->aux.i2c_nack_count > 0 ||
		    intel_dp->aux.i2c_defer_count > 0)
			DRM_DEBUG_KMS("EDID read had %d NACKs, %d DEFERs\n",
				      intel_dp->aux.i2c_nack_count,
				      intel_dp->aux.i2c_defer_count);
		intel_dp->compliance_test_data = INTEL_DP_RESOLUTION_FAILSAFE;
	} else {
		if (!drm_dp_dpcd_write(&intel_dp->aux,
				       DP_TEST_EDID_CHECKSUM,
				       &intel_connector->detect_edid->checksum,
				       1))
			DRM_DEBUG_KMS("Failed to write EDID checksum\n");

		test_result = DP_TEST_ACK | DP_TEST_EDID_CHECKSUM_WRITE;
		intel_dp->compliance_test_data = INTEL_DP_RESOLUTION_STANDARD;
	}

	/* Set test active flag here so userspace doesn't interrupt things */
	intel_dp->compliance_test_active = 1;

	return test_result;
}

static uint8_t intel_dp_autotest_phy_pattern(struct intel_dp *intel_dp)
{
	/* Stub: PHY pattern tests are not implemented, nak them. */
	uint8_t test_result = DP_TEST_NAK;
	return test_result;
}
static void intel_dp_handle_test_request(struct intel_dp *intel_dp)
{
	uint8_t response = DP_TEST_NAK;
	uint8_t rxdata = 0;
	int status = 0;

	intel_dp->compliance_test_active = 0;
	intel_dp->compliance_test_type = 0;
	intel_dp->compliance_test_data = 0;

	intel_dp->aux.i2c_nack_count = 0;
	intel_dp->aux.i2c_defer_count = 0;

	status = drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_REQUEST, &rxdata, 1);
	if (status <= 0) {
		DRM_DEBUG_KMS("Could not read test request from sink\n");
		goto update_status;
	}

	switch (rxdata) {
	case DP_TEST_LINK_TRAINING:
		DRM_DEBUG_KMS("LINK_TRAINING test requested\n");
		intel_dp->compliance_test_type = DP_TEST_LINK_TRAINING;
		response = intel_dp_autotest_link_training(intel_dp);
		break;
	case DP_TEST_LINK_VIDEO_PATTERN:
		DRM_DEBUG_KMS("TEST_PATTERN test requested\n");
		intel_dp->compliance_test_type = DP_TEST_LINK_VIDEO_PATTERN;
		response = intel_dp_autotest_video_pattern(intel_dp);
		break;
	case DP_TEST_LINK_EDID_READ:
		DRM_DEBUG_KMS("EDID test requested\n");
		intel_dp->compliance_test_type = DP_TEST_LINK_EDID_READ;
		response = intel_dp_autotest_edid(intel_dp);
		break;
	case DP_TEST_LINK_PHY_TEST_PATTERN:
		DRM_DEBUG_KMS("PHY_PATTERN test requested\n");
		intel_dp->compliance_test_type = DP_TEST_LINK_PHY_TEST_PATTERN;
		response = intel_dp_autotest_phy_pattern(intel_dp);
		break;
	default:
		DRM_DEBUG_KMS("Invalid test request '%02x'\n", rxdata);
		break;
	}

update_status:
	status = drm_dp_dpcd_write(&intel_dp->aux,
				   DP_TEST_RESPONSE,
				   &response, 1);
	if (status <= 0)
		DRM_DEBUG_KMS("Could not write test response to sink\n");
}
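/*
 * For orientation, the full sink-initiated autotest round trip as
 * implemented above and in the IRQ handling below (informational sketch,
 * not additional code):
 *
 *	sink raises DP_AUTOMATED_TEST_REQUEST in the device service IRQ
 *	vector -> intel_dp_handle_test_request() reads DP_TEST_REQUEST
 *	-> dispatches to the matching intel_dp_autotest_*() helper
 *	-> writes the helper's ACK/NAK result back to DP_TEST_RESPONSE.
 */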
static int
intel_dp_check_mst_status(struct intel_dp *intel_dp)
{
	bool bret;

	if (intel_dp->is_mst) {
		u8 esi[16] = { 0 };
		int ret = 0;
		int retry;
		bool handled;
		bret = intel_dp_get_sink_irq_esi(intel_dp, esi);
go_again:
		if (bret == true) {

			/* check link status - esi[10] = 0x200c */
			if (intel_dp->active_mst_links &&
			    !drm_dp_channel_eq_ok(&esi[10], intel_dp->lane_count)) {
				DRM_DEBUG_KMS("channel EQ not ok, retraining\n");
				intel_dp_start_link_train(intel_dp);
				intel_dp_complete_link_train(intel_dp);
				intel_dp_stop_link_train(intel_dp);
			}

			DRM_DEBUG_KMS("got esi %3ph\n", esi);
			ret = drm_dp_mst_hpd_irq(&intel_dp->mst_mgr, esi, &handled);

			if (handled) {
				for (retry = 0; retry < 3; retry++) {
					int wret;
					wret = drm_dp_dpcd_write(&intel_dp->aux,
								 DP_SINK_COUNT_ESI+1,
								 &esi[1], 3);
					if (wret == 3)
						break;
				}

				bret = intel_dp_get_sink_irq_esi(intel_dp, esi);
				if (bret == true) {
					DRM_DEBUG_KMS("got esi2 %3ph\n", esi);
					goto go_again;
				}
			} else
				ret = 0;

			return ret;
		} else {
			struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
			DRM_DEBUG_KMS("failed to get ESI - device may have failed\n");
			intel_dp->is_mst = false;
			drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst);
			/* send a hotplug event */
			drm_kms_helper_hotplug_event(intel_dig_port->base.base.dev);
		}
	}
	return -EINVAL;
}
/*
 * According to DP spec
 * 5.1.2:
 *  1. Read DPCD
 *  2. Configure link according to Receiver Capabilities
 *  3. Use Link Training from 2.5.3.3 and 3.5.1.3
 *  4. Check link status on receipt of hot-plug interrupt
 */
static void
intel_dp_check_link_status(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct intel_encoder *intel_encoder = &dp_to_dig_port(intel_dp)->base;
	u8 sink_irq_vector;
	u8 link_status[DP_LINK_STATUS_SIZE];

	WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));

	if (!intel_encoder->connectors_active)
		return;

	if (WARN_ON(!intel_encoder->base.crtc))
		return;

	if (!to_intel_crtc(intel_encoder->base.crtc)->active)
		return;

	/* Try to read receiver status if the link appears to be up */
	if (!intel_dp_get_link_status(intel_dp, link_status))
		return;

	/* Now read the DPCD to see if it's actually running */
	if (!intel_dp_get_dpcd(intel_dp))
		return;

	/* Try to read the source of the interrupt */
	if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
	    intel_dp_get_sink_irq(intel_dp, &sink_irq_vector)) {
		/* Clear interrupt source */
		drm_dp_dpcd_writeb(&intel_dp->aux,
				   DP_DEVICE_SERVICE_IRQ_VECTOR,
				   sink_irq_vector);

		if (sink_irq_vector & DP_AUTOMATED_TEST_REQUEST)
			DRM_DEBUG_DRIVER("Test request in short pulse not handled\n");
		if (sink_irq_vector & (DP_CP_IRQ | DP_SINK_SPECIFIC_IRQ))
			DRM_DEBUG_DRIVER("CP or sink specific irq unhandled\n");
	}

	if (!drm_dp_channel_eq_ok(link_status, intel_dp->lane_count)) {
		DRM_DEBUG_KMS("%s: channel EQ not ok, retraining\n",
			      intel_encoder->base.name);
		intel_dp_start_link_train(intel_dp);
		intel_dp_complete_link_train(intel_dp);
		intel_dp_stop_link_train(intel_dp);
	}
}
/* XXX this is probably wrong for multiple downstream ports */
static enum drm_connector_status
intel_dp_detect_dpcd(struct intel_dp *intel_dp)
{
	uint8_t *dpcd = intel_dp->dpcd;
	uint8_t type;

	if (!intel_dp_get_dpcd(intel_dp))
		return connector_status_disconnected;

	/* if there's no downstream port, we're done */
	if (!(dpcd[DP_DOWNSTREAMPORT_PRESENT] & DP_DWN_STRM_PORT_PRESENT))
		return connector_status_connected;

	/* If we're HPD-aware, SINK_COUNT changes dynamically */
	if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
	    intel_dp->downstream_ports[0] & DP_DS_PORT_HPD) {
		uint8_t reg;

		if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_SINK_COUNT,
					    &reg, 1) < 0)
			return connector_status_unknown;

		return DP_GET_SINK_COUNT(reg) ? connector_status_connected
					      : connector_status_disconnected;
	}

	/* If no HPD, poke DDC gently */
	if (drm_probe_ddc(&intel_dp->aux.ddc))
		return connector_status_connected;

	/* Well we tried, say unknown for unreliable port types */
	if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11) {
		type = intel_dp->downstream_ports[0] & DP_DS_PORT_TYPE_MASK;
		if (type == DP_DS_PORT_TYPE_VGA ||
		    type == DP_DS_PORT_TYPE_NON_EDID)
			return connector_status_unknown;
	} else {
		type = intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
			DP_DWN_STRM_PORT_TYPE_MASK;
		if (type == DP_DWN_STRM_PORT_TYPE_ANALOG ||
		    type == DP_DWN_STRM_PORT_TYPE_OTHER)
			return connector_status_unknown;
	}

	/* Anything else is out of spec, warn and ignore */
	DRM_DEBUG_KMS("Broken DP branch device, ignoring\n");
	return connector_status_disconnected;
}
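/*
 * Example of how the ladder above plays out (informational): a DP-to-VGA
 * dongle that reports no HPD support and exposes no EDID over DDC lands
 * in the DP_DS_PORT_TYPE_VGA branch and is reported as
 * connector_status_unknown rather than disconnected, since an analog
 * load behind the branch device can't be sensed from here.
 */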
static enum drm_connector_status
edp_detect(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	enum drm_connector_status status;

	status = intel_panel_detect(dev);
	if (status == connector_status_unknown)
		status = connector_status_connected;

	return status;
}
static enum drm_connector_status
ironlake_dp_detect(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);

	if (!ibx_digital_port_connected(dev_priv, intel_dig_port))
		return connector_status_disconnected;

	return intel_dp_detect_dpcd(intel_dp);
}
static int g4x_digital_port_connected(struct drm_device *dev,
				      struct intel_digital_port *intel_dig_port)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t bit;

	if (IS_VALLEYVIEW(dev)) {
		switch (intel_dig_port->port) {
		case PORT_B:
			bit = PORTB_HOTPLUG_LIVE_STATUS_VLV;
			break;
		case PORT_C:
			bit = PORTC_HOTPLUG_LIVE_STATUS_VLV;
			break;
		case PORT_D:
			bit = PORTD_HOTPLUG_LIVE_STATUS_VLV;
			break;
		default:
			return -EINVAL;
		}
	} else {
		switch (intel_dig_port->port) {
		case PORT_B:
			bit = PORTB_HOTPLUG_LIVE_STATUS_G4X;
			break;
		case PORT_C:
			bit = PORTC_HOTPLUG_LIVE_STATUS_G4X;
			break;
		case PORT_D:
			bit = PORTD_HOTPLUG_LIVE_STATUS_G4X;
			break;
		default:
			return -EINVAL;
		}
	}

	if ((I915_READ(PORT_HOTPLUG_STAT) & bit) == 0)
		return 0;
	return 1;
}
static enum drm_connector_status
g4x_dp_detect(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	int ret;

	/* Can't disconnect eDP, but you can close the lid... */
	if (is_edp(intel_dp)) {
		enum drm_connector_status status;

		status = intel_panel_detect(dev);
		if (status == connector_status_unknown)
			status = connector_status_connected;
		return status;
	}

	ret = g4x_digital_port_connected(dev, intel_dig_port);
	if (ret == -EINVAL)
		return connector_status_unknown;
	else if (ret == 0)
		return connector_status_disconnected;

	return intel_dp_detect_dpcd(intel_dp);
}
static struct edid *
intel_dp_get_edid(struct intel_dp *intel_dp)
{
	struct intel_connector *intel_connector = intel_dp->attached_connector;

	/* use cached edid if we have one */
	if (intel_connector->edid) {
		/* invalid edid */
		if (IS_ERR(intel_connector->edid))
			return NULL;

		return drm_edid_duplicate(intel_connector->edid);
	} else
		return drm_get_edid(&intel_connector->base,
				    &intel_dp->aux.ddc);
}
static void
intel_dp_set_edid(struct intel_dp *intel_dp)
{
	struct intel_connector *intel_connector = intel_dp->attached_connector;
	struct edid *edid;

	edid = intel_dp_get_edid(intel_dp);
	intel_connector->detect_edid = edid;

	if (intel_dp->force_audio != HDMI_AUDIO_AUTO)
		intel_dp->has_audio = intel_dp->force_audio == HDMI_AUDIO_ON;
	else
		intel_dp->has_audio = drm_detect_monitor_audio(edid);
}

static void
intel_dp_unset_edid(struct intel_dp *intel_dp)
{
	struct intel_connector *intel_connector = intel_dp->attached_connector;

	kfree(intel_connector->detect_edid);
	intel_connector->detect_edid = NULL;

	intel_dp->has_audio = false;
}
static enum intel_display_power_domain
intel_dp_power_get(struct intel_dp *dp)
{
	struct intel_encoder *encoder = &dp_to_dig_port(dp)->base;
	enum intel_display_power_domain power_domain;

	power_domain = intel_display_port_power_domain(encoder);
	intel_display_power_get(to_i915(encoder->base.dev), power_domain);

	return power_domain;
}

static void
intel_dp_power_put(struct intel_dp *dp,
		   enum intel_display_power_domain power_domain)
{
	struct intel_encoder *encoder = &dp_to_dig_port(dp)->base;
	intel_display_power_put(to_i915(encoder->base.dev), power_domain);
}
static enum drm_connector_status
intel_dp_detect(struct drm_connector *connector, bool force)
{
	struct intel_dp *intel_dp = intel_attached_dp(connector);
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct intel_encoder *intel_encoder = &intel_dig_port->base;
	struct drm_device *dev = connector->dev;
	enum drm_connector_status status;
	enum intel_display_power_domain power_domain;
	bool ret;
	u8 sink_irq_vector;

	DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
		      connector->base.id, connector->name);
	intel_dp_unset_edid(intel_dp);

	if (intel_dp->is_mst) {
		/* MST devices are disconnected from a monitor POV */
		if (intel_encoder->type != INTEL_OUTPUT_EDP)
			intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
		return connector_status_disconnected;
	}

	power_domain = intel_dp_power_get(intel_dp);

	/* Can't disconnect eDP, but you can close the lid... */
	if (is_edp(intel_dp))
		status = edp_detect(intel_dp);
	else if (HAS_PCH_SPLIT(dev))
		status = ironlake_dp_detect(intel_dp);
	else
		status = g4x_dp_detect(intel_dp);
	if (status != connector_status_connected)
		goto out;

	intel_dp_probe_oui(intel_dp);

	ret = intel_dp_probe_mst(intel_dp);
	if (ret) {
		/* if we are in MST mode then this connector
		   won't appear connected or have anything with EDID on it */
		if (intel_encoder->type != INTEL_OUTPUT_EDP)
			intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
		status = connector_status_disconnected;
		goto out;
	}

	intel_dp_set_edid(intel_dp);

	if (intel_encoder->type != INTEL_OUTPUT_EDP)
		intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
	status = connector_status_connected;

	/* Try to read the source of the interrupt */
	if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
	    intel_dp_get_sink_irq(intel_dp, &sink_irq_vector)) {
		/* Clear interrupt source */
		drm_dp_dpcd_writeb(&intel_dp->aux,
				   DP_DEVICE_SERVICE_IRQ_VECTOR,
				   sink_irq_vector);

		if (sink_irq_vector & DP_AUTOMATED_TEST_REQUEST)
			intel_dp_handle_test_request(intel_dp);
		if (sink_irq_vector & (DP_CP_IRQ | DP_SINK_SPECIFIC_IRQ))
			DRM_DEBUG_DRIVER("CP or sink specific irq unhandled\n");
	}

out:
	intel_dp_power_put(intel_dp, power_domain);
	return status;
}
static void
intel_dp_force(struct drm_connector *connector)
{
	struct intel_dp *intel_dp = intel_attached_dp(connector);
	struct intel_encoder *intel_encoder = &dp_to_dig_port(intel_dp)->base;
	enum intel_display_power_domain power_domain;

	DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
		      connector->base.id, connector->name);
	intel_dp_unset_edid(intel_dp);

	if (connector->status != connector_status_connected)
		return;

	power_domain = intel_dp_power_get(intel_dp);

	intel_dp_set_edid(intel_dp);

	intel_dp_power_put(intel_dp, power_domain);

	if (intel_encoder->type != INTEL_OUTPUT_EDP)
		intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
}
static int intel_dp_get_modes(struct drm_connector *connector)
{
	struct intel_connector *intel_connector = to_intel_connector(connector);
	struct edid *edid;

	edid = intel_connector->detect_edid;
	if (edid) {
		int ret = intel_connector_update_modes(connector, edid);
		if (ret)
			return ret;
	}

	/* if eDP has no EDID, fall back to fixed mode */
	if (is_edp(intel_attached_dp(connector)) &&
	    intel_connector->panel.fixed_mode) {
		struct drm_display_mode *mode;

		mode = drm_mode_duplicate(connector->dev,
					  intel_connector->panel.fixed_mode);
		if (mode) {
			drm_mode_probed_add(connector, mode);
			return 1;
		}
	}

	return 0;
}
static bool
intel_dp_detect_audio(struct drm_connector *connector)
{
	bool has_audio = false;
	struct edid *edid;

	edid = to_intel_connector(connector)->detect_edid;
	if (edid)
		has_audio = drm_detect_monitor_audio(edid);

	return has_audio;
}
static int
intel_dp_set_property(struct drm_connector *connector,
		      struct drm_property *property,
		      uint64_t val)
{
	struct drm_i915_private *dev_priv = connector->dev->dev_private;
	struct intel_connector *intel_connector = to_intel_connector(connector);
	struct intel_encoder *intel_encoder = intel_attached_encoder(connector);
	struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
	int ret;

	ret = drm_object_property_set_value(&connector->base, property, val);
	if (ret)
		return ret;

	if (property == dev_priv->force_audio_property) {
		int i = val;
		bool has_audio;

		if (i == intel_dp->force_audio)
			return 0;

		intel_dp->force_audio = i;

		if (i == HDMI_AUDIO_AUTO)
			has_audio = intel_dp_detect_audio(connector);
		else
			has_audio = (i == HDMI_AUDIO_ON);

		if (has_audio == intel_dp->has_audio)
			return 0;

		intel_dp->has_audio = has_audio;
		goto done;
	}

	if (property == dev_priv->broadcast_rgb_property) {
		bool old_auto = intel_dp->color_range_auto;
		uint32_t old_range = intel_dp->color_range;

		switch (val) {
		case INTEL_BROADCAST_RGB_AUTO:
			intel_dp->color_range_auto = true;
			break;
		case INTEL_BROADCAST_RGB_FULL:
			intel_dp->color_range_auto = false;
			intel_dp->color_range = 0;
			break;
		case INTEL_BROADCAST_RGB_LIMITED:
			intel_dp->color_range_auto = false;
			intel_dp->color_range = DP_COLOR_RANGE_16_235;
			break;
		default:
			return -EINVAL;
		}

		if (old_auto == intel_dp->color_range_auto &&
		    old_range == intel_dp->color_range)
			return 0;

		goto done;
	}

	if (is_edp(intel_dp) &&
	    property == connector->dev->mode_config.scaling_mode_property) {
		if (val == DRM_MODE_SCALE_NONE) {
			DRM_DEBUG_KMS("no scaling not supported\n");
			return -EINVAL;
		}

		if (intel_connector->panel.fitting_mode == val) {
			/* the eDP scaling property is not changed */
			return 0;
		}
		intel_connector->panel.fitting_mode = val;

		goto done;
	}

	return -EINVAL;

done:
	if (intel_encoder->base.crtc)
		intel_crtc_restore_mode(intel_encoder->base.crtc);

	return 0;
}
static void
intel_dp_connector_destroy(struct drm_connector *connector)
{
	struct intel_connector *intel_connector = to_intel_connector(connector);

	kfree(intel_connector->detect_edid);

	if (!IS_ERR_OR_NULL(intel_connector->edid))
		kfree(intel_connector->edid);

	/* Can't call is_edp() since the encoder may have been destroyed
	 * already. */
	if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
		intel_panel_fini(&intel_connector->panel);

	drm_connector_cleanup(connector);
	kfree(connector);
}
void intel_dp_encoder_destroy(struct drm_encoder *encoder)
{
	struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
	struct intel_dp *intel_dp = &intel_dig_port->dp;

	drm_dp_aux_unregister(&intel_dp->aux);
	intel_dp_mst_encoder_cleanup(intel_dig_port);
	if (is_edp(intel_dp)) {
		cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
		/*
		 * vdd might still be enabled due to the delayed vdd off.
		 * Make sure vdd is actually turned off here.
		 */
		pps_lock(intel_dp);
		edp_panel_vdd_off_sync(intel_dp);
		pps_unlock(intel_dp);

		if (intel_dp->edp_notifier.notifier_call) {
			unregister_reboot_notifier(&intel_dp->edp_notifier);
			intel_dp->edp_notifier.notifier_call = NULL;
		}
	}
	drm_encoder_cleanup(encoder);
	kfree(intel_dig_port);
}
static void intel_dp_encoder_suspend(struct intel_encoder *intel_encoder)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);

	if (!is_edp(intel_dp))
		return;

	/*
	 * vdd might still be enabled due to the delayed vdd off.
	 * Make sure vdd is actually turned off here.
	 */
	cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
	pps_lock(intel_dp);
	edp_panel_vdd_off_sync(intel_dp);
	pps_unlock(intel_dp);
}
static void intel_edp_panel_vdd_sanitize(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum intel_display_power_domain power_domain;

	lockdep_assert_held(&dev_priv->pps_mutex);

	if (!edp_have_panel_vdd(intel_dp))
		return;

	/*
	 * The VDD bit needs a power domain reference, so if the bit is
	 * already enabled when we boot or resume, grab this reference and
	 * schedule a vdd off, so we don't hold on to the reference
	 * indefinitely.
	 */
	DRM_DEBUG_KMS("VDD left on by BIOS, adjusting state tracking\n");
	power_domain = intel_display_port_power_domain(&intel_dig_port->base);
	intel_display_power_get(dev_priv, power_domain);

	edp_panel_vdd_schedule_off(intel_dp);
}
static void intel_dp_encoder_reset(struct drm_encoder *encoder)
{
	struct intel_dp *intel_dp;

	if (to_intel_encoder(encoder)->type != INTEL_OUTPUT_EDP)
		return;

	intel_dp = enc_to_intel_dp(encoder);

	pps_lock(intel_dp);

	/*
	 * Read out the current power sequencer assignment,
	 * in case the BIOS did something with it.
	 */
	if (IS_VALLEYVIEW(encoder->dev))
		vlv_initial_power_sequencer_setup(intel_dp);

	intel_edp_panel_vdd_sanitize(intel_dp);

	pps_unlock(intel_dp);
}
static const struct drm_connector_funcs intel_dp_connector_funcs = {
	.dpms = intel_connector_dpms,
	.detect = intel_dp_detect,
	.force = intel_dp_force,
	.fill_modes = drm_helper_probe_single_connector_modes,
	.set_property = intel_dp_set_property,
	.atomic_get_property = intel_connector_atomic_get_property,
	.destroy = intel_dp_connector_destroy,
	.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
	.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
};

static const struct drm_connector_helper_funcs intel_dp_connector_helper_funcs = {
	.get_modes = intel_dp_get_modes,
	.mode_valid = intel_dp_mode_valid,
	.best_encoder = intel_best_encoder,
};

static const struct drm_encoder_funcs intel_dp_enc_funcs = {
	.reset = intel_dp_encoder_reset,
	.destroy = intel_dp_encoder_destroy,
};

void
intel_dp_hot_plug(struct intel_encoder *intel_encoder)
{
	return;
}
enum irqreturn
intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd)
{
	struct intel_dp *intel_dp = &intel_dig_port->dp;
	struct intel_encoder *intel_encoder = &intel_dig_port->base;
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum intel_display_power_domain power_domain;
	enum irqreturn ret = IRQ_NONE;

	if (intel_dig_port->base.type != INTEL_OUTPUT_EDP)
		intel_dig_port->base.type = INTEL_OUTPUT_DISPLAYPORT;

	if (long_hpd && intel_dig_port->base.type == INTEL_OUTPUT_EDP) {
		/*
		 * vdd off can generate a long pulse on eDP which
		 * would require vdd on to handle it, and thus we
		 * would end up in an endless cycle of
		 * "vdd off -> long hpd -> vdd on -> detect -> vdd off -> ..."
		 */
		DRM_DEBUG_KMS("ignoring long hpd on eDP port %c\n",
			      port_name(intel_dig_port->port));
		return IRQ_HANDLED;
	}

	DRM_DEBUG_KMS("got hpd irq on port %c - %s\n",
		      port_name(intel_dig_port->port),
		      long_hpd ? "long" : "short");

	power_domain = intel_display_port_power_domain(intel_encoder);
	intel_display_power_get(dev_priv, power_domain);

	if (long_hpd) {
		/* indicate that we need to restart link training */
		intel_dp->train_set_valid = false;

		if (HAS_PCH_SPLIT(dev)) {
			if (!ibx_digital_port_connected(dev_priv, intel_dig_port))
				goto mst_fail;
		} else {
			if (g4x_digital_port_connected(dev, intel_dig_port) != 1)
				goto mst_fail;
		}

		if (!intel_dp_get_dpcd(intel_dp))
			goto mst_fail;

		intel_dp_probe_oui(intel_dp);

		if (!intel_dp_probe_mst(intel_dp))
			goto mst_fail;
	} else {
		if (intel_dp->is_mst) {
			if (intel_dp_check_mst_status(intel_dp) == -EINVAL)
				goto mst_fail;
		}

		if (!intel_dp->is_mst) {
			/*
			 * we'll check the link status via the normal hot plug path later -
			 * but for short hpds we should check it now
			 */
			drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
			intel_dp_check_link_status(intel_dp);
			drm_modeset_unlock(&dev->mode_config.connection_mutex);
		}
	}

	ret = IRQ_HANDLED;

	goto put_power;

mst_fail:
	/* if we were in MST mode, and device is not there get out of MST mode */
	if (intel_dp->is_mst) {
		DRM_DEBUG_KMS("MST device may have disappeared %d vs %d\n",
			      intel_dp->is_mst, intel_dp->mst_mgr.mst_state);
		intel_dp->is_mst = false;
		drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst);
	}
put_power:
	intel_display_power_put(dev_priv, power_domain);

	return ret;
}
/* Return which DP Port should be selected for Transcoder DP control */
int
intel_trans_dp_port_sel(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct intel_encoder *intel_encoder;
	struct intel_dp *intel_dp;

	for_each_encoder_on_crtc(dev, crtc, intel_encoder) {
		intel_dp = enc_to_intel_dp(&intel_encoder->base);

		if (intel_encoder->type == INTEL_OUTPUT_DISPLAYPORT ||
		    intel_encoder->type == INTEL_OUTPUT_EDP)
			return intel_dp->output_reg;
	}

	return -1;
}
/* check the VBT to see whether the eDP is on DP-D port */
bool intel_dp_is_edp(struct drm_device *dev, enum port port)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	union child_device_config *p_child;
	int i;
	static const short port_mapping[] = {
		[PORT_B] = PORT_IDPB,
		[PORT_C] = PORT_IDPC,
		[PORT_D] = PORT_IDPD,
	};

	if (port == PORT_A)
		return true;

	if (!dev_priv->vbt.child_dev_num)
		return false;

	for (i = 0; i < dev_priv->vbt.child_dev_num; i++) {
		p_child = dev_priv->vbt.child_dev + i;

		if (p_child->common.dvo_port == port_mapping[port] &&
		    (p_child->common.device_type & DEVICE_TYPE_eDP_BITS) ==
		    (DEVICE_TYPE_eDP & DEVICE_TYPE_eDP_BITS))
			return true;
	}
	return false;
}
static void
intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connector)
{
	struct intel_connector *intel_connector = to_intel_connector(connector);

	intel_attach_force_audio_property(connector);
	intel_attach_broadcast_rgb_property(connector);
	intel_dp->color_range_auto = true;

	if (is_edp(intel_dp)) {
		drm_mode_create_scaling_mode_property(connector->dev);
		drm_object_attach_property(
			&connector->base,
			connector->dev->mode_config.scaling_mode_property,
			DRM_MODE_SCALE_ASPECT);
		intel_connector->panel.fitting_mode = DRM_MODE_SCALE_ASPECT;
	}
}
static void intel_dp_init_panel_power_timestamps(struct intel_dp *intel_dp)
{
	intel_dp->last_power_cycle = jiffies;
	intel_dp->last_power_on = jiffies;
	intel_dp->last_backlight_off = jiffies;
}
static void
intel_dp_init_panel_power_sequencer(struct drm_device *dev,
				    struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct edp_power_seq cur, vbt, spec,
		*final = &intel_dp->pps_delays;
	u32 pp_on, pp_off, pp_div, pp;
	int pp_ctrl_reg, pp_on_reg, pp_off_reg, pp_div_reg;

	lockdep_assert_held(&dev_priv->pps_mutex);

	/* already initialized? */
	if (final->t11_t12 != 0)
		return;

	if (HAS_PCH_SPLIT(dev)) {
		pp_ctrl_reg = PCH_PP_CONTROL;
		pp_on_reg = PCH_PP_ON_DELAYS;
		pp_off_reg = PCH_PP_OFF_DELAYS;
		pp_div_reg = PCH_PP_DIVISOR;
	} else {
		enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);

		pp_ctrl_reg = VLV_PIPE_PP_CONTROL(pipe);
		pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe);
		pp_off_reg = VLV_PIPE_PP_OFF_DELAYS(pipe);
		pp_div_reg = VLV_PIPE_PP_DIVISOR(pipe);
	}

	/* Workaround: Need to write PP_CONTROL with the unlock key as
	 * the very first thing. */
	pp = ironlake_get_pp_control(intel_dp);
	I915_WRITE(pp_ctrl_reg, pp);

	pp_on = I915_READ(pp_on_reg);
	pp_off = I915_READ(pp_off_reg);
	pp_div = I915_READ(pp_div_reg);

	/* Pull timing values out of registers */
	cur.t1_t3 = (pp_on & PANEL_POWER_UP_DELAY_MASK) >>
		PANEL_POWER_UP_DELAY_SHIFT;

	cur.t8 = (pp_on & PANEL_LIGHT_ON_DELAY_MASK) >>
		PANEL_LIGHT_ON_DELAY_SHIFT;

	cur.t9 = (pp_off & PANEL_LIGHT_OFF_DELAY_MASK) >>
		PANEL_LIGHT_OFF_DELAY_SHIFT;

	cur.t10 = (pp_off & PANEL_POWER_DOWN_DELAY_MASK) >>
		PANEL_POWER_DOWN_DELAY_SHIFT;

	cur.t11_t12 = ((pp_div & PANEL_POWER_CYCLE_DELAY_MASK) >>
		       PANEL_POWER_CYCLE_DELAY_SHIFT) * 1000;

	DRM_DEBUG_KMS("cur t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
		      cur.t1_t3, cur.t8, cur.t9, cur.t10, cur.t11_t12);

	vbt = dev_priv->vbt.edp_pps;

	/* Upper limits from eDP 1.3 spec. Note that we use the clunky units of
	 * our hw here, which are all in 100usec. */
	spec.t1_t3 = 210 * 10;
	spec.t8 = 50 * 10; /* no limit for t8, use t7 instead */
	spec.t9 = 50 * 10; /* no limit for t9, make it symmetric with t8 */
	spec.t10 = 500 * 10;
	/* This one is special and actually in units of 100ms, but zero
	 * based in the hw (so we need to add 100 ms). But the sw vbt
	 * table multiplies it with 1000 to make it in units of 100usec,
	 * too. */
	spec.t11_t12 = (510 + 100) * 10;

	DRM_DEBUG_KMS("vbt t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
		      vbt.t1_t3, vbt.t8, vbt.t9, vbt.t10, vbt.t11_t12);

	/* Use the max of the register settings and vbt. If both are
	 * unset, fall back to the spec limits. */
#define assign_final(field)	final->field = (max(cur.field, vbt.field) == 0 ? \
				       spec.field : \
				       max(cur.field, vbt.field))
	assign_final(t1_t3);
	assign_final(t8);
	assign_final(t9);
	assign_final(t10);
	assign_final(t11_t12);
#undef assign_final

#define get_delay(field)	(DIV_ROUND_UP(final->field, 10))
	intel_dp->panel_power_up_delay = get_delay(t1_t3);
	intel_dp->backlight_on_delay = get_delay(t8);
	intel_dp->backlight_off_delay = get_delay(t9);
	intel_dp->panel_power_down_delay = get_delay(t10);
	intel_dp->panel_power_cycle_delay = get_delay(t11_t12);
#undef get_delay

	DRM_DEBUG_KMS("panel power up delay %d, power down delay %d, power cycle delay %d\n",
		      intel_dp->panel_power_up_delay, intel_dp->panel_power_down_delay,
		      intel_dp->panel_power_cycle_delay);

	DRM_DEBUG_KMS("backlight on delay %d, off delay %d\n",
		      intel_dp->backlight_on_delay, intel_dp->backlight_off_delay);
}
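/*
 * Worked example of the unit handling above (values illustrative):
 * a t1_t3 of 2100 is in 100us units, i.e. 210ms, and get_delay() turns
 * it into DIV_ROUND_UP(2100, 10) = 210ms for the software waits. The
 * t11_t12 spec limit of (510 + 100) * 10 = 6100 likewise decodes to
 * 610ms: 510ms from the eDP spec plus the 100ms implied by the
 * zero-based hardware field.
 */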
static void
intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
					      struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 pp_on, pp_off, pp_div, port_sel = 0;
	int div = HAS_PCH_SPLIT(dev) ? intel_pch_rawclk(dev) : intel_hrawclk(dev);
	int pp_on_reg, pp_off_reg, pp_div_reg;
	enum port port = dp_to_dig_port(intel_dp)->port;
	const struct edp_power_seq *seq = &intel_dp->pps_delays;

	lockdep_assert_held(&dev_priv->pps_mutex);

	if (HAS_PCH_SPLIT(dev)) {
		pp_on_reg = PCH_PP_ON_DELAYS;
		pp_off_reg = PCH_PP_OFF_DELAYS;
		pp_div_reg = PCH_PP_DIVISOR;
	} else {
		enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);

		pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe);
		pp_off_reg = VLV_PIPE_PP_OFF_DELAYS(pipe);
		pp_div_reg = VLV_PIPE_PP_DIVISOR(pipe);
	}

	/*
	 * And finally store the new values in the power sequencer. The
	 * backlight delays are set to 1 because we do manual waits on them. For
	 * T8, even BSpec recommends doing it. For T9, if we don't do this,
	 * we'll end up waiting for the backlight off delay twice: once when we
	 * do the manual sleep, and once when we disable the panel and wait for
	 * the PP_STATUS bit to become zero.
	 */
	pp_on = (seq->t1_t3 << PANEL_POWER_UP_DELAY_SHIFT) |
		(1 << PANEL_LIGHT_ON_DELAY_SHIFT);
	pp_off = (1 << PANEL_LIGHT_OFF_DELAY_SHIFT) |
		 (seq->t10 << PANEL_POWER_DOWN_DELAY_SHIFT);
	/* Compute the divisor for the pp clock, simply match the Bspec
	 * formula. */
	pp_div = ((100 * div)/2 - 1) << PP_REFERENCE_DIVIDER_SHIFT;
	pp_div |= (DIV_ROUND_UP(seq->t11_t12, 1000)
			<< PANEL_POWER_CYCLE_DELAY_SHIFT);

	/* Haswell doesn't have any port selection bits for the panel
	 * power sequencer any more. */
	if (IS_VALLEYVIEW(dev)) {
		port_sel = PANEL_PORT_SELECT_VLV(port);
	} else if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) {
		if (port == PORT_A)
			port_sel = PANEL_PORT_SELECT_DPA;
		else
			port_sel = PANEL_PORT_SELECT_DPD;
	}

	pp_on |= port_sel;

	I915_WRITE(pp_on_reg, pp_on);
	I915_WRITE(pp_off_reg, pp_off);
	I915_WRITE(pp_div_reg, pp_div);

	DRM_DEBUG_KMS("panel power sequencer register settings: PP_ON %#x, PP_OFF %#x, PP_DIV %#x\n",
		      I915_READ(pp_on_reg),
		      I915_READ(pp_off_reg),
		      I915_READ(pp_div_reg));
}
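/*
 * Worked example for the divisor math above, assuming a 125MHz raw
 * clock (the actual value comes from intel_pch_rawclk()/intel_hrawclk()):
 * div = 125, so the reference divider field gets ((100 * 125) / 2 - 1)
 * = 6249, and a t11_t12 of 6100 (in 100us units) is programmed as
 * DIV_ROUND_UP(6100, 1000) = 7 power cycle delay units.
 */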
/**
 * intel_dp_set_drrs_state - program registers for RR switch to take effect
 * @dev: DRM device
 * @refresh_rate: RR to be programmed
 *
 * This function gets called when refresh rate (RR) has to be changed from
 * one frequency to another. Switches can be between high and low RR
 * supported by the panel or to any other RR based on media playback (in
 * this case, RR value needs to be passed from user space).
 *
 * The caller of this function needs to take a lock on dev_priv->drrs.
 */
static void intel_dp_set_drrs_state(struct drm_device *dev, int refresh_rate)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_encoder *encoder;
	struct intel_digital_port *dig_port = NULL;
	struct intel_dp *intel_dp = dev_priv->drrs.dp;
	struct intel_crtc_state *config = NULL;
	struct intel_crtc *intel_crtc = NULL;
	u32 reg, val;
	enum drrs_refresh_rate_type index = DRRS_HIGH_RR;

	if (refresh_rate <= 0) {
		DRM_DEBUG_KMS("Refresh rate should be a positive non-zero value.\n");
		return;
	}

	if (intel_dp == NULL) {
		DRM_DEBUG_KMS("DRRS not supported.\n");
		return;
	}

	/*
	 * FIXME: This needs proper synchronization with psr state for some
	 * platforms that cannot have PSR and DRRS enabled at the same time.
	 */

	dig_port = dp_to_dig_port(intel_dp);
	encoder = &dig_port->base;
	intel_crtc = to_intel_crtc(encoder->base.crtc);

	if (!intel_crtc) {
		DRM_DEBUG_KMS("DRRS: intel_crtc not initialized\n");
		return;
	}

	config = intel_crtc->config;

	if (dev_priv->drrs.type < SEAMLESS_DRRS_SUPPORT) {
		DRM_DEBUG_KMS("Only Seamless DRRS supported.\n");
		return;
	}

	if (intel_dp->attached_connector->panel.downclock_mode->vrefresh ==
			refresh_rate)
		index = DRRS_LOW_RR;

	if (index == dev_priv->drrs.refresh_rate_type) {
		DRM_DEBUG_KMS(
			"DRRS requested for previously set RR...ignoring\n");
		return;
	}

	if (!intel_crtc->active) {
		DRM_DEBUG_KMS("eDP encoder disabled. CRTC not Active\n");
		return;
	}

	if (INTEL_INFO(dev)->gen >= 8 && !IS_CHERRYVIEW(dev)) {
		switch (index) {
		case DRRS_HIGH_RR:
			intel_dp_set_m_n(intel_crtc, M1_N1);
			break;
		case DRRS_LOW_RR:
			intel_dp_set_m_n(intel_crtc, M2_N2);
			break;
		case DRRS_MAX_RR:
		default:
			DRM_ERROR("Unsupported refresh rate type\n");
		}
	} else if (INTEL_INFO(dev)->gen > 6) {
		reg = PIPECONF(intel_crtc->config->cpu_transcoder);
		val = I915_READ(reg);

		if (index > DRRS_HIGH_RR) {
			if (IS_VALLEYVIEW(dev))
				val |= PIPECONF_EDP_RR_MODE_SWITCH_VLV;
			else
				val |= PIPECONF_EDP_RR_MODE_SWITCH;
		} else {
			if (IS_VALLEYVIEW(dev))
				val &= ~PIPECONF_EDP_RR_MODE_SWITCH_VLV;
			else
				val &= ~PIPECONF_EDP_RR_MODE_SWITCH;
		}
		I915_WRITE(reg, val);
	}

	dev_priv->drrs.refresh_rate_type = index;

	DRM_DEBUG_KMS("eDP Refresh Rate set to : %dHz\n", refresh_rate);
}
/**
 * intel_edp_drrs_enable - init drrs struct if supported
 * @intel_dp: DP struct
 *
 * Initializes frontbuffer_bits and drrs.dp
 */
void intel_edp_drrs_enable(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_crtc *crtc = dig_port->base.base.crtc;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);

	if (!intel_crtc->config->has_drrs) {
		DRM_DEBUG_KMS("Panel doesn't support DRRS\n");
		return;
	}

	mutex_lock(&dev_priv->drrs.mutex);
	if (WARN_ON(dev_priv->drrs.dp)) {
		DRM_ERROR("DRRS already enabled\n");
		goto unlock;
	}

	dev_priv->drrs.busy_frontbuffer_bits = 0;

	dev_priv->drrs.dp = intel_dp;

unlock:
	mutex_unlock(&dev_priv->drrs.mutex);
}
/**
 * intel_edp_drrs_disable - Disable DRRS
 * @intel_dp: DP struct
 */
void intel_edp_drrs_disable(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_crtc *crtc = dig_port->base.base.crtc;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);

	if (!intel_crtc->config->has_drrs)
		return;

	mutex_lock(&dev_priv->drrs.mutex);
	if (!dev_priv->drrs.dp) {
		mutex_unlock(&dev_priv->drrs.mutex);
		return;
	}

	if (dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR)
		intel_dp_set_drrs_state(dev_priv->dev,
					intel_dp->attached_connector->panel.
					fixed_mode->vrefresh);

	dev_priv->drrs.dp = NULL;
	mutex_unlock(&dev_priv->drrs.mutex);

	cancel_delayed_work_sync(&dev_priv->drrs.work);
}
static void intel_edp_drrs_downclock_work(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, typeof(*dev_priv), drrs.work.work);
	struct intel_dp *intel_dp;

	mutex_lock(&dev_priv->drrs.mutex);

	intel_dp = dev_priv->drrs.dp;

	if (!intel_dp)
		goto unlock;

	/*
	 * The delayed work can race with an invalidate hence we need to
	 * recheck.
	 */

	if (dev_priv->drrs.busy_frontbuffer_bits)
		goto unlock;

	if (dev_priv->drrs.refresh_rate_type != DRRS_LOW_RR)
		intel_dp_set_drrs_state(dev_priv->dev,
					intel_dp->attached_connector->panel.
					downclock_mode->vrefresh);

unlock:
	mutex_unlock(&dev_priv->drrs.mutex);
}
/**
 * intel_edp_drrs_invalidate - Invalidate DRRS
 * @dev: DRM device
 * @frontbuffer_bits: frontbuffer plane tracking bits
 *
 * When there is a disturbance on screen (due to cursor movement/time
 * update etc), DRRS needs to be invalidated, i.e. we need to switch to
 * high RR.
 *
 * Dirty frontbuffers relevant to DRRS are tracked in busy_frontbuffer_bits.
 */
void intel_edp_drrs_invalidate(struct drm_device *dev,
			       unsigned frontbuffer_bits)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc;
	enum pipe pipe;

	if (dev_priv->drrs.type == DRRS_NOT_SUPPORTED)
		return;

	cancel_delayed_work(&dev_priv->drrs.work);

	mutex_lock(&dev_priv->drrs.mutex);
	if (!dev_priv->drrs.dp) {
		mutex_unlock(&dev_priv->drrs.mutex);
		return;
	}

	crtc = dp_to_dig_port(dev_priv->drrs.dp)->base.base.crtc;
	pipe = to_intel_crtc(crtc)->pipe;

	if (dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR) {
		intel_dp_set_drrs_state(dev_priv->dev,
					dev_priv->drrs.dp->attached_connector->panel.
					fixed_mode->vrefresh);
	}

	frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);

	dev_priv->drrs.busy_frontbuffer_bits |= frontbuffer_bits;
	mutex_unlock(&dev_priv->drrs.mutex);
}
/**
 * intel_edp_drrs_flush - Flush DRRS
 * @dev: DRM device
 * @frontbuffer_bits: frontbuffer plane tracking bits
 *
 * When there is no movement on screen, DRRS work can be scheduled.
 * This DRRS work is responsible for setting relevant registers after a
 * timeout of 1 second.
 *
 * Dirty frontbuffers relevant to DRRS are tracked in busy_frontbuffer_bits.
 */
void intel_edp_drrs_flush(struct drm_device *dev,
			  unsigned frontbuffer_bits)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc;
	enum pipe pipe;

	if (dev_priv->drrs.type == DRRS_NOT_SUPPORTED)
		return;

	cancel_delayed_work(&dev_priv->drrs.work);

	mutex_lock(&dev_priv->drrs.mutex);
	if (!dev_priv->drrs.dp) {
		mutex_unlock(&dev_priv->drrs.mutex);
		return;
	}

	crtc = dp_to_dig_port(dev_priv->drrs.dp)->base.base.crtc;
	pipe = to_intel_crtc(crtc)->pipe;
	dev_priv->drrs.busy_frontbuffer_bits &= ~frontbuffer_bits;

	if (dev_priv->drrs.refresh_rate_type != DRRS_LOW_RR &&
	    !dev_priv->drrs.busy_frontbuffer_bits)
		schedule_delayed_work(&dev_priv->drrs.work,
				      msecs_to_jiffies(1000));
	mutex_unlock(&dev_priv->drrs.mutex);
}
/**
 * DOC: Display Refresh Rate Switching (DRRS)
 *
 * Display Refresh Rate Switching (DRRS) is a power conservation feature
 * which enables switching between low and high refresh rates,
 * dynamically, based on the usage scenario. This feature is applicable
 * for internal panels.
 *
 * Indication that the panel supports DRRS is given by the panel EDID, which
 * would list multiple refresh rates for one resolution.
 *
 * DRRS is of two types - static and seamless.
 * Static DRRS involves changing refresh rate (RR) by doing a full modeset
 * (may appear as a blink on screen) and is used in dock-undock scenarios.
 * Seamless DRRS involves changing RR without any visual effect to the user
 * and can be used during normal system usage. This is done by programming
 * certain registers.
 *
 * Support for static/seamless DRRS may be indicated in the VBT based on
 * inputs from the panel spec.
 *
 * DRRS saves power by switching to low RR based on usage scenarios.
 *
 * eDP DRRS:
 * The implementation is based on frontbuffer tracking. When there is a
 * disturbance on the screen triggered by user activity or a periodic
 * system activity, DRRS is disabled (RR is changed to high RR). When
 * there is no movement on screen, after a timeout of 1 second, a switch
 * to low RR is made. For integration with the frontbuffer tracking code,
 * intel_edp_drrs_invalidate() and intel_edp_drrs_flush() are called.
 *
 * DRRS can be further extended to support other internal panels and also
 * the scenario of video playback wherein RR is set based on the rate
 * requested by userspace.
 */
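/*
 * A minimal usage sketch of the tracking hooks (hypothetical caller,
 * mirroring what the frontbuffer tracking code does): invalidate when a
 * frontbuffer is about to be dirtied, flush once the new contents have
 * reached the display.
 *
 *	// about to write to the frontbuffer: force high RR
 *	intel_edp_drrs_invalidate(dev, frontbuffer_bits);
 *
 *	... CPU/GTT writes or a page flip happen here ...
 *
 *	// contents are on screen: arm the 1 second downclock timer
 *	intel_edp_drrs_flush(dev, frontbuffer_bits);
 */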
/**
 * intel_dp_drrs_init - Init basic DRRS work and mutex.
 * @intel_connector: eDP connector
 * @fixed_mode: preferred mode of panel
 *
 * This function is called only once at driver load to initialize basic
 * DRRS state.
 *
 * Returns:
 * Downclock mode if panel supports it, else return NULL.
 * DRRS support is determined by the presence of downclock mode (apart
 * from VBT setting).
 */
static struct drm_display_mode *
intel_dp_drrs_init(struct intel_connector *intel_connector,
		   struct drm_display_mode *fixed_mode)
{
	struct drm_connector *connector = &intel_connector->base;
	struct drm_device *dev = connector->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_display_mode *downclock_mode = NULL;

	INIT_DELAYED_WORK(&dev_priv->drrs.work, intel_edp_drrs_downclock_work);
	mutex_init(&dev_priv->drrs.mutex);

	if (INTEL_INFO(dev)->gen <= 6) {
		DRM_DEBUG_KMS("DRRS supported for Gen7 and above\n");
		return NULL;
	}

	if (dev_priv->vbt.drrs_type != SEAMLESS_DRRS_SUPPORT) {
		DRM_DEBUG_KMS("VBT doesn't support DRRS\n");
		return NULL;
	}

	downclock_mode = intel_find_panel_downclock
					(dev, fixed_mode, connector);

	if (!downclock_mode) {
		DRM_DEBUG_KMS("Downclock mode is not found. DRRS not supported\n");
		return NULL;
	}

	dev_priv->drrs.type = dev_priv->vbt.drrs_type;

	dev_priv->drrs.refresh_rate_type = DRRS_HIGH_RR;
	DRM_DEBUG_KMS("seamless DRRS supported for eDP panel.\n");
	return downclock_mode;
}
static bool intel_edp_init_connector(struct intel_dp *intel_dp,
				     struct intel_connector *intel_connector)
{
	struct drm_connector *connector = &intel_connector->base;
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct intel_encoder *intel_encoder = &intel_dig_port->base;
	struct drm_device *dev = intel_encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_display_mode *fixed_mode = NULL;
	struct drm_display_mode *downclock_mode = NULL;
	bool has_dpcd;
	struct drm_display_mode *scan;
	struct edid *edid;
	enum pipe pipe = INVALID_PIPE;

	if (!is_edp(intel_dp))
		return true;

	pps_lock(intel_dp);
	intel_edp_panel_vdd_sanitize(intel_dp);
	pps_unlock(intel_dp);

	/* Cache DPCD and EDID for edp. */
	has_dpcd = intel_dp_get_dpcd(intel_dp);

	if (has_dpcd) {
		if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11)
			dev_priv->no_aux_handshake =
				intel_dp->dpcd[DP_MAX_DOWNSPREAD] &
				DP_NO_AUX_HANDSHAKE_LINK_TRAINING;
	} else {
		/* if this fails, presume the device is a ghost */
		DRM_INFO("failed to retrieve link info, disabling eDP\n");
		return false;
	}

	/* We now know it's not a ghost, init power sequence regs. */
	pps_lock(intel_dp);
	intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
	pps_unlock(intel_dp);

	mutex_lock(&dev->mode_config.mutex);
	edid = drm_get_edid(connector, &intel_dp->aux.ddc);
	if (edid) {
		if (drm_add_edid_modes(connector, edid)) {
			drm_mode_connector_update_edid_property(connector,
								edid);
			drm_edid_to_eld(connector, edid);
		} else {
			kfree(edid);
			edid = ERR_PTR(-EINVAL);
		}
	} else {
		edid = ERR_PTR(-ENOENT);
	}
	intel_connector->edid = edid;

	/* prefer fixed mode from EDID if available */
	list_for_each_entry(scan, &connector->probed_modes, head) {
		if ((scan->type & DRM_MODE_TYPE_PREFERRED)) {
			fixed_mode = drm_mode_duplicate(dev, scan);
			downclock_mode = intel_dp_drrs_init(
						intel_connector, fixed_mode);
			break;
		}
	}

	/* fallback to VBT if available for eDP */
	if (!fixed_mode && dev_priv->vbt.lfp_lvds_vbt_mode) {
		fixed_mode = drm_mode_duplicate(dev,
					dev_priv->vbt.lfp_lvds_vbt_mode);
		if (fixed_mode)
			fixed_mode->type |= DRM_MODE_TYPE_PREFERRED;
	}
	mutex_unlock(&dev->mode_config.mutex);

	if (IS_VALLEYVIEW(dev)) {
		intel_dp->edp_notifier.notifier_call = edp_notify_handler;
		register_reboot_notifier(&intel_dp->edp_notifier);

		/*
		 * Figure out the current pipe for the initial backlight setup.
		 * If the current pipe isn't valid, try the PPS pipe, and if that
		 * fails just assume pipe A.
		 */
		if (IS_CHERRYVIEW(dev))
			pipe = DP_PORT_TO_PIPE_CHV(intel_dp->DP);
		else
			pipe = PORT_TO_PIPE(intel_dp->DP);

		if (pipe != PIPE_A && pipe != PIPE_B)
			pipe = intel_dp->pps_pipe;

		if (pipe != PIPE_A && pipe != PIPE_B)
			pipe = PIPE_A;

		DRM_DEBUG_KMS("using pipe %c for initial backlight setup\n",
			      pipe_name(pipe));
	}

	intel_panel_init(&intel_connector->panel, fixed_mode, downclock_mode);
	intel_connector->panel.backlight_power = intel_edp_backlight_power;
	intel_panel_setup_backlight(connector, pipe);

	return true;
}
bool
intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
			struct intel_connector *intel_connector)
{
	struct drm_connector *connector = &intel_connector->base;
	struct intel_dp *intel_dp = &intel_dig_port->dp;
	struct intel_encoder *intel_encoder = &intel_dig_port->base;
	struct drm_device *dev = intel_encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum port port = intel_dig_port->port;
	int type;

	intel_dp->pps_pipe = INVALID_PIPE;

	/* intel_dp vfuncs */
	if (INTEL_INFO(dev)->gen >= 9)
		intel_dp->get_aux_clock_divider = skl_get_aux_clock_divider;
	else if (IS_VALLEYVIEW(dev))
		intel_dp->get_aux_clock_divider = vlv_get_aux_clock_divider;
	else if (IS_HASWELL(dev) || IS_BROADWELL(dev))
		intel_dp->get_aux_clock_divider = hsw_get_aux_clock_divider;
	else if (HAS_PCH_SPLIT(dev))
		intel_dp->get_aux_clock_divider = ilk_get_aux_clock_divider;
	else
		intel_dp->get_aux_clock_divider = i9xx_get_aux_clock_divider;

	if (INTEL_INFO(dev)->gen >= 9)
		intel_dp->get_aux_send_ctl = skl_get_aux_send_ctl;
	else
		intel_dp->get_aux_send_ctl = i9xx_get_aux_send_ctl;

	/* Preserve the current hw state. */
	intel_dp->DP = I915_READ(intel_dp->output_reg);
	intel_dp->attached_connector = intel_connector;

	if (intel_dp_is_edp(dev, port))
		type = DRM_MODE_CONNECTOR_eDP;
	else
		type = DRM_MODE_CONNECTOR_DisplayPort;

	/*
	 * For eDP we always set the encoder type to INTEL_OUTPUT_EDP, but
	 * for DP the encoder type can be set by the caller to
	 * INTEL_OUTPUT_UNKNOWN for DDI, so don't rewrite it.
	 */
	if (type == DRM_MODE_CONNECTOR_eDP)
		intel_encoder->type = INTEL_OUTPUT_EDP;

	/* eDP only on port B and/or C on vlv/chv */
	if (WARN_ON(IS_VALLEYVIEW(dev) && is_edp(intel_dp) &&
		    port != PORT_B && port != PORT_C))
		return false;

	DRM_DEBUG_KMS("Adding %s connector on port %c\n",
		      type == DRM_MODE_CONNECTOR_eDP ? "eDP" : "DP",
		      port_name(port));

	drm_connector_init(dev, connector, &intel_dp_connector_funcs, type);
	drm_connector_helper_add(connector, &intel_dp_connector_helper_funcs);

	connector->interlace_allowed = true;
	connector->doublescan_allowed = 0;

	INIT_DELAYED_WORK(&intel_dp->panel_vdd_work,
			  edp_panel_vdd_work);

	intel_connector_attach_encoder(intel_connector, intel_encoder);
	drm_connector_register(connector);

	if (HAS_DDI(dev))
		intel_connector->get_hw_state = intel_ddi_connector_get_hw_state;
	else
		intel_connector->get_hw_state = intel_connector_get_hw_state;
	intel_connector->unregister = intel_dp_connector_unregister;

	/* Set up the hotplug pin. */
	switch (port) {
	case PORT_A:
		intel_encoder->hpd_pin = HPD_PORT_A;
		break;
	case PORT_B:
		intel_encoder->hpd_pin = HPD_PORT_B;
		break;
	case PORT_C:
		intel_encoder->hpd_pin = HPD_PORT_C;
		break;
	case PORT_D:
		intel_encoder->hpd_pin = HPD_PORT_D;
		break;
	default:
		BUG();
	}

	if (is_edp(intel_dp)) {
		pps_lock(intel_dp);
		intel_dp_init_panel_power_timestamps(intel_dp);
		if (IS_VALLEYVIEW(dev))
			vlv_initial_power_sequencer_setup(intel_dp);
		else
			intel_dp_init_panel_power_sequencer(dev, intel_dp);
		pps_unlock(intel_dp);
	}

	intel_dp_aux_init(intel_dp, intel_connector);

	/* init MST on ports that can support it */
	if (HAS_DP_MST(dev) &&
	    (port == PORT_B || port == PORT_C || port == PORT_D))
		intel_dp_mst_encoder_init(intel_dig_port,
					  intel_connector->base.base.id);

	if (!intel_edp_init_connector(intel_dp, intel_connector)) {
		drm_dp_aux_unregister(&intel_dp->aux);
		if (is_edp(intel_dp)) {
			cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
			/*
			 * vdd might still be enabled due to the delayed vdd off.
			 * Make sure vdd is actually turned off here.
			 */
			pps_lock(intel_dp);
			edp_panel_vdd_off_sync(intel_dp);
			pps_unlock(intel_dp);
		}
		drm_connector_unregister(connector);
		drm_connector_cleanup(connector);
		return false;
	}

	intel_dp_add_properties(intel_dp, connector);

	/* For G4X desktop chip, PEG_BAND_GAP_DATA 3:0 must first be written
	 * 0xd.  Failure to do so will result in spurious interrupts being
	 * generated on the port when a cable is not attached.
	 */
	if (IS_G4X(dev) && !IS_GM45(dev)) {
		u32 temp = I915_READ(PEG_BAND_GAP_DATA);
		I915_WRITE(PEG_BAND_GAP_DATA, (temp & ~0xf) | 0xd);
	}

	i915_debugfs_connector_add(connector);

	return true;
}
void
intel_dp_init(struct drm_device *dev, int output_reg, enum port port)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_digital_port *intel_dig_port;
	struct intel_encoder *intel_encoder;
	struct drm_encoder *encoder;
	struct intel_connector *intel_connector;

	intel_dig_port = kzalloc(sizeof(*intel_dig_port), GFP_KERNEL);
	if (!intel_dig_port)
		return;

	intel_connector = intel_connector_alloc();
	if (!intel_connector) {
		kfree(intel_dig_port);
		return;
	}

	intel_encoder = &intel_dig_port->base;
	encoder = &intel_encoder->base;

	drm_encoder_init(dev, &intel_encoder->base, &intel_dp_enc_funcs,
			 DRM_MODE_ENCODER_TMDS);

	intel_encoder->compute_config = intel_dp_compute_config;
	intel_encoder->disable = intel_disable_dp;
	intel_encoder->get_hw_state = intel_dp_get_hw_state;
	intel_encoder->get_config = intel_dp_get_config;
	intel_encoder->suspend = intel_dp_encoder_suspend;
	if (IS_CHERRYVIEW(dev)) {
		intel_encoder->pre_pll_enable = chv_dp_pre_pll_enable;
		intel_encoder->pre_enable = chv_pre_enable_dp;
		intel_encoder->enable = vlv_enable_dp;
		intel_encoder->post_disable = chv_post_disable_dp;
	} else if (IS_VALLEYVIEW(dev)) {
		intel_encoder->pre_pll_enable = vlv_dp_pre_pll_enable;
		intel_encoder->pre_enable = vlv_pre_enable_dp;
		intel_encoder->enable = vlv_enable_dp;
		intel_encoder->post_disable = vlv_post_disable_dp;
	} else {
		intel_encoder->pre_enable = g4x_pre_enable_dp;
		intel_encoder->enable = g4x_enable_dp;
		if (INTEL_INFO(dev)->gen >= 5)
			intel_encoder->post_disable = ilk_post_disable_dp;
	}

	intel_dig_port->port = port;
	intel_dig_port->dp.output_reg = output_reg;

	intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
	if (IS_CHERRYVIEW(dev)) {
		if (port == PORT_D)
			intel_encoder->crtc_mask = 1 << 2;
		else
			intel_encoder->crtc_mask = (1 << 0) | (1 << 1);
	} else {
		intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
	}
	intel_encoder->cloneable = 0;
	intel_encoder->hot_plug = intel_dp_hot_plug;

	intel_dig_port->hpd_pulse = intel_dp_hpd_pulse;
	dev_priv->hpd_irq_port[port] = intel_dig_port;

	if (!intel_dp_init_connector(intel_dig_port, intel_connector)) {
		drm_encoder_cleanup(encoder);
		kfree(intel_dig_port);
		kfree(intel_connector);
	}
}
void intel_dp_mst_suspend(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int i;

	/* disable MST */
	for (i = 0; i < I915_MAX_PORTS; i++) {
		struct intel_digital_port *intel_dig_port = dev_priv->hpd_irq_port[i];
		if (!intel_dig_port)
			continue;

		if (intel_dig_port->base.type == INTEL_OUTPUT_DISPLAYPORT) {
			if (!intel_dig_port->dp.can_mst)
				continue;
			if (intel_dig_port->dp.is_mst)
				drm_dp_mst_topology_mgr_suspend(&intel_dig_port->dp.mst_mgr);
		}
	}
}

void intel_dp_mst_resume(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int i;

	for (i = 0; i < I915_MAX_PORTS; i++) {
		struct intel_digital_port *intel_dig_port = dev_priv->hpd_irq_port[i];
		if (!intel_dig_port)
			continue;
		if (intel_dig_port->base.type == INTEL_OUTPUT_DISPLAYPORT) {
			int ret;

			if (!intel_dig_port->dp.can_mst)
				continue;

			ret = drm_dp_mst_topology_mgr_resume(&intel_dig_port->dp.mst_mgr);
			if (ret != 0)
				intel_dp_check_mst_status(&intel_dig_port->dp);
		}
	}
}