/*
 * Copyright © 2008 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Keith Packard <keithp@keithp.com>
 *
 */

#include <linux/export.h>
#include <linux/i2c.h>
#include <linux/notifier.h>
#include <linux/slab.h>
#include <linux/types.h>

#include <asm/byteorder.h>

#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
#include <drm/drm_dp_helper.h>
#include <drm/drm_edid.h>
#include <drm/drm_probe_helper.h>

#include "i915_debugfs.h"
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_atomic.h"
#include "intel_audio.h"
#include "intel_connector.h"
#include "intel_ddi.h"
#include "intel_display_types.h"
#include "intel_dp.h"
#include "intel_dp_link_training.h"
#include "intel_dp_mst.h"
#include "intel_dpio_phy.h"
#include "intel_fifo_underrun.h"
#include "intel_hdcp.h"
#include "intel_hdmi.h"
#include "intel_hotplug.h"
#include "intel_lspcon.h"
#include "intel_lvds.h"
#include "intel_panel.h"
#include "intel_psr.h"
#include "intel_sideband.h"
#include "intel_tc.h"
#include "intel_vdsc.h"

#define DP_DPRX_ESI_LEN 14

/* DP DSC throughput values used for slice count calculations, in KPixels/s */
#define DP_DSC_PEAK_PIXEL_RATE			2720000
#define DP_DSC_MAX_ENC_THROUGHPUT_0		340000
#define DP_DSC_MAX_ENC_THROUGHPUT_1		400000

/* DP DSC FEC Overhead factor = 1/(0.972261) */
#define DP_DSC_FEC_OVERHEAD_FACTOR		972261
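
/*
 * Illustrative numbers (not part of the original comment): with FEC enabled
 * only ~97.2261% of the link carries pixel data, so a 540000 kHz symbol
 * clock effectively provides about 540000 * 0.972261 ~= 525021 kHz of
 * bandwidth. intel_dp_mode_to_fec_clock() applies the inverse of this
 * factor to a mode clock.
 */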

/* Compliance test status bits */
#define INTEL_DP_RESOLUTION_SHIFT_MASK	0
#define INTEL_DP_RESOLUTION_PREFERRED	(1 << INTEL_DP_RESOLUTION_SHIFT_MASK)
#define INTEL_DP_RESOLUTION_STANDARD	(2 << INTEL_DP_RESOLUTION_SHIFT_MASK)
#define INTEL_DP_RESOLUTION_FAILSAFE	(3 << INTEL_DP_RESOLUTION_SHIFT_MASK)

struct dp_link_dpll {
	int clock;
	struct dpll dpll;
};

static const struct dp_link_dpll g4x_dpll[] = {
	{ 162000,
		{ .p1 = 2, .p2 = 10, .n = 2, .m1 = 23, .m2 = 8 } },
	{ 270000,
		{ .p1 = 1, .p2 = 10, .n = 1, .m1 = 14, .m2 = 2 } }
};

static const struct dp_link_dpll pch_dpll[] = {
	{ 162000,
		{ .p1 = 2, .p2 = 10, .n = 1, .m1 = 12, .m2 = 9 } },
	{ 270000,
		{ .p1 = 1, .p2 = 10, .n = 2, .m1 = 14, .m2 = 8 } }
};

static const struct dp_link_dpll vlv_dpll[] = {
	{ 162000,
		{ .p1 = 3, .p2 = 2, .n = 5, .m1 = 3, .m2 = 81 } },
	{ 270000,
		{ .p1 = 2, .p2 = 2, .n = 1, .m1 = 2, .m2 = 27 } }
};

/*
 * CHV supports eDP 1.4, which has more link rates.
 * Below we list only the fixed rates, excluding the variable ones.
 */
static const struct dp_link_dpll chv_dpll[] = {
	/*
	 * CHV requires to program fractional division for m2.
	 * m2 is stored in fixed point format using formula below
	 * (m2_int << 22) | m2_fraction
	 */
	{ 162000,	/* m2_int = 32, m2_fraction = 1677722 */
		{ .p1 = 4, .p2 = 2, .n = 1, .m1 = 2, .m2 = 0x819999a } },
	{ 270000,	/* m2_int = 27, m2_fraction = 0 */
		{ .p1 = 4, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 } },
};

/* Constants for DP DSC configurations */
static const u8 valid_dsc_bpp[] = {6, 8, 10, 12, 15};

/* With a single-pipe configuration, the HW can support a maximum
 * of 4 slices per line.
 */
static const u8 valid_dsc_slicecount[] = {1, 2, 4};

/**
 * intel_dp_is_edp - is the given port attached to an eDP panel (either CPU or PCH)
 * @intel_dp: DP struct
 *
 * If a CPU or PCH DP output is attached to an eDP panel, this function
 * will return true, and false otherwise.
 */
bool intel_dp_is_edp(struct intel_dp *intel_dp)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);

	return dig_port->base.type == INTEL_OUTPUT_EDP;
}

static void intel_dp_link_down(struct intel_encoder *encoder,
			       const struct intel_crtc_state *old_crtc_state);
static bool edp_panel_vdd_on(struct intel_dp *intel_dp);
static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync);
static void vlv_init_panel_power_sequencer(struct intel_encoder *encoder,
					   const struct intel_crtc_state *crtc_state);
static void vlv_steal_power_sequencer(struct drm_i915_private *dev_priv,
				      enum pipe pipe);
static void intel_dp_unset_edid(struct intel_dp *intel_dp);

/* update sink rates from dpcd */
static void intel_dp_set_sink_rates(struct intel_dp *intel_dp)
{
	static const int dp_rates[] = {
		162000, 270000, 540000, 810000
	};
	int i, max_rate;
	int max_lttpr_rate;

	if (drm_dp_has_quirk(&intel_dp->desc, 0,
			     DP_DPCD_QUIRK_CAN_DO_MAX_LINK_RATE_3_24_GBPS)) {
		/* Needed, e.g., for Apple MBP 2017, 15 inch eDP Retina panel */
		static const int quirk_rates[] = { 162000, 270000, 324000 };

		memcpy(intel_dp->sink_rates, quirk_rates, sizeof(quirk_rates));
		intel_dp->num_sink_rates = ARRAY_SIZE(quirk_rates);

		return;
	}

	max_rate = drm_dp_bw_code_to_link_rate(intel_dp->dpcd[DP_MAX_LINK_RATE]);
	max_lttpr_rate = drm_dp_lttpr_max_link_rate(intel_dp->lttpr_common_caps);
	if (max_lttpr_rate)
		max_rate = min(max_rate, max_lttpr_rate);

	for (i = 0; i < ARRAY_SIZE(dp_rates); i++) {
		if (dp_rates[i] > max_rate)
			break;
		intel_dp->sink_rates[i] = dp_rates[i];
	}

	intel_dp->num_sink_rates = i;
}

/* Get length of rates array potentially limited by max_rate. */
static int intel_dp_rate_limit_len(const int *rates, int len, int max_rate)
{
	int i;

	/* Limit results by potentially reduced max rate */
	for (i = 0; i < len; i++) {
		if (rates[len - i - 1] <= max_rate)
			return len - i;
	}

	return 0;
}

/* Get length of common rates array potentially limited by max_rate. */
static int intel_dp_common_len_rate_limit(const struct intel_dp *intel_dp,
					  int max_rate)
{
	return intel_dp_rate_limit_len(intel_dp->common_rates,
				       intel_dp->num_common_rates, max_rate);
}

/* Theoretical max between source and sink */
static int intel_dp_max_common_rate(struct intel_dp *intel_dp)
{
	return intel_dp->common_rates[intel_dp->num_common_rates - 1];
}

/* Theoretical max between source and sink */
static int intel_dp_max_common_lane_count(struct intel_dp *intel_dp)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	int source_max = dig_port->max_lanes;
	int sink_max = drm_dp_max_lane_count(intel_dp->dpcd);
	int fia_max = intel_tc_port_fia_max_lane_count(dig_port);
	int lttpr_max = drm_dp_lttpr_max_lane_count(intel_dp->lttpr_common_caps);

	if (lttpr_max)
		sink_max = min(sink_max, lttpr_max);

	return min3(source_max, sink_max, fia_max);
}

int intel_dp_max_lane_count(struct intel_dp *intel_dp)
{
	return intel_dp->max_link_lane_count;
}

static int
intel_dp_link_required(int pixel_clock, int bpp)
{
	/* pixel_clock is in kHz, divide bpp by 8 for bit to Byte conversion */
	return DIV_ROUND_UP(pixel_clock * bpp, 8);
}

static int
intel_dp_max_data_rate(int max_link_clock, int max_lanes)
{
	/* max_link_clock is the link symbol clock (LS_Clk) in kHz and not the
	 * link rate that is generally expressed in Gbps. Since 8 bits of data
	 * are transmitted every LS_Clk per lane, there is no need to account
	 * for the channel encoding that is done in the PHY layer here.
	 */
	return max_link_clock * max_lanes;
}
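
/*
 * Worked example (illustrative): a 2560x1440@60 mode with a 241500 kHz
 * dot clock at 24 bpp needs intel_dp_link_required(241500, 24)
 * = DIV_ROUND_UP(241500 * 24, 8) = 724500 kB/s, while HBR2 x4 gives
 * intel_dp_max_data_rate(540000, 4) = 2160000 kB/s, so the mode fits.
 */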

bool intel_dp_can_bigjoiner(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct intel_encoder *encoder = &intel_dig_port->base;
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);

	return INTEL_GEN(dev_priv) >= 12 ||
		(INTEL_GEN(dev_priv) == 11 &&
		 encoder->port != PORT_A);
}

static int cnl_max_source_rate(struct intel_dp *intel_dp)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
	enum port port = dig_port->base.port;

	u32 voltage = intel_de_read(dev_priv, CNL_PORT_COMP_DW3) & VOLTAGE_INFO_MASK;

	/* Low voltage SKUs are limited to max of 5.4G */
	if (voltage == VOLTAGE_INFO_0_85V)
		return 540000;

	/* For this SKU 8.1G is supported in all ports */
	if (IS_CNL_WITH_PORT_F(dev_priv))
		return 810000;

	/* For other SKUs, max rate on ports A and D is 5.4G */
	if (port == PORT_A || port == PORT_D)
		return 540000;

	return 810000;
}

static int icl_max_source_rate(struct intel_dp *intel_dp)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
	enum phy phy = intel_port_to_phy(dev_priv, dig_port->base.port);

	if (intel_phy_is_combo(dev_priv, phy) &&
	    !intel_dp_is_edp(intel_dp))
		return 540000;

	return 810000;
}

static int ehl_max_source_rate(struct intel_dp *intel_dp)
{
	if (intel_dp_is_edp(intel_dp))
		return 540000;

	return 810000;
}

static void
intel_dp_set_source_rates(struct intel_dp *intel_dp)
{
	/* The values must be in increasing order */
	static const int cnl_rates[] = {
		162000, 216000, 270000, 324000, 432000, 540000, 648000, 810000
	};
	static const int bxt_rates[] = {
		162000, 216000, 243000, 270000, 324000, 432000, 540000
	};
	static const int skl_rates[] = {
		162000, 216000, 270000, 324000, 432000, 540000
	};
	static const int hsw_rates[] = {
		162000, 270000, 540000
	};
	static const int g4x_rates[] = {
		162000, 270000
	};
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct intel_encoder *encoder = &dig_port->base;
	struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
	const int *source_rates;
	int size, max_rate = 0, vbt_max_rate;

	/* This should only be done once */
	drm_WARN_ON(&dev_priv->drm,
		    intel_dp->source_rates || intel_dp->num_source_rates);

	if (INTEL_GEN(dev_priv) >= 10) {
		source_rates = cnl_rates;
		size = ARRAY_SIZE(cnl_rates);
		if (IS_GEN(dev_priv, 10))
			max_rate = cnl_max_source_rate(intel_dp);
		else if (IS_JSL_EHL(dev_priv))
			max_rate = ehl_max_source_rate(intel_dp);
		else
			max_rate = icl_max_source_rate(intel_dp);
	} else if (IS_GEN9_LP(dev_priv)) {
		source_rates = bxt_rates;
		size = ARRAY_SIZE(bxt_rates);
	} else if (IS_GEN9_BC(dev_priv)) {
		source_rates = skl_rates;
		size = ARRAY_SIZE(skl_rates);
	} else if ((IS_HASWELL(dev_priv) && !IS_HSW_ULX(dev_priv)) ||
		   IS_BROADWELL(dev_priv)) {
		source_rates = hsw_rates;
		size = ARRAY_SIZE(hsw_rates);
	} else {
		source_rates = g4x_rates;
		size = ARRAY_SIZE(g4x_rates);
	}

	vbt_max_rate = intel_bios_dp_max_link_rate(encoder);
	if (max_rate && vbt_max_rate)
		max_rate = min(max_rate, vbt_max_rate);
	else if (vbt_max_rate)
		max_rate = vbt_max_rate;

	if (max_rate)
		size = intel_dp_rate_limit_len(source_rates, size, max_rate);

	intel_dp->source_rates = source_rates;
	intel_dp->num_source_rates = size;
}

static int intersect_rates(const int *source_rates, int source_len,
			   const int *sink_rates, int sink_len,
			   int *common_rates)
{
	int i = 0, j = 0, k = 0;

	while (i < source_len && j < sink_len) {
		if (source_rates[i] == sink_rates[j]) {
			if (WARN_ON(k >= DP_MAX_SUPPORTED_RATES))
				return k;
			common_rates[k] = source_rates[i];
			++k;
			++i;
			++j;
		} else if (source_rates[i] < sink_rates[j]) {
			++i;
		} else {
			++j;
		}
	}
	return k;
}

/* return index of rate in rates array, or -1 if not found */
static int intel_dp_rate_index(const int *rates, int len, int rate)
{
	int i;

	for (i = 0; i < len; i++)
		if (rate == rates[i])
			return i;

	return -1;
}
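
/*
 * Example (illustrative): intersecting source rates {162000, 270000,
 * 540000} with sink rates {162000, 270000, 324000} yields the common
 * rates {162000, 270000}; both inputs must be sorted in increasing
 * order for the merge in intersect_rates() to work.
 */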

static void intel_dp_set_common_rates(struct intel_dp *intel_dp)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);

	drm_WARN_ON(&i915->drm,
		    !intel_dp->num_source_rates || !intel_dp->num_sink_rates);

	intel_dp->num_common_rates = intersect_rates(intel_dp->source_rates,
						     intel_dp->num_source_rates,
						     intel_dp->sink_rates,
						     intel_dp->num_sink_rates,
						     intel_dp->common_rates);

	/* Paranoia, there should always be something in common. */
	if (drm_WARN_ON(&i915->drm, intel_dp->num_common_rates == 0)) {
		intel_dp->common_rates[0] = 162000;
		intel_dp->num_common_rates = 1;
	}
}

static bool intel_dp_link_params_valid(struct intel_dp *intel_dp, int link_rate,
				       u8 lane_count)
{
	/*
	 * FIXME: we need to synchronize the current link parameters with
	 * hardware readout. Currently fast link training doesn't work on
	 * boot-up.
	 */
	if (link_rate == 0 ||
	    link_rate > intel_dp->max_link_rate)
		return false;

	if (lane_count == 0 ||
	    lane_count > intel_dp_max_lane_count(intel_dp))
		return false;

	return true;
}

static bool intel_dp_can_link_train_fallback_for_edp(struct intel_dp *intel_dp,
						     int link_rate,
						     u8 lane_count)
{
	const struct drm_display_mode *fixed_mode =
		intel_dp->attached_connector->panel.fixed_mode;
	int mode_rate, max_rate;

	mode_rate = intel_dp_link_required(fixed_mode->clock, 18);
	max_rate = intel_dp_max_data_rate(link_rate, lane_count);
	if (mode_rate > max_rate)
		return false;

	return true;
}

int intel_dp_get_link_train_fallback_values(struct intel_dp *intel_dp,
					    int link_rate, u8 lane_count)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	int index;

	/*
	 * TODO: Enable fallback on MST links once MST link compute can handle
	 * the fallback params.
	 */
	if (intel_dp->is_mst) {
		drm_err(&i915->drm, "Link Training Unsuccessful\n");
		return -1;
	}

	index = intel_dp_rate_index(intel_dp->common_rates,
				    intel_dp->num_common_rates,
				    link_rate);
	if (index > 0) {
		if (intel_dp_is_edp(intel_dp) &&
		    !intel_dp_can_link_train_fallback_for_edp(intel_dp,
							      intel_dp->common_rates[index - 1],
							      lane_count)) {
			drm_dbg_kms(&i915->drm,
				    "Retrying Link training for eDP with same parameters\n");
			return 0;
		}
		intel_dp->max_link_rate = intel_dp->common_rates[index - 1];
		intel_dp->max_link_lane_count = lane_count;
	} else if (lane_count > 1) {
		if (intel_dp_is_edp(intel_dp) &&
		    !intel_dp_can_link_train_fallback_for_edp(intel_dp,
							      intel_dp_max_common_rate(intel_dp),
							      lane_count)) {
			drm_dbg_kms(&i915->drm,
				    "Retrying Link training for eDP with same parameters\n");
			return 0;
		}
		intel_dp->max_link_rate = intel_dp_max_common_rate(intel_dp);
		intel_dp->max_link_lane_count = lane_count >> 1;
	} else {
		drm_err(&i915->drm, "Link Training Unsuccessful\n");
		return -1;
	}

	return 0;
}

u32 intel_dp_mode_to_fec_clock(u32 mode_clock)
{
	return div_u64(mul_u32_u32(mode_clock, 1000000U),
		       DP_DSC_FEC_OVERHEAD_FACTOR);
}
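
/*
 * Example (illustrative): for a 594000 kHz mode clock this returns
 * 594000 * 1000000 / 972261 ~= 610947 kHz, i.e. the effective pixel
 * rate the link must sustain once the FEC overhead is included.
 */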

static u32
small_joiner_ram_size_bits(struct drm_i915_private *i915)
{
	if (INTEL_GEN(i915) >= 11)
		return 7680 * 8;
	else
		return 6144 * 8;
}

static u16 intel_dp_dsc_get_output_bpp(struct drm_i915_private *i915,
				       u32 link_clock, u32 lane_count,
				       u32 mode_clock, u32 mode_hdisplay,
				       bool bigjoiner)
{
	u32 bits_per_pixel, max_bpp_small_joiner_ram;
	int i;

	/*
	 * Available Link Bandwidth (Kbits/sec) = (NumberOfLanes) *
	 * (LinkSymbolClock) * 8 * (TimeSlotsPerMTP)
	 * for SST -> TimeSlotsPerMTP is 1,
	 * for MST -> TimeSlotsPerMTP has to be calculated
	 */
	bits_per_pixel = (link_clock * lane_count * 8) /
		intel_dp_mode_to_fec_clock(mode_clock);
	drm_dbg_kms(&i915->drm, "Max link bpp: %u\n", bits_per_pixel);

	/* Small Joiner Check: output bpp <= joiner RAM (bits) / Horiz. width */
	max_bpp_small_joiner_ram = small_joiner_ram_size_bits(i915) /
		mode_hdisplay;

	if (bigjoiner)
		max_bpp_small_joiner_ram *= 2;

	drm_dbg_kms(&i915->drm, "Max small joiner bpp: %u\n",
		    max_bpp_small_joiner_ram);

	/*
	 * Greatest allowed DSC BPP = MIN (output BPP from available Link BW
	 * check, output bpp from small joiner RAM check)
	 */
	bits_per_pixel = min(bits_per_pixel, max_bpp_small_joiner_ram);

	if (bigjoiner) {
		u32 max_bpp_bigjoiner =
			i915->max_cdclk_freq * 48 /
			intel_dp_mode_to_fec_clock(mode_clock);

		DRM_DEBUG_KMS("Max big joiner bpp: %u\n", max_bpp_bigjoiner);
		bits_per_pixel = min(bits_per_pixel, max_bpp_bigjoiner);
	}

	/* Error out if the max bpp is less than smallest allowed valid bpp */
	if (bits_per_pixel < valid_dsc_bpp[0]) {
		drm_dbg_kms(&i915->drm, "Unsupported BPP %u, min %u\n",
			    bits_per_pixel, valid_dsc_bpp[0]);
		return 0;
	}

	/* Find the nearest match in the array of known BPPs from VESA */
	for (i = 0; i < ARRAY_SIZE(valid_dsc_bpp) - 1; i++) {
		if (bits_per_pixel < valid_dsc_bpp[i + 1])
			break;
	}
	bits_per_pixel = valid_dsc_bpp[i];

	/*
	 * Compressed BPP is in U6.4 format so multiply by 16; for Gen 11,
	 * the fractional part is 0.
	 */
	return bits_per_pixel << 4;
}
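
/*
 * Example (illustrative): a computed 12 bpp is returned as 12 << 4 =
 * 192 (0xC0), i.e. 12.0 in U6.4 fixed point with a zero fractional part.
 */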

static u8 intel_dp_dsc_get_slice_count(struct intel_dp *intel_dp,
				       int mode_clock, int mode_hdisplay,
				       bool bigjoiner)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	u8 min_slice_count, i;
	int max_slice_width;

	if (mode_clock <= DP_DSC_PEAK_PIXEL_RATE)
		min_slice_count = DIV_ROUND_UP(mode_clock,
					       DP_DSC_MAX_ENC_THROUGHPUT_0);
	else
		min_slice_count = DIV_ROUND_UP(mode_clock,
					       DP_DSC_MAX_ENC_THROUGHPUT_1);

	max_slice_width = drm_dp_dsc_sink_max_slice_width(intel_dp->dsc_dpcd);
	if (max_slice_width < DP_DSC_MIN_SLICE_WIDTH_VALUE) {
		drm_dbg_kms(&i915->drm,
			    "Unsupported slice width %d by DP DSC Sink device\n",
			    max_slice_width);
		return 0;
	}
	/* Also take into account max slice width */
	min_slice_count = max_t(u8, min_slice_count,
				DIV_ROUND_UP(mode_hdisplay,
					     max_slice_width));

	/* Find the closest match to the valid slice count values */
	for (i = 0; i < ARRAY_SIZE(valid_dsc_slicecount); i++) {
		u8 test_slice_count = valid_dsc_slicecount[i] << bigjoiner;

		if (test_slice_count >
		    drm_dp_dsc_sink_max_slice_count(intel_dp->dsc_dpcd, false))
			break;

		/* big joiner needs small joiner to be enabled */
		if (bigjoiner && test_slice_count < 4)
			continue;

		if (min_slice_count <= test_slice_count)
			return test_slice_count;
	}

	drm_dbg_kms(&i915->drm, "Unsupported Slice Count %d\n",
		    min_slice_count);
	return 0;
}

static enum intel_output_format
intel_dp_output_format(struct drm_connector *connector,
		       const struct drm_display_mode *mode)
{
	struct intel_dp *intel_dp = intel_attached_dp(to_intel_connector(connector));
	const struct drm_display_info *info = &connector->display_info;

	if (!connector->ycbcr_420_allowed ||
	    !drm_mode_is_420_only(info, mode))
		return INTEL_OUTPUT_FORMAT_RGB;

	if (intel_dp->dfp.rgb_to_ycbcr &&
	    intel_dp->dfp.ycbcr_444_to_420)
		return INTEL_OUTPUT_FORMAT_RGB;

	if (intel_dp->dfp.ycbcr_444_to_420)
		return INTEL_OUTPUT_FORMAT_YCBCR444;
	else
		return INTEL_OUTPUT_FORMAT_YCBCR420;
}

int intel_dp_min_bpp(enum intel_output_format output_format)
{
	if (output_format == INTEL_OUTPUT_FORMAT_RGB)
		return 6 * 3;
	else
		return 8 * 3;
}

static int intel_dp_output_bpp(enum intel_output_format output_format, int bpp)
{
	/*
	 * The bpp value is assumed to be for RGB. For YCbCr 4:2:0 output
	 * the number of bits per pixel on the link is half that of an
	 * RGB pixel.
	 */
	if (output_format == INTEL_OUTPUT_FORMAT_YCBCR420)
		bpp /= 2;

	return bpp;
}

static int
intel_dp_mode_min_output_bpp(struct drm_connector *connector,
			     const struct drm_display_mode *mode)
{
	enum intel_output_format output_format =
		intel_dp_output_format(connector, mode);

	return intel_dp_output_bpp(output_format, intel_dp_min_bpp(output_format));
}
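
/*
 * Example (illustrative): for a mode that is 4:2:0-only, the minimum is
 * 8 bpc * 3 = 24 bpp in RGB terms, halved to 12 bpp on the link by
 * intel_dp_output_bpp().
 */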

static bool intel_dp_hdisplay_bad(struct drm_i915_private *dev_priv,
				  int hdisplay)
{
	/*
	 * Older platforms don't like hdisplay==4096 with DP.
	 *
	 * On ILK/SNB/IVB the pipe seems to be somewhat running (scanline
	 * and frame counter increment), but we don't get vblank interrupts,
	 * and the pipe underruns immediately. The link also doesn't seem
	 * to get trained properly.
	 *
	 * On CHV the vblank interrupts don't seem to disappear but
	 * otherwise the symptoms are similar.
	 *
	 * TODO: confirm the behaviour on HSW+
	 */
	return hdisplay == 4096 && !HAS_DDI(dev_priv);
}

static enum drm_mode_status
intel_dp_mode_valid_downstream(struct intel_connector *connector,
			       const struct drm_display_mode *mode,
			       int target_clock)
{
	struct intel_dp *intel_dp = intel_attached_dp(connector);
	const struct drm_display_info *info = &connector->base.display_info;
	int tmds_clock;

	/* If PCON supports FRL MODE, check FRL bandwidth constraints */
	if (intel_dp->dfp.pcon_max_frl_bw) {
		int target_bw;
		int max_frl_bw;
		int bpp = intel_dp_mode_min_output_bpp(&connector->base, mode);

		target_bw = bpp * target_clock;

		max_frl_bw = intel_dp->dfp.pcon_max_frl_bw;

		/* converting bw from Gbps to Kbps */
		max_frl_bw = max_frl_bw * 1000000;

		if (target_bw > max_frl_bw)
			return MODE_CLOCK_HIGH;

		return MODE_OK;
	}

	if (intel_dp->dfp.max_dotclock &&
	    target_clock > intel_dp->dfp.max_dotclock)
		return MODE_CLOCK_HIGH;

	/* Assume 8bpc for the DP++/HDMI/DVI TMDS clock check */
	tmds_clock = target_clock;
	if (drm_mode_is_420_only(info, mode))
		tmds_clock /= 2;

	if (intel_dp->dfp.min_tmds_clock &&
	    tmds_clock < intel_dp->dfp.min_tmds_clock)
		return MODE_CLOCK_LOW;
	if (intel_dp->dfp.max_tmds_clock &&
	    tmds_clock > intel_dp->dfp.max_tmds_clock)
		return MODE_CLOCK_HIGH;

	return MODE_OK;
}

static enum drm_mode_status
intel_dp_mode_valid(struct drm_connector *connector,
		    struct drm_display_mode *mode)
{
	struct intel_dp *intel_dp = intel_attached_dp(to_intel_connector(connector));
	struct intel_connector *intel_connector = to_intel_connector(connector);
	struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode;
	struct drm_i915_private *dev_priv = to_i915(connector->dev);
	int target_clock = mode->clock;
	int max_rate, mode_rate, max_lanes, max_link_clock;
	int max_dotclk = dev_priv->max_dotclk_freq;
	u16 dsc_max_output_bpp = 0;
	u8 dsc_slice_count = 0;
	enum drm_mode_status status;
	bool dsc = false, bigjoiner = false;

	if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
		return MODE_NO_DBLESCAN;

	if (mode->flags & DRM_MODE_FLAG_DBLCLK)
		return MODE_H_ILLEGAL;

	if (intel_dp_is_edp(intel_dp) && fixed_mode) {
		if (mode->hdisplay > fixed_mode->hdisplay)
			return MODE_PANEL;

		if (mode->vdisplay > fixed_mode->vdisplay)
			return MODE_PANEL;

		target_clock = fixed_mode->clock;
	}

	if (mode->clock < 10000)
		return MODE_CLOCK_LOW;

	if ((target_clock > max_dotclk || mode->hdisplay > 5120) &&
	    intel_dp_can_bigjoiner(intel_dp)) {
		bigjoiner = true;
		max_dotclk *= 2;
	}
	if (target_clock > max_dotclk)
		return MODE_CLOCK_HIGH;

	max_link_clock = intel_dp_max_link_rate(intel_dp);
	max_lanes = intel_dp_max_lane_count(intel_dp);

	max_rate = intel_dp_max_data_rate(max_link_clock, max_lanes);
	mode_rate = intel_dp_link_required(target_clock,
					   intel_dp_mode_min_output_bpp(connector, mode));

	if (intel_dp_hdisplay_bad(dev_priv, mode->hdisplay))
		return MODE_H_ILLEGAL;

	/*
	 * Output bpp is stored in 6.4 format so right shift by 4 to get the
	 * integer value since we support only integer values of bpp.
	 */
	if ((INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv)) &&
	    drm_dp_sink_supports_dsc(intel_dp->dsc_dpcd)) {
		if (intel_dp_is_edp(intel_dp)) {
			dsc_max_output_bpp =
				drm_edp_dsc_sink_output_bpp(intel_dp->dsc_dpcd) >> 4;
			dsc_slice_count =
				drm_dp_dsc_sink_max_slice_count(intel_dp->dsc_dpcd,
								true);
		} else if (drm_dp_sink_supports_fec(intel_dp->fec_capable)) {
			dsc_max_output_bpp =
				intel_dp_dsc_get_output_bpp(dev_priv,
							    max_link_clock,
							    max_lanes,
							    target_clock,
							    mode->hdisplay,
							    bigjoiner) >> 4;
			dsc_slice_count =
				intel_dp_dsc_get_slice_count(intel_dp,
							     target_clock,
							     mode->hdisplay,
							     bigjoiner);
		}

		dsc = dsc_max_output_bpp && dsc_slice_count;
	}

	/* big joiner configuration needs DSC */
	if (bigjoiner && !dsc)
		return MODE_CLOCK_HIGH;

	if (mode_rate > max_rate && !dsc)
		return MODE_CLOCK_HIGH;

	status = intel_dp_mode_valid_downstream(intel_connector,
						mode, target_clock);
	if (status != MODE_OK)
		return status;

	return intel_mode_valid_max_plane_size(dev_priv, mode, bigjoiner);
}

u32 intel_dp_pack_aux(const u8 *src, int src_bytes)
{
	u32 v = 0;
	int i;

	if (src_bytes > 4)
		src_bytes = 4;
	for (i = 0; i < src_bytes; i++)
		v |= ((u32)src[i]) << ((3 - i) * 8);
	return v;
}

static void intel_dp_unpack_aux(u32 src, u8 *dst, int dst_bytes)
{
	int i;

	if (dst_bytes > 4)
		dst_bytes = 4;
	for (i = 0; i < dst_bytes; i++)
		dst[i] = src >> ((3 - i) * 8);
}
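
/*
 * Example (illustrative): packing the two bytes {0x12, 0x34} yields
 * 0x12340000; the first byte always lands in the most significant byte
 * of the 32-bit AUX data register, and unpacking reverses this.
 */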

static void
intel_dp_init_panel_power_sequencer(struct intel_dp *intel_dp);
static void
intel_dp_init_panel_power_sequencer_registers(struct intel_dp *intel_dp,
					      bool force_disable_vdd);
static void
intel_dp_pps_init(struct intel_dp *intel_dp);

static intel_wakeref_t
pps_lock(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	intel_wakeref_t wakeref;

	/*
	 * See intel_power_sequencer_reset() for why we need
	 * a power domain reference here.
	 */
	wakeref = intel_display_power_get(dev_priv,
					  intel_aux_power_domain(dp_to_dig_port(intel_dp)));

	mutex_lock(&dev_priv->pps_mutex);

	return wakeref;
}

static intel_wakeref_t
pps_unlock(struct intel_dp *intel_dp, intel_wakeref_t wakeref)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);

	mutex_unlock(&dev_priv->pps_mutex);
	intel_display_power_put(dev_priv,
				intel_aux_power_domain(dp_to_dig_port(intel_dp)),
				wakeref);
	return 0;
}

#define with_pps_lock(dp, wf)						\
	for ((wf) = pps_lock(dp); (wf); (wf) = pps_unlock((dp), (wf)))
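
/*
 * Typical usage (illustrative):
 *
 *	intel_wakeref_t wakeref;
 *
 *	with_pps_lock(intel_dp, wakeref)
 *		edp_panel_vdd_on(intel_dp);
 *
 * The for-loop trick takes the mutex and a power reference on entry;
 * pps_unlock() returns 0, so the body runs exactly once and both are
 * released afterwards.
 */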

static void
vlv_power_sequencer_kick(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	enum pipe pipe = intel_dp->pps_pipe;
	bool pll_enabled, release_cl_override = false;
	enum dpio_phy phy = DPIO_PHY(pipe);
	enum dpio_channel ch = vlv_pipe_to_channel(pipe);
	u32 DP;

	if (drm_WARN(&dev_priv->drm,
		     intel_de_read(dev_priv, intel_dp->output_reg) & DP_PORT_EN,
		     "skipping pipe %c power sequencer kick due to [ENCODER:%d:%s] being active\n",
		     pipe_name(pipe), dig_port->base.base.base.id,
		     dig_port->base.base.name))
		return;

	drm_dbg_kms(&dev_priv->drm,
		    "kicking pipe %c power sequencer for [ENCODER:%d:%s]\n",
		    pipe_name(pipe), dig_port->base.base.base.id,
		    dig_port->base.base.name);

	/* Preserve the BIOS-computed detected bit. This is
	 * supposed to be read-only.
	 */
	DP = intel_de_read(dev_priv, intel_dp->output_reg) & DP_DETECTED;
	DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
	DP |= DP_PORT_WIDTH(1);
	DP |= DP_LINK_TRAIN_PAT_1;

	if (IS_CHERRYVIEW(dev_priv))
		DP |= DP_PIPE_SEL_CHV(pipe);
	else
		DP |= DP_PIPE_SEL(pipe);

	pll_enabled = intel_de_read(dev_priv, DPLL(pipe)) & DPLL_VCO_ENABLE;

	/*
	 * The DPLL for the pipe must be enabled for this to work.
	 * So enable it temporarily if it's not already enabled.
	 */
	if (!pll_enabled) {
		release_cl_override = IS_CHERRYVIEW(dev_priv) &&
			!chv_phy_powergate_ch(dev_priv, phy, ch, true);

		if (vlv_force_pll_on(dev_priv, pipe, IS_CHERRYVIEW(dev_priv) ?
				     &chv_dpll[0].dpll : &vlv_dpll[0].dpll)) {
			drm_err(&dev_priv->drm,
				"Failed to force on pll for pipe %c!\n",
				pipe_name(pipe));
			return;
		}
	}

	/*
	 * Similar magic as in intel_dp_enable_port().
	 * We _must_ do this port enable + disable trick
	 * to make this power sequencer lock onto the port.
	 * Otherwise even VDD force bit won't work.
	 */
	intel_de_write(dev_priv, intel_dp->output_reg, DP);
	intel_de_posting_read(dev_priv, intel_dp->output_reg);

	intel_de_write(dev_priv, intel_dp->output_reg, DP | DP_PORT_EN);
	intel_de_posting_read(dev_priv, intel_dp->output_reg);

	intel_de_write(dev_priv, intel_dp->output_reg, DP & ~DP_PORT_EN);
	intel_de_posting_read(dev_priv, intel_dp->output_reg);

	if (!pll_enabled) {
		vlv_force_pll_off(dev_priv, pipe);

		if (release_cl_override)
			chv_phy_powergate_ch(dev_priv, phy, ch, false);
	}
}

static enum pipe vlv_find_free_pps(struct drm_i915_private *dev_priv)
{
	struct intel_encoder *encoder;
	unsigned int pipes = (1 << PIPE_A) | (1 << PIPE_B);

	/*
	 * We don't have a power sequencer currently.
	 * Pick one that's not used by another port.
	 */
	for_each_intel_dp(&dev_priv->drm, encoder) {
		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);

		if (encoder->type == INTEL_OUTPUT_EDP) {
			drm_WARN_ON(&dev_priv->drm,
				    intel_dp->active_pipe != INVALID_PIPE &&
				    intel_dp->active_pipe !=
				    intel_dp->pps_pipe);

			if (intel_dp->pps_pipe != INVALID_PIPE)
				pipes &= ~(1 << intel_dp->pps_pipe);
		} else {
			drm_WARN_ON(&dev_priv->drm,
				    intel_dp->pps_pipe != INVALID_PIPE);

			if (intel_dp->active_pipe != INVALID_PIPE)
				pipes &= ~(1 << intel_dp->active_pipe);
		}
	}

	if (pipes == 0)
		return INVALID_PIPE;

	return ffs(pipes) - 1;
}

static enum pipe
vlv_power_sequencer_pipe(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	enum pipe pipe;

	lockdep_assert_held(&dev_priv->pps_mutex);

	/* We should never land here with regular DP ports */
	drm_WARN_ON(&dev_priv->drm, !intel_dp_is_edp(intel_dp));

	drm_WARN_ON(&dev_priv->drm, intel_dp->active_pipe != INVALID_PIPE &&
		    intel_dp->active_pipe != intel_dp->pps_pipe);

	if (intel_dp->pps_pipe != INVALID_PIPE)
		return intel_dp->pps_pipe;

	pipe = vlv_find_free_pps(dev_priv);

	/*
	 * Didn't find one. This should not happen since there
	 * are two power sequencers and up to two eDP ports.
	 */
	if (drm_WARN_ON(&dev_priv->drm, pipe == INVALID_PIPE))
		pipe = PIPE_A;

	vlv_steal_power_sequencer(dev_priv, pipe);
	intel_dp->pps_pipe = pipe;

	drm_dbg_kms(&dev_priv->drm,
		    "picked pipe %c power sequencer for [ENCODER:%d:%s]\n",
		    pipe_name(intel_dp->pps_pipe),
		    dig_port->base.base.base.id,
		    dig_port->base.base.name);

	/* init power sequencer on this pipe and port */
	intel_dp_init_panel_power_sequencer(intel_dp);
	intel_dp_init_panel_power_sequencer_registers(intel_dp, true);

	/*
	 * Even vdd force doesn't work until we've made
	 * the power sequencer lock in on the port.
	 */
	vlv_power_sequencer_kick(intel_dp);

	return intel_dp->pps_pipe;
}

static int
bxt_power_sequencer_idx(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	int backlight_controller = dev_priv->vbt.backlight.controller;

	lockdep_assert_held(&dev_priv->pps_mutex);

	/* We should never land here with regular DP ports */
	drm_WARN_ON(&dev_priv->drm, !intel_dp_is_edp(intel_dp));

	if (!intel_dp->pps_reset)
		return backlight_controller;

	intel_dp->pps_reset = false;

	/*
	 * Only the HW needs to be reprogrammed, the SW state is fixed and
	 * has been set up during connector init.
	 */
	intel_dp_init_panel_power_sequencer_registers(intel_dp, false);

	return backlight_controller;
}

typedef bool (*vlv_pipe_check)(struct drm_i915_private *dev_priv,
			       enum pipe pipe);

static bool vlv_pipe_has_pp_on(struct drm_i915_private *dev_priv,
			       enum pipe pipe)
{
	return intel_de_read(dev_priv, PP_STATUS(pipe)) & PP_ON;
}

static bool vlv_pipe_has_vdd_on(struct drm_i915_private *dev_priv,
				enum pipe pipe)
{
	return intel_de_read(dev_priv, PP_CONTROL(pipe)) & EDP_FORCE_VDD;
}

static bool vlv_pipe_any(struct drm_i915_private *dev_priv,
			 enum pipe pipe)
{
	return true;
}

static enum pipe
vlv_initial_pps_pipe(struct drm_i915_private *dev_priv,
		     enum port port,
		     vlv_pipe_check pipe_check)
{
	enum pipe pipe;

	for (pipe = PIPE_A; pipe <= PIPE_B; pipe++) {
		u32 port_sel = intel_de_read(dev_priv, PP_ON_DELAYS(pipe)) &
			PANEL_PORT_SELECT_MASK;

		if (port_sel != PANEL_PORT_SELECT_VLV(port))
			continue;

		if (!pipe_check(dev_priv, pipe))
			continue;

		return pipe;
	}

	return INVALID_PIPE;
}

static void
vlv_initial_power_sequencer_setup(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	enum port port = dig_port->base.port;

	lockdep_assert_held(&dev_priv->pps_mutex);

	/* try to find a pipe with this port selected */
	/* first pick one where the panel is on */
	intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
						  vlv_pipe_has_pp_on);
	/* didn't find one? pick one where vdd is on */
	if (intel_dp->pps_pipe == INVALID_PIPE)
		intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
							  vlv_pipe_has_vdd_on);
	/* didn't find one? pick one with just the correct port */
	if (intel_dp->pps_pipe == INVALID_PIPE)
		intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
							  vlv_pipe_any);

	/* didn't find one? just let vlv_power_sequencer_pipe() pick one when needed */
	if (intel_dp->pps_pipe == INVALID_PIPE) {
		drm_dbg_kms(&dev_priv->drm,
			    "no initial power sequencer for [ENCODER:%d:%s]\n",
			    dig_port->base.base.base.id,
			    dig_port->base.base.name);
		return;
	}

	drm_dbg_kms(&dev_priv->drm,
		    "initial power sequencer for [ENCODER:%d:%s]: pipe %c\n",
		    dig_port->base.base.base.id,
		    dig_port->base.base.name,
		    pipe_name(intel_dp->pps_pipe));

	intel_dp_init_panel_power_sequencer(intel_dp);
	intel_dp_init_panel_power_sequencer_registers(intel_dp, false);
}

void intel_power_sequencer_reset(struct drm_i915_private *dev_priv)
{
	struct intel_encoder *encoder;

	if (drm_WARN_ON(&dev_priv->drm,
			!(IS_VALLEYVIEW(dev_priv) ||
			  IS_CHERRYVIEW(dev_priv) ||
			  IS_GEN9_LP(dev_priv))))
		return;

	/*
	 * We can't grab pps_mutex here due to deadlock with power_domain
	 * mutex when power_domain functions are called while holding pps_mutex.
	 * That also means that in order to use pps_pipe the code needs to
	 * hold both a power domain reference and pps_mutex, and the power domain
	 * reference get/put must be done while _not_ holding pps_mutex.
	 * pps_{lock,unlock}() do these steps in the correct order, so one
	 * should always use them.
	 */

	for_each_intel_dp(&dev_priv->drm, encoder) {
		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);

		drm_WARN_ON(&dev_priv->drm,
			    intel_dp->active_pipe != INVALID_PIPE);

		if (encoder->type != INTEL_OUTPUT_EDP)
			continue;

		if (IS_GEN9_LP(dev_priv))
			intel_dp->pps_reset = true;
		else
			intel_dp->pps_pipe = INVALID_PIPE;
	}
}

struct pps_registers {
	i915_reg_t pp_ctrl;
	i915_reg_t pp_stat;
	i915_reg_t pp_on;
	i915_reg_t pp_off;
	i915_reg_t pp_div;
};

static void intel_pps_get_registers(struct intel_dp *intel_dp,
				    struct pps_registers *regs)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	int pps_idx = 0;

	memset(regs, 0, sizeof(*regs));

	if (IS_GEN9_LP(dev_priv))
		pps_idx = bxt_power_sequencer_idx(intel_dp);
	else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		pps_idx = vlv_power_sequencer_pipe(intel_dp);

	regs->pp_ctrl = PP_CONTROL(pps_idx);
	regs->pp_stat = PP_STATUS(pps_idx);
	regs->pp_on = PP_ON_DELAYS(pps_idx);
	regs->pp_off = PP_OFF_DELAYS(pps_idx);

	/* Cycle delay moved from PP_DIVISOR to PP_CONTROL */
	if (IS_GEN9_LP(dev_priv) || INTEL_PCH_TYPE(dev_priv) >= PCH_CNP)
		regs->pp_div = INVALID_MMIO_REG;
	else
		regs->pp_div = PP_DIVISOR(pps_idx);
}

static i915_reg_t
_pp_ctrl_reg(struct intel_dp *intel_dp)
{
	struct pps_registers regs;

	intel_pps_get_registers(intel_dp, &regs);

	return regs.pp_ctrl;
}

static i915_reg_t
_pp_stat_reg(struct intel_dp *intel_dp)
{
	struct pps_registers regs;

	intel_pps_get_registers(intel_dp, &regs);

	return regs.pp_stat;
}

static bool edp_have_panel_power(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);

	lockdep_assert_held(&dev_priv->pps_mutex);

	if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
	    intel_dp->pps_pipe == INVALID_PIPE)
		return false;

	return (intel_de_read(dev_priv, _pp_stat_reg(intel_dp)) & PP_ON) != 0;
}

static bool edp_have_panel_vdd(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);

	lockdep_assert_held(&dev_priv->pps_mutex);

	if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
	    intel_dp->pps_pipe == INVALID_PIPE)
		return false;

	return intel_de_read(dev_priv, _pp_ctrl_reg(intel_dp)) & EDP_FORCE_VDD;
}

static void
intel_dp_check_edp(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);

	if (!intel_dp_is_edp(intel_dp))
		return;

	if (!edp_have_panel_power(intel_dp) && !edp_have_panel_vdd(intel_dp)) {
		drm_WARN(&dev_priv->drm, 1,
			 "eDP powered off while attempting aux channel communication.\n");
		drm_dbg_kms(&dev_priv->drm, "Status 0x%08x Control 0x%08x\n",
			    intel_de_read(dev_priv, _pp_stat_reg(intel_dp)),
			    intel_de_read(dev_priv, _pp_ctrl_reg(intel_dp)));
	}
}

static u32
intel_dp_aux_wait_done(struct intel_dp *intel_dp)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	i915_reg_t ch_ctl = intel_dp->aux_ch_ctl_reg(intel_dp);
	const unsigned int timeout_ms = 10;
	u32 status;
	bool done;

#define C (((status = intel_uncore_read_notrace(&i915->uncore, ch_ctl)) & DP_AUX_CH_CTL_SEND_BUSY) == 0)
	done = wait_event_timeout(i915->gmbus_wait_queue, C,
				  msecs_to_jiffies_timeout(timeout_ms));

	/* just trace the final value */
	trace_i915_reg_rw(false, ch_ctl, status, sizeof(status), true);

	if (!done)
		drm_err(&i915->drm,
			"%s: did not complete or timeout within %ums (status 0x%08x)\n",
			intel_dp->aux.name, timeout_ms, status);
#undef C

	return status;
}

static u32 g4x_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);

	if (index)
		return 0;

	/*
	 * The clock divider is based off the hrawclk, and would like to run at
	 * 2MHz. So, take the hrawclk value and divide by 2000 and use that
	 */
	return DIV_ROUND_CLOSEST(RUNTIME_INFO(dev_priv)->rawclk_freq, 2000);
}

static u32 ilk_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	u32 freq;

	if (index)
		return 0;

	/*
	 * The clock divider is based off the cdclk or PCH rawclk, and would
	 * like to run at 2MHz. So, take the cdclk or PCH rawclk value and
	 * divide by 2000 and use that
	 */
	if (dig_port->aux_ch == AUX_CH_A)
		freq = dev_priv->cdclk.hw.cdclk;
	else
		freq = RUNTIME_INFO(dev_priv)->rawclk_freq;
	return DIV_ROUND_CLOSEST(freq, 2000);
}

static u32 hsw_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);

	if (dig_port->aux_ch != AUX_CH_A && HAS_PCH_LPT_H(dev_priv)) {
		/* Workaround for non-ULT HSW */
		switch (index) {
		case 0: return 63;
		case 1: return 72;
		default: return 0;
		}
	}

	return ilk_get_aux_clock_divider(intel_dp, index);
}

static u32 skl_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
	/*
	 * SKL doesn't need us to program the AUX clock divider (Hardware will
	 * derive the clock from CDCLK automatically). We still implement the
	 * get_aux_clock_divider vfunc to plug-in into the existing code.
	 */
	return index ? 0 : 1;
}
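
/*
 * Example (illustrative): with a 24 MHz rawclk (rawclk_freq == 24000),
 * the g4x/ilk helpers return DIV_ROUND_CLOSEST(24000, 2000) = 12,
 * giving the desired ~2MHz AUX bit clock.
 */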

static u32 g4x_get_aux_send_ctl(struct intel_dp *intel_dp,
				int send_bytes,
				u32 aux_clock_divider)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_i915_private *dev_priv =
			to_i915(dig_port->base.base.dev);
	u32 precharge, timeout;

	if (IS_GEN(dev_priv, 6))
		precharge = 3;
	else
		precharge = 5;

	if (IS_BROADWELL(dev_priv))
		timeout = DP_AUX_CH_CTL_TIME_OUT_600us;
	else
		timeout = DP_AUX_CH_CTL_TIME_OUT_400us;

	return DP_AUX_CH_CTL_SEND_BUSY |
	       DP_AUX_CH_CTL_DONE |
	       DP_AUX_CH_CTL_INTERRUPT |
	       DP_AUX_CH_CTL_TIME_OUT_ERROR |
	       timeout |
	       DP_AUX_CH_CTL_RECEIVE_ERROR |
	       (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
	       (precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) |
	       (aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT);
}

static u32 skl_get_aux_send_ctl(struct intel_dp *intel_dp,
				int send_bytes,
				u32 unused)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_i915_private *i915 =
			to_i915(dig_port->base.base.dev);
	enum phy phy = intel_port_to_phy(i915, dig_port->base.port);
	u32 ret;

	ret = DP_AUX_CH_CTL_SEND_BUSY |
	      DP_AUX_CH_CTL_DONE |
	      DP_AUX_CH_CTL_INTERRUPT |
	      DP_AUX_CH_CTL_TIME_OUT_ERROR |
	      DP_AUX_CH_CTL_TIME_OUT_MAX |
	      DP_AUX_CH_CTL_RECEIVE_ERROR |
	      (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
	      DP_AUX_CH_CTL_FW_SYNC_PULSE_SKL(32) |
	      DP_AUX_CH_CTL_SYNC_PULSE_SKL(32);

	if (intel_phy_is_tc(i915, phy) &&
	    dig_port->tc_mode == TC_PORT_TBT_ALT)
		ret |= DP_AUX_CH_CTL_TBT_IO;

	return ret;
}

static int
intel_dp_aux_xfer(struct intel_dp *intel_dp,
		  const u8 *send, int send_bytes,
		  u8 *recv, int recv_size,
		  u32 aux_send_ctl_flags)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_i915_private *i915 =
			to_i915(dig_port->base.base.dev);
	struct intel_uncore *uncore = &i915->uncore;
	enum phy phy = intel_port_to_phy(i915, dig_port->base.port);
	bool is_tc_port = intel_phy_is_tc(i915, phy);
	i915_reg_t ch_ctl, ch_data[5];
	u32 aux_clock_divider;
	enum intel_display_power_domain aux_domain;
	intel_wakeref_t aux_wakeref;
	intel_wakeref_t pps_wakeref;
	int i, ret, recv_bytes;
	int try, clock = 0;
	u32 status;
	bool vdd;

	ch_ctl = intel_dp->aux_ch_ctl_reg(intel_dp);
	for (i = 0; i < ARRAY_SIZE(ch_data); i++)
		ch_data[i] = intel_dp->aux_ch_data_reg(intel_dp, i);

	if (is_tc_port)
		intel_tc_port_lock(dig_port);

	aux_domain = intel_aux_power_domain(dig_port);

	aux_wakeref = intel_display_power_get(i915, aux_domain);
	pps_wakeref = pps_lock(intel_dp);

	/*
	 * We will be called with VDD already enabled for dpcd/edid/oui reads.
	 * In such cases we want to leave VDD enabled and it's up to upper layers
	 * to turn it off. But for e.g. i2c-dev access we need to turn it on/off
	 * ourselves.
	 */
	vdd = edp_panel_vdd_on(intel_dp);

	/* dp aux is extremely sensitive to irq latency, hence request the
	 * lowest possible wakeup latency and so prevent the cpu from going into
	 * deep sleep states.
	 */
	cpu_latency_qos_update_request(&intel_dp->pm_qos, 0);

	intel_dp_check_edp(intel_dp);

	/* Try to wait for any previous AUX channel activity */
	for (try = 0; try < 3; try++) {
		status = intel_uncore_read_notrace(uncore, ch_ctl);
		if ((status & DP_AUX_CH_CTL_SEND_BUSY) == 0)
			break;
		msleep(1);
	}
	/* just trace the final value */
	trace_i915_reg_rw(false, ch_ctl, status, sizeof(status), true);

	if (try == 3) {
		const u32 status = intel_uncore_read(uncore, ch_ctl);

		if (status != intel_dp->aux_busy_last_status) {
			drm_WARN(&i915->drm, 1,
				 "%s: not started (status 0x%08x)\n",
				 intel_dp->aux.name, status);
			intel_dp->aux_busy_last_status = status;
		}

		ret = -EBUSY;
		goto out;
	}

	/* Only 5 data registers! */
	if (drm_WARN_ON(&i915->drm, send_bytes > 20 || recv_size > 20)) {
		ret = -E2BIG;
		goto out;
	}

	while ((aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, clock++))) {
		u32 send_ctl = intel_dp->get_aux_send_ctl(intel_dp,
							  send_bytes,
							  aux_clock_divider);

		send_ctl |= aux_send_ctl_flags;

		/* Must try at least 3 times according to DP spec */
		for (try = 0; try < 5; try++) {
			/* Load the send data into the aux channel data registers */
			for (i = 0; i < send_bytes; i += 4)
				intel_uncore_write(uncore,
						   ch_data[i >> 2],
						   intel_dp_pack_aux(send + i,
								     send_bytes - i));

			/* Send the command and wait for it to complete */
			intel_uncore_write(uncore, ch_ctl, send_ctl);

			status = intel_dp_aux_wait_done(intel_dp);

			/* Clear done status and any errors */
			intel_uncore_write(uncore,
					   ch_ctl,
					   status |
					   DP_AUX_CH_CTL_DONE |
					   DP_AUX_CH_CTL_TIME_OUT_ERROR |
					   DP_AUX_CH_CTL_RECEIVE_ERROR);

			/* DP CTS 1.2 Core Rev 1.1, 4.2.1.1 & 4.2.1.2
			 * 400us delay required for errors and timeouts
			 * Timeout errors from the HW already meet this
			 * requirement so skip to next iteration
			 */
			if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR)
				continue;

			if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) {
				usleep_range(400, 500);
				continue;
			}
			if (status & DP_AUX_CH_CTL_DONE)
				goto done;
		}
	}

	if ((status & DP_AUX_CH_CTL_DONE) == 0) {
		drm_err(&i915->drm, "%s: not done (status 0x%08x)\n",
			intel_dp->aux.name, status);
		ret = -EBUSY;
		goto out;
	}

done:
	/* Check for timeout or receive error.
	 * Timeouts occur when the sink is not connected.
	 */
	if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) {
		drm_err(&i915->drm, "%s: receive error (status 0x%08x)\n",
			intel_dp->aux.name, status);
		ret = -EIO;
		goto out;
	}

	/* Timeouts occur when the device isn't connected, so they're
	 * "normal" -- don't fill the kernel log with these */
	if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR) {
		drm_dbg_kms(&i915->drm, "%s: timeout (status 0x%08x)\n",
			    intel_dp->aux.name, status);
		ret = -ETIMEDOUT;
		goto out;
	}

	/* Unload any bytes sent back from the other side */
	recv_bytes = ((status & DP_AUX_CH_CTL_MESSAGE_SIZE_MASK) >>
		      DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT);

	/*
	 * By BSpec: "Message sizes of 0 or >20 are not allowed."
	 * We have no idea of what happened so we return -EBUSY so
	 * the drm layer takes care of the necessary retries.
	 */
	if (recv_bytes == 0 || recv_bytes > 20) {
		drm_dbg_kms(&i915->drm,
			    "%s: Forbidden recv_bytes = %d on aux transaction\n",
			    intel_dp->aux.name, recv_bytes);
		ret = -EBUSY;
		goto out;
	}

	if (recv_bytes > recv_size)
		recv_bytes = recv_size;

	for (i = 0; i < recv_bytes; i += 4)
		intel_dp_unpack_aux(intel_uncore_read(uncore, ch_data[i >> 2]),
				    recv + i, recv_bytes - i);

	ret = recv_bytes;
out:
	cpu_latency_qos_update_request(&intel_dp->pm_qos, PM_QOS_DEFAULT_VALUE);

	if (vdd)
		edp_panel_vdd_off(intel_dp, false);

	pps_unlock(intel_dp, pps_wakeref);
	intel_display_power_put_async(i915, aux_domain, aux_wakeref);

	if (is_tc_port)
		intel_tc_port_unlock(dig_port);

	return ret;
}

#define BARE_ADDRESS_SIZE	3
#define HEADER_SIZE		(BARE_ADDRESS_SIZE + 1)

static void
intel_dp_aux_header(u8 txbuf[HEADER_SIZE],
		    const struct drm_dp_aux_msg *msg)
{
	txbuf[0] = (msg->request << 4) | ((msg->address >> 16) & 0xf);
	txbuf[1] = (msg->address >> 8) & 0xff;
	txbuf[2] = msg->address & 0xff;
	txbuf[3] = msg->size - 1;
}
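
/*
 * Example (illustrative): a 16-byte native AUX read (request 0x9) of
 * DPCD address 0x00000 is encoded as txbuf[] = { 0x90, 0x00, 0x00,
 * 0x0f }, with the length field holding size - 1.
 */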

static u32 intel_dp_aux_xfer_flags(const struct drm_dp_aux_msg *msg)
{
	/*
	 * If we're trying to send the HDCP Aksv, we need to set the Aksv
	 * select bit to inform the hardware to send the Aksv after our header
	 * since we can't access that data from software.
	 */
	if ((msg->request & ~DP_AUX_I2C_MOT) == DP_AUX_NATIVE_WRITE &&
	    msg->address == DP_AUX_HDCP_AKSV)
		return DP_AUX_CH_CTL_AUX_AKSV_SELECT;

	return 0;
}

static ssize_t
intel_dp_aux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
{
	struct intel_dp *intel_dp = container_of(aux, struct intel_dp, aux);
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	u8 txbuf[20], rxbuf[20];
	size_t txsize, rxsize;
	u32 flags = intel_dp_aux_xfer_flags(msg);
	int ret;

	intel_dp_aux_header(txbuf, msg);

	switch (msg->request & ~DP_AUX_I2C_MOT) {
	case DP_AUX_NATIVE_WRITE:
	case DP_AUX_I2C_WRITE:
	case DP_AUX_I2C_WRITE_STATUS_UPDATE:
		txsize = msg->size ? HEADER_SIZE + msg->size : BARE_ADDRESS_SIZE;
		rxsize = 2; /* 0 or 1 data bytes */

		if (drm_WARN_ON(&i915->drm, txsize > 20))
			return -E2BIG;

		drm_WARN_ON(&i915->drm, !msg->buffer != !msg->size);

		if (msg->buffer)
			memcpy(txbuf + HEADER_SIZE, msg->buffer, msg->size);

		ret = intel_dp_aux_xfer(intel_dp, txbuf, txsize,
					rxbuf, rxsize, flags);
		if (ret > 0) {
			msg->reply = rxbuf[0] >> 4;

			if (ret > 1) {
				/* Number of bytes written in a short write. */
				ret = clamp_t(int, rxbuf[1], 0, msg->size);
			} else {
				/* Return payload size. */
				ret = msg->size;
			}
		}
		break;

	case DP_AUX_NATIVE_READ:
	case DP_AUX_I2C_READ:
		txsize = msg->size ? HEADER_SIZE : BARE_ADDRESS_SIZE;
		rxsize = msg->size + 1;

		if (drm_WARN_ON(&i915->drm, rxsize > 20))
			return -E2BIG;

		ret = intel_dp_aux_xfer(intel_dp, txbuf, txsize,
					rxbuf, rxsize, flags);
		if (ret > 0) {
			msg->reply = rxbuf[0] >> 4;
			/*
			 * Assume happy day, and copy the data. The caller is
			 * expected to check msg->reply before touching it.
			 *
			 * Return payload size.
			 */
			ret--;
			memcpy(msg->buffer, rxbuf + 1, ret);
		}
		break;

	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}

static i915_reg_t g4x_aux_ctl_reg(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	enum aux_ch aux_ch = dig_port->aux_ch;

	switch (aux_ch) {
	case AUX_CH_B:
	case AUX_CH_C:
	case AUX_CH_D:
		return DP_AUX_CH_CTL(aux_ch);
	default:
		MISSING_CASE(aux_ch);
		return DP_AUX_CH_CTL(AUX_CH_B);
	}
}

static i915_reg_t g4x_aux_data_reg(struct intel_dp *intel_dp, int index)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	enum aux_ch aux_ch = dig_port->aux_ch;

	switch (aux_ch) {
	case AUX_CH_B:
	case AUX_CH_C:
	case AUX_CH_D:
		return DP_AUX_CH_DATA(aux_ch, index);
	default:
		MISSING_CASE(aux_ch);
		return DP_AUX_CH_DATA(AUX_CH_B, index);
	}
}

static i915_reg_t ilk_aux_ctl_reg(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	enum aux_ch aux_ch = dig_port->aux_ch;

	switch (aux_ch) {
	case AUX_CH_A:
		return DP_AUX_CH_CTL(aux_ch);
	case AUX_CH_B:
	case AUX_CH_C:
	case AUX_CH_D:
		return PCH_DP_AUX_CH_CTL(aux_ch);
	default:
		MISSING_CASE(aux_ch);
		return DP_AUX_CH_CTL(AUX_CH_A);
	}
}

static i915_reg_t ilk_aux_data_reg(struct intel_dp *intel_dp, int index)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	enum aux_ch aux_ch = dig_port->aux_ch;

	switch (aux_ch) {
	case AUX_CH_A:
		return DP_AUX_CH_DATA(aux_ch, index);
	case AUX_CH_B:
	case AUX_CH_C:
	case AUX_CH_D:
		return PCH_DP_AUX_CH_DATA(aux_ch, index);
	default:
		MISSING_CASE(aux_ch);
		return DP_AUX_CH_DATA(AUX_CH_A, index);
	}
}

static i915_reg_t skl_aux_ctl_reg(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	enum aux_ch aux_ch = dig_port->aux_ch;

	switch (aux_ch) {
	case AUX_CH_A:
	case AUX_CH_B:
	case AUX_CH_C:
	case AUX_CH_D:
	case AUX_CH_E:
	case AUX_CH_F:
		return DP_AUX_CH_CTL(aux_ch);
	default:
		MISSING_CASE(aux_ch);
		return DP_AUX_CH_CTL(AUX_CH_A);
	}
}

static i915_reg_t skl_aux_data_reg(struct intel_dp *intel_dp, int index)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	enum aux_ch aux_ch = dig_port->aux_ch;

	switch (aux_ch) {
	case AUX_CH_A:
	case AUX_CH_B:
	case AUX_CH_C:
	case AUX_CH_D:
	case AUX_CH_E:
	case AUX_CH_F:
		return DP_AUX_CH_DATA(aux_ch, index);
	default:
		MISSING_CASE(aux_ch);
		return DP_AUX_CH_DATA(AUX_CH_A, index);
	}
}

static i915_reg_t tgl_aux_ctl_reg(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	enum aux_ch aux_ch = dig_port->aux_ch;

	switch (aux_ch) {
	case AUX_CH_A:
	case AUX_CH_B:
	case AUX_CH_C:
	case AUX_CH_USBC1:
	case AUX_CH_USBC2:
	case AUX_CH_USBC3:
	case AUX_CH_USBC4:
	case AUX_CH_USBC5:
	case AUX_CH_USBC6:
		return DP_AUX_CH_CTL(aux_ch);
	default:
		MISSING_CASE(aux_ch);
		return DP_AUX_CH_CTL(AUX_CH_A);
	}
}

static i915_reg_t tgl_aux_data_reg(struct intel_dp *intel_dp, int index)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	enum aux_ch aux_ch = dig_port->aux_ch;

	switch (aux_ch) {
	case AUX_CH_A:
	case AUX_CH_B:
	case AUX_CH_C:
	case AUX_CH_USBC1:
	case AUX_CH_USBC2:
	case AUX_CH_USBC3:
	case AUX_CH_USBC4:
	case AUX_CH_USBC5:
	case AUX_CH_USBC6:
		return DP_AUX_CH_DATA(aux_ch, index);
	default:
		MISSING_CASE(aux_ch);
		return DP_AUX_CH_DATA(AUX_CH_A, index);
	}
}

static void
intel_dp_aux_fini(struct intel_dp *intel_dp)
{
	if (cpu_latency_qos_request_active(&intel_dp->pm_qos))
		cpu_latency_qos_remove_request(&intel_dp->pm_qos);

	kfree(intel_dp->aux.name);
}

static void
intel_dp_aux_init(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct intel_encoder *encoder = &dig_port->base;
	enum aux_ch aux_ch = dig_port->aux_ch;

	if (INTEL_GEN(dev_priv) >= 12) {
		intel_dp->aux_ch_ctl_reg = tgl_aux_ctl_reg;
		intel_dp->aux_ch_data_reg = tgl_aux_data_reg;
	} else if (INTEL_GEN(dev_priv) >= 9) {
		intel_dp->aux_ch_ctl_reg = skl_aux_ctl_reg;
		intel_dp->aux_ch_data_reg = skl_aux_data_reg;
	} else if (HAS_PCH_SPLIT(dev_priv)) {
		intel_dp->aux_ch_ctl_reg = ilk_aux_ctl_reg;
		intel_dp->aux_ch_data_reg = ilk_aux_data_reg;
	} else {
		intel_dp->aux_ch_ctl_reg = g4x_aux_ctl_reg;
		intel_dp->aux_ch_data_reg = g4x_aux_data_reg;
	}

	if (INTEL_GEN(dev_priv) >= 9)
		intel_dp->get_aux_clock_divider = skl_get_aux_clock_divider;
	else if (IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
		intel_dp->get_aux_clock_divider = hsw_get_aux_clock_divider;
	else if (HAS_PCH_SPLIT(dev_priv))
		intel_dp->get_aux_clock_divider = ilk_get_aux_clock_divider;
	else
		intel_dp->get_aux_clock_divider = g4x_get_aux_clock_divider;

	if (INTEL_GEN(dev_priv) >= 9)
		intel_dp->get_aux_send_ctl = skl_get_aux_send_ctl;
	else
		intel_dp->get_aux_send_ctl = g4x_get_aux_send_ctl;

	drm_dp_aux_init(&intel_dp->aux);

	/* Failure to allocate our preferred name is not critical */
	if (INTEL_GEN(dev_priv) >= 12 && aux_ch >= AUX_CH_USBC1)
		intel_dp->aux.name = kasprintf(GFP_KERNEL, "AUX USBC%c/%s",
					       aux_ch - AUX_CH_USBC1 + '1',
					       encoder->base.name);
	else
		intel_dp->aux.name = kasprintf(GFP_KERNEL, "AUX %c/%s",
					       aux_ch_name(aux_ch),
					       encoder->base.name);

	intel_dp->aux.transfer = intel_dp_aux_transfer;
	cpu_latency_qos_add_request(&intel_dp->pm_qos, PM_QOS_DEFAULT_VALUE);
}

bool intel_dp_source_supports_hbr2(struct intel_dp *intel_dp)
{
	int max_rate = intel_dp->source_rates[intel_dp->num_source_rates - 1];

	return max_rate >= 540000;
}

bool intel_dp_source_supports_hbr3(struct intel_dp *intel_dp)
{
	int max_rate = intel_dp->source_rates[intel_dp->num_source_rates - 1];

	return max_rate >= 810000;
}

static void
intel_dp_set_clock(struct intel_encoder *encoder,
		   struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	const struct dp_link_dpll *divisor = NULL;
	int i, count = 0;

	if (IS_G4X(dev_priv)) {
		divisor = g4x_dpll;
		count = ARRAY_SIZE(g4x_dpll);
	} else if (HAS_PCH_SPLIT(dev_priv)) {
		divisor = pch_dpll;
		count = ARRAY_SIZE(pch_dpll);
	} else if (IS_CHERRYVIEW(dev_priv)) {
		divisor = chv_dpll;
		count = ARRAY_SIZE(chv_dpll);
	} else if (IS_VALLEYVIEW(dev_priv)) {
		divisor = vlv_dpll;
		count = ARRAY_SIZE(vlv_dpll);
	}

	if (divisor && count) {
		for (i = 0; i < count; i++) {
			if (pipe_config->port_clock == divisor[i].clock) {
				pipe_config->dpll = divisor[i].dpll;
				pipe_config->clock_set = true;
				break;
			}
		}
	}
}

static void snprintf_int_array(char *str, size_t len,
			       const int *array, int nelem)
{
	int i;

	str[0] = '\0';

	for (i = 0; i < nelem; i++) {
		int r = snprintf(str, len, "%s%d", i ? ", " : "", array[i]);
		if (r >= len)
			return;
		str += r;
		len -= r;
	}
}

static void intel_dp_print_rates(struct intel_dp *intel_dp)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	char str[128]; /* FIXME: too big for stack? */

	if (!drm_debug_enabled(DRM_UT_KMS))
		return;

	snprintf_int_array(str, sizeof(str),
			   intel_dp->source_rates, intel_dp->num_source_rates);
	drm_dbg_kms(&i915->drm, "source rates: %s\n", str);

	snprintf_int_array(str, sizeof(str),
			   intel_dp->sink_rates, intel_dp->num_sink_rates);
	drm_dbg_kms(&i915->drm, "sink rates: %s\n", str);

	snprintf_int_array(str, sizeof(str),
			   intel_dp->common_rates, intel_dp->num_common_rates);
	drm_dbg_kms(&i915->drm, "common rates: %s\n", str);
}

int
intel_dp_max_link_rate(struct intel_dp *intel_dp)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	int len;

	len = intel_dp_common_len_rate_limit(intel_dp, intel_dp->max_link_rate);
	if (drm_WARN_ON(&i915->drm, len <= 0))
		return 162000;

	return intel_dp->common_rates[len - 1];
}

int intel_dp_rate_select(struct intel_dp *intel_dp, int rate)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	int i = intel_dp_rate_index(intel_dp->sink_rates,
				    intel_dp->num_sink_rates, rate);

	if (drm_WARN_ON(&i915->drm, i < 0))
		i = 0;

	return i;
}

void intel_dp_compute_rate(struct intel_dp *intel_dp, int port_clock,
			   u8 *link_bw, u8 *rate_select)
{
	/* eDP 1.4 rate select method. */
	if (intel_dp->use_rate_select) {
		*link_bw = 0;
		*rate_select =
			intel_dp_rate_select(intel_dp, port_clock);
	} else {
		*link_bw = drm_dp_link_rate_to_bw_code(port_clock);
		*rate_select = 0;
	}
}
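
/*
 * Example (illustrative): for port_clock == 270000 without rate select,
 * *link_bw becomes DP_LINK_BW_2_7 (0x0a); with the eDP 1.4 method,
 * *link_bw is 0 and *rate_select indexes the sink rate table instead.
 */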

static bool intel_dp_source_supports_fec(struct intel_dp *intel_dp,
					 const struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);

	/* On TGL, FEC is supported on all Pipes */
	if (INTEL_GEN(dev_priv) >= 12)
		return true;

	if (IS_GEN(dev_priv, 11) && pipe_config->cpu_transcoder != TRANSCODER_A)
		return true;

	return false;
}

static bool intel_dp_supports_fec(struct intel_dp *intel_dp,
				  const struct intel_crtc_state *pipe_config)
{
	return intel_dp_source_supports_fec(intel_dp, pipe_config) &&
		drm_dp_sink_supports_fec(intel_dp->fec_capable);
}

static bool intel_dp_supports_dsc(struct intel_dp *intel_dp,
				  const struct intel_crtc_state *crtc_state)
{
	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP) && !crtc_state->fec_enable)
		return false;

	return intel_dsc_source_support(crtc_state) &&
		drm_dp_sink_supports_dsc(intel_dp->dsc_dpcd);
}

static bool intel_dp_hdmi_ycbcr420(struct intel_dp *intel_dp,
				   const struct intel_crtc_state *crtc_state)
{
	return crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420 ||
		(crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR444 &&
		 intel_dp->dfp.ycbcr_444_to_420);
}

static int intel_dp_hdmi_tmds_clock(struct intel_dp *intel_dp,
				    const struct intel_crtc_state *crtc_state, int bpc)
{
	int clock = crtc_state->hw.adjusted_mode.crtc_clock * bpc / 8;

	if (intel_dp_hdmi_ycbcr420(intel_dp, crtc_state))
		clock /= 2;

	return clock;
}

static bool intel_dp_hdmi_tmds_clock_valid(struct intel_dp *intel_dp,
					   const struct intel_crtc_state *crtc_state, int bpc)
{
	int tmds_clock = intel_dp_hdmi_tmds_clock(intel_dp, crtc_state, bpc);

	if (intel_dp->dfp.min_tmds_clock &&
	    tmds_clock < intel_dp->dfp.min_tmds_clock)
		return false;

	if (intel_dp->dfp.max_tmds_clock &&
	    tmds_clock > intel_dp->dfp.max_tmds_clock)
		return false;

	return true;
}

static bool intel_dp_hdmi_deep_color_possible(struct intel_dp *intel_dp,
					      const struct intel_crtc_state *crtc_state,
					      int bpc)
{
	return intel_hdmi_deep_color_possible(crtc_state, bpc,
					      intel_dp->has_hdmi_sink,
					      intel_dp_hdmi_ycbcr420(intel_dp, crtc_state)) &&
		intel_dp_hdmi_tmds_clock_valid(intel_dp, crtc_state, bpc);
}

static int intel_dp_max_bpp(struct intel_dp *intel_dp,
			    const struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_connector *intel_connector = intel_dp->attached_connector;
	int bpp, bpc;

	bpc = crtc_state->pipe_bpp / 3;

	if (intel_dp->dfp.max_bpc)
		bpc = min_t(int, bpc, intel_dp->dfp.max_bpc);

	if (intel_dp->dfp.min_tmds_clock) {
		for (; bpc >= 10; bpc -= 2) {
			if (intel_dp_hdmi_deep_color_possible(intel_dp, crtc_state, bpc))
				break;
		}
	}

	bpp = bpc * 3;
	if (intel_dp_is_edp(intel_dp)) {
		/* Get bpp from vbt only for panels that don't have bpp in edid */
		if (intel_connector->base.display_info.bpc == 0 &&
		    dev_priv->vbt.edp.bpp && dev_priv->vbt.edp.bpp < bpp) {
			drm_dbg_kms(&dev_priv->drm,
				    "clamping bpp for eDP panel to BIOS-provided %i\n",
				    dev_priv->vbt.edp.bpp);
			bpp = dev_priv->vbt.edp.bpp;
		}
	}

	return bpp;
}
2217 /* Adjust link config limits based on compliance test requests. */
2219 intel_dp_adjust_compliance_config(struct intel_dp *intel_dp,
2220 struct intel_crtc_state *pipe_config,
2221 struct link_config_limits *limits)
2223 struct drm_i915_private *i915 = dp_to_i915(intel_dp);
2225 /* For DP Compliance we override the computed bpp for the pipe */
2226 if (intel_dp->compliance.test_data.bpc != 0) {
2227 int bpp = 3 * intel_dp->compliance.test_data.bpc;
2229 limits->min_bpp = limits->max_bpp = bpp;
2230 pipe_config->dither_force_disable = bpp == 6 * 3;
2232 drm_dbg_kms(&i915->drm, "Setting pipe_bpp to %d\n", bpp);
2235 /* Use values requested by Compliance Test Request */
2236 if (intel_dp->compliance.test_type == DP_TEST_LINK_TRAINING) {
2239 /* Validate the compliance test data since max values
2240 * might have changed due to link training fallback.
2242 if (intel_dp_link_params_valid(intel_dp, intel_dp->compliance.test_link_rate,
2243 intel_dp->compliance.test_lane_count)) {
2244 index = intel_dp_rate_index(intel_dp->common_rates,
2245 intel_dp->num_common_rates,
2246 intel_dp->compliance.test_link_rate);
2248 limits->min_clock = limits->max_clock = index;
2249 limits->min_lane_count = limits->max_lane_count =
2250 intel_dp->compliance.test_lane_count;
2255 /* Optimize link config in order: max bpp, min clock, min lanes */
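/*
 * Worked example (illustrative): a 148500 kHz mode at 24 bpp needs
 * 148500 * 24 / 8 = 445500 kB/s, while 2 lanes at 270000 kHz provide
 * 540000 kB/s, so that bpp/clock/lane combination is accepted.
 */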
2257 intel_dp_compute_link_config_wide(struct intel_dp *intel_dp,
2258 struct intel_crtc_state *pipe_config,
2259 const struct link_config_limits *limits)
2261 struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode;
2262 int bpp, clock, lane_count;
2263 int mode_rate, link_clock, link_avail;
2265 for (bpp = limits->max_bpp; bpp >= limits->min_bpp; bpp -= 2 * 3) {
2266 int output_bpp = intel_dp_output_bpp(pipe_config->output_format, bpp);
2268 mode_rate = intel_dp_link_required(adjusted_mode->crtc_clock,
2271 for (clock = limits->min_clock; clock <= limits->max_clock; clock++) {
2272 for (lane_count = limits->min_lane_count;
2273 lane_count <= limits->max_lane_count;
2275 link_clock = intel_dp->common_rates[clock];
2276 link_avail = intel_dp_max_data_rate(link_clock,
2279 if (mode_rate <= link_avail) {
2280 pipe_config->lane_count = lane_count;
2281 pipe_config->pipe_bpp = bpp;
2282 pipe_config->port_clock = link_clock;
2293 static int intel_dp_dsc_compute_bpp(struct intel_dp *intel_dp, u8 dsc_max_bpc)
2296 u8 dsc_bpc[3] = {0};
2298 num_bpc = drm_dp_dsc_sink_supported_input_bpcs(intel_dp->dsc_dpcd,
2300 for (i = 0; i < num_bpc; i++) {
2301 if (dsc_max_bpc >= dsc_bpc[i])
2302 return dsc_bpc[i] * 3;
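/*
 * Illustrative: if the sink advertises input bpcs of {12, 10, 8} and
 * dsc_max_bpc is 10, the first entry not exceeding the limit is 10,
 * so the computed pipe bpp is 10 * 3 = 30.
 */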
2308 #define DSC_SUPPORTED_VERSION_MIN 1
2310 static int intel_dp_dsc_compute_params(struct intel_encoder *encoder,
2311 struct intel_crtc_state *crtc_state)
2313 struct drm_i915_private *i915 = to_i915(encoder->base.dev);
2314 struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
2315 struct drm_dsc_config *vdsc_cfg = &crtc_state->dsc.config;
2320 * RC_MODEL_SIZE is currently a constant across all configurations.
2322 * FIXME: Look into using sink defined DPCD DP_DSC_RC_BUF_BLK_SIZE and
2323 * DP_DSC_RC_BUF_SIZE for this.
2325 vdsc_cfg->rc_model_size = DSC_RC_MODEL_SIZE_CONST;
2327 ret = intel_dsc_compute_params(encoder, crtc_state);
2332 * Slice Height of 8 works for all currently available panels. So start
2333 * with that if pic_height is an integral multiple of 8. Eventually add
2334 * logic to try multiple slice heights.
2336 if (vdsc_cfg->pic_height % 8 == 0)
2337 vdsc_cfg->slice_height = 8;
2338 else if (vdsc_cfg->pic_height % 4 == 0)
2339 vdsc_cfg->slice_height = 4;
2341 vdsc_cfg->slice_height = 2;
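/*
 * Illustrative: a 1080-line mode picks a slice height of 8
 * (1080 % 8 == 0), giving 135 slice rows.
 */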
2343 vdsc_cfg->dsc_version_major =
2344 (intel_dp->dsc_dpcd[DP_DSC_REV - DP_DSC_SUPPORT] &
2345 DP_DSC_MAJOR_MASK) >> DP_DSC_MAJOR_SHIFT;
2346 vdsc_cfg->dsc_version_minor =
2347 min(DSC_SUPPORTED_VERSION_MIN,
2348 (intel_dp->dsc_dpcd[DP_DSC_REV - DP_DSC_SUPPORT] &
2349 DP_DSC_MINOR_MASK) >> DP_DSC_MINOR_SHIFT);
2351 vdsc_cfg->convert_rgb = intel_dp->dsc_dpcd[DP_DSC_DEC_COLOR_FORMAT_CAP - DP_DSC_SUPPORT] &
2354 line_buf_depth = drm_dp_dsc_sink_line_buf_depth(intel_dp->dsc_dpcd);
2355 if (!line_buf_depth) {
2356 drm_dbg_kms(&i915->drm,
2357 "DSC Sink Line Buffer Depth invalid\n");
2361 if (vdsc_cfg->dsc_version_minor == 2)
2362 vdsc_cfg->line_buf_depth = (line_buf_depth == DSC_1_2_MAX_LINEBUF_DEPTH_BITS) ?
2363 DSC_1_2_MAX_LINEBUF_DEPTH_VAL : line_buf_depth;
2365 vdsc_cfg->line_buf_depth = (line_buf_depth > DSC_1_1_MAX_LINEBUF_DEPTH_BITS) ?
2366 DSC_1_1_MAX_LINEBUF_DEPTH_BITS : line_buf_depth;
2368 vdsc_cfg->block_pred_enable =
2369 intel_dp->dsc_dpcd[DP_DSC_BLK_PREDICTION_SUPPORT - DP_DSC_SUPPORT] &
2370 DP_DSC_BLK_PREDICTION_IS_SUPPORTED;
2372 return drm_dsc_compute_rc_parameters(vdsc_cfg);
2375 static int intel_dp_dsc_compute_config(struct intel_dp *intel_dp,
2376 struct intel_crtc_state *pipe_config,
2377 struct drm_connector_state *conn_state,
2378 struct link_config_limits *limits)
2380 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
2381 struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
2382 const struct drm_display_mode *adjusted_mode =
2383 &pipe_config->hw.adjusted_mode;
2388 pipe_config->fec_enable = !intel_dp_is_edp(intel_dp) &&
2389 intel_dp_supports_fec(intel_dp, pipe_config);
2391 if (!intel_dp_supports_dsc(intel_dp, pipe_config))
2394 /* Max DSC Input BPC for ICL is 10 and for TGL+ is 12 */
2395 if (INTEL_GEN(dev_priv) >= 12)
2396 dsc_max_bpc = min_t(u8, 12, conn_state->max_requested_bpc);
2398 dsc_max_bpc = min_t(u8, 10,
2399 conn_state->max_requested_bpc);
2401 pipe_bpp = intel_dp_dsc_compute_bpp(intel_dp, dsc_max_bpc);
2403 /* Min Input BPC for ICL+ is 8 */
2404 if (pipe_bpp < 8 * 3) {
2405 drm_dbg_kms(&dev_priv->drm,
2406 "No DSC support for less than 8bpc\n");
2411 * For now enable DSC for max bpp, max link rate, max lane count.
2412 * Optimize this later for the minimum possible link rate/lane count
2413 * with DSC enabled for the requested mode.
2415 pipe_config->pipe_bpp = pipe_bpp;
2416 pipe_config->port_clock = intel_dp->common_rates[limits->max_clock];
2417 pipe_config->lane_count = limits->max_lane_count;
2419 if (intel_dp_is_edp(intel_dp)) {
2420 pipe_config->dsc.compressed_bpp =
2421 min_t(u16, drm_edp_dsc_sink_output_bpp(intel_dp->dsc_dpcd) >> 4,
2422 pipe_config->pipe_bpp);
2423 pipe_config->dsc.slice_count =
2424 drm_dp_dsc_sink_max_slice_count(intel_dp->dsc_dpcd,
2427 u16 dsc_max_output_bpp;
2428 u8 dsc_dp_slice_count;
2430 dsc_max_output_bpp =
2431 intel_dp_dsc_get_output_bpp(dev_priv,
2432 pipe_config->port_clock,
2433 pipe_config->lane_count,
2434 adjusted_mode->crtc_clock,
2435 adjusted_mode->crtc_hdisplay,
2436 pipe_config->bigjoiner);
2437 dsc_dp_slice_count =
2438 intel_dp_dsc_get_slice_count(intel_dp,
2439 adjusted_mode->crtc_clock,
2440 adjusted_mode->crtc_hdisplay,
2441 pipe_config->bigjoiner);
2442 if (!dsc_max_output_bpp || !dsc_dp_slice_count) {
2443 drm_dbg_kms(&dev_priv->drm,
2444 "Compressed BPP/Slice Count not supported\n");
2447 pipe_config->dsc.compressed_bpp = min_t(u16,
2448 dsc_max_output_bpp >> 4,
2449 pipe_config->pipe_bpp);
2450 pipe_config->dsc.slice_count = dsc_dp_slice_count;
2453 * VDSC engine operates at 1 Pixel per clock, so if peak pixel rate
2454 * is greater than the maximum Cdclock and if slice count is even
2455 * then we need to use 2 VDSC instances.
2457 if (adjusted_mode->crtc_clock > dev_priv->max_cdclk_freq ||
2458 pipe_config->bigjoiner) {
2459 if (pipe_config->dsc.slice_count < 2) {
2460 drm_dbg_kms(&dev_priv->drm,
2461 "Cannot split stream to use 2 VDSC instances\n");
2465 pipe_config->dsc.dsc_split = true;
2468 ret = intel_dp_dsc_compute_params(&dig_port->base, pipe_config);
2470 drm_dbg_kms(&dev_priv->drm,
2471 "Cannot compute valid DSC parameters for Input Bpp = %d "
2472 "Compressed BPP = %d\n",
2473 pipe_config->pipe_bpp,
2474 pipe_config->dsc.compressed_bpp);
2478 pipe_config->dsc.compression_enable = true;
2479 drm_dbg_kms(&dev_priv->drm, "DP DSC computed with Input Bpp = %d "
2480 "Compressed Bpp = %d Slice Count = %d\n",
2481 pipe_config->pipe_bpp,
2482 pipe_config->dsc.compressed_bpp,
2483 pipe_config->dsc.slice_count);
2489 intel_dp_compute_link_config(struct intel_encoder *encoder,
2490 struct intel_crtc_state *pipe_config,
2491 struct drm_connector_state *conn_state)
2493 struct drm_i915_private *i915 = to_i915(encoder->base.dev);
2494 const struct drm_display_mode *adjusted_mode =
2495 &pipe_config->hw.adjusted_mode;
2496 struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
2497 struct link_config_limits limits;
2501 common_len = intel_dp_common_len_rate_limit(intel_dp,
2502 intel_dp->max_link_rate);
2504 /* No common link rates between source and sink */
2505 drm_WARN_ON(encoder->base.dev, common_len <= 0);
2507 limits.min_clock = 0;
2508 limits.max_clock = common_len - 1;
2510 limits.min_lane_count = 1;
2511 limits.max_lane_count = intel_dp_max_lane_count(intel_dp);
2513 limits.min_bpp = intel_dp_min_bpp(pipe_config->output_format);
2514 limits.max_bpp = intel_dp_max_bpp(intel_dp, pipe_config);
2516 if (intel_dp_is_edp(intel_dp)) {
2518 * Use the maximum clock and number of lanes the eDP panel
2519 * advertises being capable of. The panels are generally
2520 * designed to support only a single clock and lane
2521 * configuration, and typically these values correspond to the
2522 * native resolution of the panel.
2524 limits.min_lane_count = limits.max_lane_count;
2525 limits.min_clock = limits.max_clock;
2528 intel_dp_adjust_compliance_config(intel_dp, pipe_config, &limits);
2530 drm_dbg_kms(&i915->drm, "DP link computation with max lane count %i "
2531 "max rate %d max bpp %d pixel clock %iKHz\n",
2532 limits.max_lane_count,
2533 intel_dp->common_rates[limits.max_clock],
2534 limits.max_bpp, adjusted_mode->crtc_clock);
2536 if ((adjusted_mode->crtc_clock > i915->max_dotclk_freq ||
2537 adjusted_mode->crtc_hdisplay > 5120) &&
2538 intel_dp_can_bigjoiner(intel_dp))
2539 pipe_config->bigjoiner = true;
2542 * Optimize for slow and wide. This is the place to add alternative
2543 * optimization policy.
2545 ret = intel_dp_compute_link_config_wide(intel_dp, pipe_config, &limits);
2547 /* Enable compression if the mode doesn't fit the available BW */
2548 drm_dbg_kms(&i915->drm, "Force DSC en = %d\n", intel_dp->force_dsc_en);
2549 if (ret || intel_dp->force_dsc_en || pipe_config->bigjoiner) {
2550 ret = intel_dp_dsc_compute_config(intel_dp, pipe_config,
2551 conn_state, &limits);
2556 if (pipe_config->dsc.compression_enable) {
2557 drm_dbg_kms(&i915->drm,
2558 "DP lane count %d clock %d Input bpp %d Compressed bpp %d\n",
2559 pipe_config->lane_count, pipe_config->port_clock,
2560 pipe_config->pipe_bpp,
2561 pipe_config->dsc.compressed_bpp);
2563 drm_dbg_kms(&i915->drm,
2564 "DP link rate required %i available %i\n",
2565 intel_dp_link_required(adjusted_mode->crtc_clock,
2566 pipe_config->dsc.compressed_bpp),
2567 intel_dp_max_data_rate(pipe_config->port_clock,
2568 pipe_config->lane_count));
2570 drm_dbg_kms(&i915->drm, "DP lane count %d clock %d bpp %d\n",
2571 pipe_config->lane_count, pipe_config->port_clock,
2572 pipe_config->pipe_bpp);
2574 drm_dbg_kms(&i915->drm,
2575 "DP link rate required %i available %i\n",
2576 intel_dp_link_required(adjusted_mode->crtc_clock,
2577 pipe_config->pipe_bpp),
2578 intel_dp_max_data_rate(pipe_config->port_clock,
2579 pipe_config->lane_count));
2584 bool intel_dp_limited_color_range(const struct intel_crtc_state *crtc_state,
2585 const struct drm_connector_state *conn_state)
2587 const struct intel_digital_connector_state *intel_conn_state =
2588 to_intel_digital_connector_state(conn_state);
2589 const struct drm_display_mode *adjusted_mode =
2590 &crtc_state->hw.adjusted_mode;
2593 * Our YCbCr output is always limited range.
2594 * crtc_state->limited_color_range only applies to RGB,
2595 * and it must never be set for YCbCr or we risk setting
2596 * some conflicting bits in PIPECONF which will mess up
2597 * the colors on the monitor.
2599 if (crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB)
2602 if (intel_conn_state->broadcast_rgb == INTEL_BROADCAST_RGB_AUTO) {
2605 * CEA-861-E - 5.1 Default Encoding Parameters
2606 * VESA DisplayPort Ver.1.2a - 5.1.1.1 Video Colorimetry
2608 return crtc_state->pipe_bpp != 18 &&
2609 drm_default_rgb_quant_range(adjusted_mode) ==
2610 HDMI_QUANTIZATION_RANGE_LIMITED;
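/*
 * drm_default_rgb_quant_range() reports limited range for CEA video
 * modes (e.g. 1920x1080@60) and full range for everything else,
 * matching the CEA-861 default encoding rules cited above.
 */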
2612 return intel_conn_state->broadcast_rgb ==
2613 INTEL_BROADCAST_RGB_LIMITED;
2617 static bool intel_dp_port_has_audio(struct drm_i915_private *dev_priv,
2620 if (IS_G4X(dev_priv))
2622 if (INTEL_GEN(dev_priv) < 12 && port == PORT_A)
2628 static void intel_dp_compute_vsc_colorimetry(const struct intel_crtc_state *crtc_state,
2629 const struct drm_connector_state *conn_state,
2630 struct drm_dp_vsc_sdp *vsc)
2632 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
2633 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
2636 * Prepare VSC Header for SU as per DP 1.4 spec, Table 2-118
2637 * VSC SDP supporting 3D stereo, PSR2, and Pixel Encoding/
2638 * Colorimetry Format indication.
2640 vsc->revision = 0x5;
2643 /* DP 1.4a spec, Table 2-120 */
2644 switch (crtc_state->output_format) {
2645 case INTEL_OUTPUT_FORMAT_YCBCR444:
2646 vsc->pixelformat = DP_PIXELFORMAT_YUV444;
2648 case INTEL_OUTPUT_FORMAT_YCBCR420:
2649 vsc->pixelformat = DP_PIXELFORMAT_YUV420;
2651 case INTEL_OUTPUT_FORMAT_RGB:
2653 vsc->pixelformat = DP_PIXELFORMAT_RGB;
2656 switch (conn_state->colorspace) {
2657 case DRM_MODE_COLORIMETRY_BT709_YCC:
2658 vsc->colorimetry = DP_COLORIMETRY_BT709_YCC;
2660 case DRM_MODE_COLORIMETRY_XVYCC_601:
2661 vsc->colorimetry = DP_COLORIMETRY_XVYCC_601;
2663 case DRM_MODE_COLORIMETRY_XVYCC_709:
2664 vsc->colorimetry = DP_COLORIMETRY_XVYCC_709;
2666 case DRM_MODE_COLORIMETRY_SYCC_601:
2667 vsc->colorimetry = DP_COLORIMETRY_SYCC_601;
2669 case DRM_MODE_COLORIMETRY_OPYCC_601:
2670 vsc->colorimetry = DP_COLORIMETRY_OPYCC_601;
2672 case DRM_MODE_COLORIMETRY_BT2020_CYCC:
2673 vsc->colorimetry = DP_COLORIMETRY_BT2020_CYCC;
2675 case DRM_MODE_COLORIMETRY_BT2020_RGB:
2676 vsc->colorimetry = DP_COLORIMETRY_BT2020_RGB;
2678 case DRM_MODE_COLORIMETRY_BT2020_YCC:
2679 vsc->colorimetry = DP_COLORIMETRY_BT2020_YCC;
2681 case DRM_MODE_COLORIMETRY_DCI_P3_RGB_D65:
2682 case DRM_MODE_COLORIMETRY_DCI_P3_RGB_THEATER:
2683 vsc->colorimetry = DP_COLORIMETRY_DCI_P3_RGB;
2687 * RGB->YCBCR color conversion uses the BT.709 color space.
2690 if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420)
2691 vsc->colorimetry = DP_COLORIMETRY_BT709_YCC;
2693 vsc->colorimetry = DP_COLORIMETRY_DEFAULT;
2697 vsc->bpc = crtc_state->pipe_bpp / 3;
2699 /* only RGB pixelformat supports 6 bpc */
2700 drm_WARN_ON(&dev_priv->drm,
2701 vsc->bpc == 6 && vsc->pixelformat != DP_PIXELFORMAT_RGB);
2703 /* all YCbCr are always limited range */
2704 vsc->dynamic_range = DP_DYNAMIC_RANGE_CTA;
2705 vsc->content_type = DP_CONTENT_TYPE_NOT_DEFINED;
2708 static void intel_dp_compute_vsc_sdp(struct intel_dp *intel_dp,
2709 struct intel_crtc_state *crtc_state,
2710 const struct drm_connector_state *conn_state)
2712 struct drm_dp_vsc_sdp *vsc = &crtc_state->infoframes.vsc;
2714 /* When a crtc state has PSR, VSC SDP will be handled by PSR routine */
2715 if (crtc_state->has_psr)
2718 if (!intel_dp_needs_vsc_sdp(crtc_state, conn_state))
2721 crtc_state->infoframes.enable |= intel_hdmi_infoframe_enable(DP_SDP_VSC);
2722 vsc->sdp_type = DP_SDP_VSC;
2723 intel_dp_compute_vsc_colorimetry(crtc_state, conn_state,
2724 &crtc_state->infoframes.vsc);
2727 void intel_dp_compute_psr_vsc_sdp(struct intel_dp *intel_dp,
2728 const struct intel_crtc_state *crtc_state,
2729 const struct drm_connector_state *conn_state,
2730 struct drm_dp_vsc_sdp *vsc)
2732 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
2734 vsc->sdp_type = DP_SDP_VSC;
2736 if (dev_priv->psr.psr2_enabled) {
2737 if (dev_priv->psr.colorimetry_support &&
2738 intel_dp_needs_vsc_sdp(crtc_state, conn_state)) {
2739 /* [PSR2, +Colorimetry] */
2740 intel_dp_compute_vsc_colorimetry(crtc_state, conn_state,
2744 * [PSR2, -Colorimetry]
2745 * Prepare VSC Header for SU as per eDP 1.4 spec, Table 6-11
2746 * 3D stereo + PSR/PSR2 + Y-coordinate.
2748 vsc->revision = 0x4;
2754 * Prepare VSC Header for SU as per DP 1.4 spec, Table 2-118
2755 * VSC SDP supporting 3D stereo + PSR (applies to eDP v1.3 or higher).
2758 vsc->revision = 0x2;
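/*
 * To summarize the revisions chosen above: 0x5 is used for PSR2 with
 * colorimetry, 0x4 for PSR2 without colorimetry, and 0x2 for
 * PSR1-only sinks.
 */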
2764 intel_dp_compute_hdr_metadata_infoframe_sdp(struct intel_dp *intel_dp,
2765 struct intel_crtc_state *crtc_state,
2766 const struct drm_connector_state *conn_state)
2769 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
2770 struct hdmi_drm_infoframe *drm_infoframe = &crtc_state->infoframes.drm.drm;
2772 if (!conn_state->hdr_output_metadata)
2775 ret = drm_hdmi_infoframe_set_hdr_metadata(drm_infoframe, conn_state);
2778 drm_dbg_kms(&dev_priv->drm, "couldn't set HDR metadata in infoframe\n");
2782 crtc_state->infoframes.enable |=
2783 intel_hdmi_infoframe_enable(HDMI_PACKET_TYPE_GAMUT_METADATA);
2787 intel_dp_drrs_compute_config(struct intel_dp *intel_dp,
2788 struct intel_crtc_state *pipe_config,
2789 int output_bpp, bool constant_n)
2791 struct intel_connector *intel_connector = intel_dp->attached_connector;
2792 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
2795 * DRRS and PSR can't be enabled together, so PSR is given preference,
2796 * as it allows more power savings by completely shutting down the
2797 * display. To guarantee this, intel_dp_drrs_compute_config() must be
2798 * called after intel_psr_compute_config().
2800 if (pipe_config->has_psr)
2803 if (!intel_connector->panel.downclock_mode ||
2804 dev_priv->drrs.type != SEAMLESS_DRRS_SUPPORT)
2807 pipe_config->has_drrs = true;
2808 intel_link_compute_m_n(output_bpp, pipe_config->lane_count,
2809 intel_connector->panel.downclock_mode->clock,
2810 pipe_config->port_clock, &pipe_config->dp_m2_n2,
2811 constant_n, pipe_config->fec_enable);
2815 intel_dp_compute_config(struct intel_encoder *encoder,
2816 struct intel_crtc_state *pipe_config,
2817 struct drm_connector_state *conn_state)
2819 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
2820 struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode;
2821 struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
2822 enum port port = encoder->port;
2823 struct intel_connector *intel_connector = intel_dp->attached_connector;
2824 struct intel_digital_connector_state *intel_conn_state =
2825 to_intel_digital_connector_state(conn_state);
2826 bool constant_n = drm_dp_has_quirk(&intel_dp->desc, 0,
2827 DP_DPCD_QUIRK_CONSTANT_N);
2828 int ret = 0, output_bpp;
2830 if (HAS_PCH_SPLIT(dev_priv) && !HAS_DDI(dev_priv) && port != PORT_A)
2831 pipe_config->has_pch_encoder = true;
2833 pipe_config->output_format = intel_dp_output_format(&intel_connector->base,
2836 if (pipe_config->output_format == INTEL_OUTPUT_FORMAT_YCBCR420) {
2837 ret = intel_pch_panel_fitting(pipe_config, conn_state);
2842 if (!intel_dp_port_has_audio(dev_priv, port))
2843 pipe_config->has_audio = false;
2844 else if (intel_conn_state->force_audio == HDMI_AUDIO_AUTO)
2845 pipe_config->has_audio = intel_dp->has_audio;
2847 pipe_config->has_audio = intel_conn_state->force_audio == HDMI_AUDIO_ON;
2849 if (intel_dp_is_edp(intel_dp) && intel_connector->panel.fixed_mode) {
2850 intel_fixed_panel_mode(intel_connector->panel.fixed_mode,
2853 if (HAS_GMCH(dev_priv))
2854 ret = intel_gmch_panel_fitting(pipe_config, conn_state);
2856 ret = intel_pch_panel_fitting(pipe_config, conn_state);
2861 if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)
2864 if (HAS_GMCH(dev_priv) &&
2865 adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE)
2868 if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK)
2871 if (intel_dp_hdisplay_bad(dev_priv, adjusted_mode->crtc_hdisplay))
2874 ret = intel_dp_compute_link_config(encoder, pipe_config, conn_state);
2878 pipe_config->limited_color_range =
2879 intel_dp_limited_color_range(pipe_config, conn_state);
2881 if (pipe_config->dsc.compression_enable)
2882 output_bpp = pipe_config->dsc.compressed_bpp;
2884 output_bpp = intel_dp_output_bpp(pipe_config->output_format,
2885 pipe_config->pipe_bpp);
2887 intel_link_compute_m_n(output_bpp,
2888 pipe_config->lane_count,
2889 adjusted_mode->crtc_clock,
2890 pipe_config->port_clock,
2891 &pipe_config->dp_m_n,
2892 constant_n, pipe_config->fec_enable);
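/*
 * The data M/N pair programmed here encodes (roughly) the ratio of
 * stream bandwidth to link bandwidth; the CONSTANT_N quirk pins N to a
 * fixed value for sinks that require it.
 */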
2894 if (!HAS_DDI(dev_priv))
2895 intel_dp_set_clock(encoder, pipe_config);
2897 intel_psr_compute_config(intel_dp, pipe_config);
2898 intel_dp_drrs_compute_config(intel_dp, pipe_config, output_bpp,
2900 intel_dp_compute_vsc_sdp(intel_dp, pipe_config, conn_state);
2901 intel_dp_compute_hdr_metadata_infoframe_sdp(intel_dp, pipe_config, conn_state);
2906 void intel_dp_set_link_params(struct intel_dp *intel_dp,
2907 int link_rate, int lane_count)
2909 intel_dp->link_trained = false;
2910 intel_dp->link_rate = link_rate;
2911 intel_dp->lane_count = lane_count;
2914 static void intel_dp_prepare(struct intel_encoder *encoder,
2915 const struct intel_crtc_state *pipe_config)
2917 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
2918 struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
2919 enum port port = encoder->port;
2920 struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
2921 const struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode;
2923 intel_dp_set_link_params(intel_dp,
2924 pipe_config->port_clock,
2925 pipe_config->lane_count);
2928 * There are four kinds of DP registers:
2935 * IBX PCH and CPU are the same for almost everything,
2936 * except that the CPU DP PLL is configured in this register.
2939 * CPT PCH is quite different, having many bits moved
2940 * to the TRANS_DP_CTL register instead. That
2941 * configuration happens (oddly) in ilk_pch_enable
2944 /* Preserve the BIOS-computed detected bit. This is
2945 * supposed to be read-only.
2947 intel_dp->DP = intel_de_read(dev_priv, intel_dp->output_reg) & DP_DETECTED;
2949 /* Handle DP bits in common between all three register formats */
2950 intel_dp->DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
2951 intel_dp->DP |= DP_PORT_WIDTH(pipe_config->lane_count);
2953 /* Split out the IBX/CPU vs CPT settings */
2955 if (IS_IVYBRIDGE(dev_priv) && port == PORT_A) {
2956 if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
2957 intel_dp->DP |= DP_SYNC_HS_HIGH;
2958 if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
2959 intel_dp->DP |= DP_SYNC_VS_HIGH;
2960 intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;
2962 if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
2963 intel_dp->DP |= DP_ENHANCED_FRAMING;
2965 intel_dp->DP |= DP_PIPE_SEL_IVB(crtc->pipe);
2966 } else if (HAS_PCH_CPT(dev_priv) && port != PORT_A) {
2969 intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;
2971 trans_dp = intel_de_read(dev_priv, TRANS_DP_CTL(crtc->pipe));
2972 if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
2973 trans_dp |= TRANS_DP_ENH_FRAMING;
2975 trans_dp &= ~TRANS_DP_ENH_FRAMING;
2976 intel_de_write(dev_priv, TRANS_DP_CTL(crtc->pipe), trans_dp);
2978 if (IS_G4X(dev_priv) && pipe_config->limited_color_range)
2979 intel_dp->DP |= DP_COLOR_RANGE_16_235;
2981 if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
2982 intel_dp->DP |= DP_SYNC_HS_HIGH;
2983 if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
2984 intel_dp->DP |= DP_SYNC_VS_HIGH;
2985 intel_dp->DP |= DP_LINK_TRAIN_OFF;
2987 if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
2988 intel_dp->DP |= DP_ENHANCED_FRAMING;
2990 if (IS_CHERRYVIEW(dev_priv))
2991 intel_dp->DP |= DP_PIPE_SEL_CHV(crtc->pipe);
2993 intel_dp->DP |= DP_PIPE_SEL(crtc->pipe);
2997 #define IDLE_ON_MASK (PP_ON | PP_SEQUENCE_MASK | 0 | PP_SEQUENCE_STATE_MASK)
2998 #define IDLE_ON_VALUE (PP_ON | PP_SEQUENCE_NONE | 0 | PP_SEQUENCE_STATE_ON_IDLE)
3000 #define IDLE_OFF_MASK (PP_ON | PP_SEQUENCE_MASK | 0 | 0)
3001 #define IDLE_OFF_VALUE (0 | PP_SEQUENCE_NONE | 0 | 0)
3003 #define IDLE_CYCLE_MASK (PP_ON | PP_SEQUENCE_MASK | PP_CYCLE_DELAY_ACTIVE | PP_SEQUENCE_STATE_MASK)
3004 #define IDLE_CYCLE_VALUE (0 | PP_SEQUENCE_NONE | 0 | PP_SEQUENCE_STATE_OFF_IDLE)
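/*
 * Each MASK/VALUE pair above is handed to wait_panel_status(), which
 * polls PP_STATUS until (status & mask) == value.
 */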
3006 static void intel_pps_verify_state(struct intel_dp *intel_dp);
3008 static void wait_panel_status(struct intel_dp *intel_dp,
3012 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
3013 i915_reg_t pp_stat_reg, pp_ctrl_reg;
3015 lockdep_assert_held(&dev_priv->pps_mutex);
3017 intel_pps_verify_state(intel_dp);
3019 pp_stat_reg = _pp_stat_reg(intel_dp);
3020 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
3022 drm_dbg_kms(&dev_priv->drm,
3023 "mask %08x value %08x status %08x control %08x\n",
3025 intel_de_read(dev_priv, pp_stat_reg),
3026 intel_de_read(dev_priv, pp_ctrl_reg));
3028 if (intel_de_wait_for_register(dev_priv, pp_stat_reg,
3030 drm_err(&dev_priv->drm,
3031 "Panel status timeout: status %08x control %08x\n",
3032 intel_de_read(dev_priv, pp_stat_reg),
3033 intel_de_read(dev_priv, pp_ctrl_reg));
3035 drm_dbg_kms(&dev_priv->drm, "Wait complete\n");
3038 static void wait_panel_on(struct intel_dp *intel_dp)
3040 struct drm_i915_private *i915 = dp_to_i915(intel_dp);
3042 drm_dbg_kms(&i915->drm, "Wait for panel power on\n");
3043 wait_panel_status(intel_dp, IDLE_ON_MASK, IDLE_ON_VALUE);
3046 static void wait_panel_off(struct intel_dp *intel_dp)
3048 struct drm_i915_private *i915 = dp_to_i915(intel_dp);
3050 drm_dbg_kms(&i915->drm, "Wait for panel power off time\n");
3051 wait_panel_status(intel_dp, IDLE_OFF_MASK, IDLE_OFF_VALUE);
3054 static void wait_panel_power_cycle(struct intel_dp *intel_dp)
3056 struct drm_i915_private *i915 = dp_to_i915(intel_dp);
3057 ktime_t panel_power_on_time;
3058 s64 panel_power_off_duration;
3060 drm_dbg_kms(&i915->drm, "Wait for panel power cycle\n");
3062 /* Take the difference of the current time and the panel power off
3063 * time, and then make the panel wait for t11_t12 if needed. */
3064 panel_power_on_time = ktime_get_boottime();
3065 panel_power_off_duration = ktime_ms_delta(panel_power_on_time, intel_dp->panel_power_off_time);
3067 /* When we disable the VDD override bit last we have to do the manual wait. */
3069 if (panel_power_off_duration < (s64)intel_dp->panel_power_cycle_delay)
3070 wait_remaining_ms_from_jiffies(jiffies,
3071 intel_dp->panel_power_cycle_delay - panel_power_off_duration);
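/*
 * Illustrative: with a 500 ms panel_power_cycle_delay and a panel that
 * has been off for 200 ms, we sleep the remaining 300 ms here.
 */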
3073 wait_panel_status(intel_dp, IDLE_CYCLE_MASK, IDLE_CYCLE_VALUE);
3076 static void wait_backlight_on(struct intel_dp *intel_dp)
3078 wait_remaining_ms_from_jiffies(intel_dp->last_power_on,
3079 intel_dp->backlight_on_delay);
3082 static void edp_wait_backlight_off(struct intel_dp *intel_dp)
3084 wait_remaining_ms_from_jiffies(intel_dp->last_backlight_off,
3085 intel_dp->backlight_off_delay);
3088 /* Read the current pp_control value, unlocking the register if it is locked. */
3092 static u32 ilk_get_pp_control(struct intel_dp *intel_dp)
3094 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
3097 lockdep_assert_held(&dev_priv->pps_mutex);
3099 control = intel_de_read(dev_priv, _pp_ctrl_reg(intel_dp));
3100 if (drm_WARN_ON(&dev_priv->drm, !HAS_DDI(dev_priv) &&
3101 (control & PANEL_UNLOCK_MASK) != PANEL_UNLOCK_REGS)) {
3102 control &= ~PANEL_UNLOCK_MASK;
3103 control |= PANEL_UNLOCK_REGS;
3109 * Must be paired with edp_panel_vdd_off().
3110 * Must hold pps_mutex around the whole on/off sequence.
3111 * Can be nested with intel_edp_panel_vdd_{on,off}() calls.
3113 static bool edp_panel_vdd_on(struct intel_dp *intel_dp)
3115 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
3116 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
3118 i915_reg_t pp_stat_reg, pp_ctrl_reg;
3119 bool need_to_disable = !intel_dp->want_panel_vdd;
3121 lockdep_assert_held(&dev_priv->pps_mutex);
3123 if (!intel_dp_is_edp(intel_dp))
3126 cancel_delayed_work(&intel_dp->panel_vdd_work);
3127 intel_dp->want_panel_vdd = true;
3129 if (edp_have_panel_vdd(intel_dp))
3130 return need_to_disable;
3132 drm_WARN_ON(&dev_priv->drm, intel_dp->vdd_wakeref);
3133 intel_dp->vdd_wakeref = intel_display_power_get(dev_priv,
3134 intel_aux_power_domain(dig_port));
3136 drm_dbg_kms(&dev_priv->drm, "Turning [ENCODER:%d:%s] VDD on\n",
3137 dig_port->base.base.base.id,
3138 dig_port->base.base.name);
3140 if (!edp_have_panel_power(intel_dp))
3141 wait_panel_power_cycle(intel_dp);
3143 pp = ilk_get_pp_control(intel_dp);
3144 pp |= EDP_FORCE_VDD;
3146 pp_stat_reg = _pp_stat_reg(intel_dp);
3147 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
3149 intel_de_write(dev_priv, pp_ctrl_reg, pp);
3150 intel_de_posting_read(dev_priv, pp_ctrl_reg);
3151 drm_dbg_kms(&dev_priv->drm, "PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
3152 intel_de_read(dev_priv, pp_stat_reg),
3153 intel_de_read(dev_priv, pp_ctrl_reg));
3155 * If the panel wasn't on, delay before accessing aux channel
3157 if (!edp_have_panel_power(intel_dp)) {
3158 drm_dbg_kms(&dev_priv->drm,
3159 "[ENCODER:%d:%s] panel power wasn't enabled\n",
3160 dig_port->base.base.base.id,
3161 dig_port->base.base.name);
3162 msleep(intel_dp->panel_power_up_delay);
3165 return need_to_disable;
3169 * Must be paired with intel_edp_panel_vdd_off() or
3170 * intel_edp_panel_off().
3171 * Nested calls to these functions are not allowed since
3172 * we drop the lock. Caller must use some higher level
3173 * locking to prevent nested calls from other threads.
3175 void intel_edp_panel_vdd_on(struct intel_dp *intel_dp)
3177 intel_wakeref_t wakeref;
3180 if (!intel_dp_is_edp(intel_dp))
3184 with_pps_lock(intel_dp, wakeref)
3185 vdd = edp_panel_vdd_on(intel_dp);
3186 I915_STATE_WARN(!vdd, "[ENCODER:%d:%s] VDD already requested on\n",
3187 dp_to_dig_port(intel_dp)->base.base.base.id,
3188 dp_to_dig_port(intel_dp)->base.base.name);
3191 static void edp_panel_vdd_off_sync(struct intel_dp *intel_dp)
3193 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
3194 struct intel_digital_port *dig_port =
3195 dp_to_dig_port(intel_dp);
3197 i915_reg_t pp_stat_reg, pp_ctrl_reg;
3199 lockdep_assert_held(&dev_priv->pps_mutex);
3201 drm_WARN_ON(&dev_priv->drm, intel_dp->want_panel_vdd);
3203 if (!edp_have_panel_vdd(intel_dp))
3206 drm_dbg_kms(&dev_priv->drm, "Turning [ENCODER:%d:%s] VDD off\n",
3207 dig_port->base.base.base.id,
3208 dig_port->base.base.name);
3210 pp = ilk_get_pp_control(intel_dp);
3211 pp &= ~EDP_FORCE_VDD;
3213 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
3214 pp_stat_reg = _pp_stat_reg(intel_dp);
3216 intel_de_write(dev_priv, pp_ctrl_reg, pp);
3217 intel_de_posting_read(dev_priv, pp_ctrl_reg);
3219 /* Make sure sequencer is idle before allowing subsequent activity */
3220 drm_dbg_kms(&dev_priv->drm, "PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
3221 intel_de_read(dev_priv, pp_stat_reg),
3222 intel_de_read(dev_priv, pp_ctrl_reg));
3224 if ((pp & PANEL_POWER_ON) == 0)
3225 intel_dp->panel_power_off_time = ktime_get_boottime();
3227 intel_display_power_put(dev_priv,
3228 intel_aux_power_domain(dig_port),
3229 fetch_and_zero(&intel_dp->vdd_wakeref));
3232 static void edp_panel_vdd_work(struct work_struct *__work)
3234 struct intel_dp *intel_dp =
3235 container_of(to_delayed_work(__work),
3236 struct intel_dp, panel_vdd_work);
3237 intel_wakeref_t wakeref;
3239 with_pps_lock(intel_dp, wakeref) {
3240 if (!intel_dp->want_panel_vdd)
3241 edp_panel_vdd_off_sync(intel_dp);
3245 static void edp_panel_vdd_schedule_off(struct intel_dp *intel_dp)
3247 unsigned long delay;
3250 * Queue the timer to fire a long time from now (relative to the power
3251 * down delay) to keep the panel power up across a sequence of operations.
3254 delay = msecs_to_jiffies(intel_dp->panel_power_cycle_delay * 5);
3255 schedule_delayed_work(&intel_dp->panel_vdd_work, delay);
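/*
 * Illustrative: with a 500 ms power cycle delay the VDD-off work runs
 * 2.5 s later, so back-to-back AUX transfers keep VDD up.
 */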
3259 * Must be paired with edp_panel_vdd_on().
3260 * Must hold pps_mutex around the whole on/off sequence.
3261 * Can be nested with intel_edp_panel_vdd_{on,off}() calls.
3263 static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync)
3265 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
3267 lockdep_assert_held(&dev_priv->pps_mutex);
3269 if (!intel_dp_is_edp(intel_dp))
3272 I915_STATE_WARN(!intel_dp->want_panel_vdd, "[ENCODER:%d:%s] VDD not forced on",
3273 dp_to_dig_port(intel_dp)->base.base.base.id,
3274 dp_to_dig_port(intel_dp)->base.base.name);
3276 intel_dp->want_panel_vdd = false;
3279 edp_panel_vdd_off_sync(intel_dp);
3281 edp_panel_vdd_schedule_off(intel_dp);
3284 static void edp_panel_on(struct intel_dp *intel_dp)
3286 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
3288 i915_reg_t pp_ctrl_reg;
3290 lockdep_assert_held(&dev_priv->pps_mutex);
3292 if (!intel_dp_is_edp(intel_dp))
3295 drm_dbg_kms(&dev_priv->drm, "Turn [ENCODER:%d:%s] panel power on\n",
3296 dp_to_dig_port(intel_dp)->base.base.base.id,
3297 dp_to_dig_port(intel_dp)->base.base.name);
3299 if (drm_WARN(&dev_priv->drm, edp_have_panel_power(intel_dp),
3300 "[ENCODER:%d:%s] panel power already on\n",
3301 dp_to_dig_port(intel_dp)->base.base.base.id,
3302 dp_to_dig_port(intel_dp)->base.base.name))
3305 wait_panel_power_cycle(intel_dp);
3307 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
3308 pp = ilk_get_pp_control(intel_dp);
3309 if (IS_GEN(dev_priv, 5)) {
3310 /* ILK workaround: disable reset around power sequence */
3311 pp &= ~PANEL_POWER_RESET;
3312 intel_de_write(dev_priv, pp_ctrl_reg, pp);
3313 intel_de_posting_read(dev_priv, pp_ctrl_reg);
3316 pp |= PANEL_POWER_ON;
3317 if (!IS_GEN(dev_priv, 5))
3318 pp |= PANEL_POWER_RESET;
3320 intel_de_write(dev_priv, pp_ctrl_reg, pp);
3321 intel_de_posting_read(dev_priv, pp_ctrl_reg);
3323 wait_panel_on(intel_dp);
3324 intel_dp->last_power_on = jiffies;
3326 if (IS_GEN(dev_priv, 5)) {
3327 pp |= PANEL_POWER_RESET; /* restore panel reset bit */
3328 intel_de_write(dev_priv, pp_ctrl_reg, pp);
3329 intel_de_posting_read(dev_priv, pp_ctrl_reg);
3333 void intel_edp_panel_on(struct intel_dp *intel_dp)
3335 intel_wakeref_t wakeref;
3337 if (!intel_dp_is_edp(intel_dp))
3340 with_pps_lock(intel_dp, wakeref)
3341 edp_panel_on(intel_dp);
3345 static void edp_panel_off(struct intel_dp *intel_dp)
3347 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
3348 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
3350 i915_reg_t pp_ctrl_reg;
3352 lockdep_assert_held(&dev_priv->pps_mutex);
3354 if (!intel_dp_is_edp(intel_dp))
3357 drm_dbg_kms(&dev_priv->drm, "Turn [ENCODER:%d:%s] panel power off\n",
3358 dig_port->base.base.base.id, dig_port->base.base.name);
3360 drm_WARN(&dev_priv->drm, !intel_dp->want_panel_vdd,
3361 "Need [ENCODER:%d:%s] VDD to turn off panel\n",
3362 dig_port->base.base.base.id, dig_port->base.base.name);
3364 pp = ilk_get_pp_control(intel_dp);
3365 /* We need to switch off panel power _and_ force vdd, for otherwise some
3366 * panels get very unhappy and cease to work. */
3367 pp &= ~(PANEL_POWER_ON | PANEL_POWER_RESET | EDP_FORCE_VDD |
3370 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
3372 intel_dp->want_panel_vdd = false;
3374 intel_de_write(dev_priv, pp_ctrl_reg, pp);
3375 intel_de_posting_read(dev_priv, pp_ctrl_reg);
3377 wait_panel_off(intel_dp);
3378 intel_dp->panel_power_off_time = ktime_get_boottime();
3380 /* We got a reference when we enabled the VDD. */
3381 intel_display_power_put(dev_priv,
3382 intel_aux_power_domain(dig_port),
3383 fetch_and_zero(&intel_dp->vdd_wakeref));
3386 void intel_edp_panel_off(struct intel_dp *intel_dp)
3388 intel_wakeref_t wakeref;
3390 if (!intel_dp_is_edp(intel_dp))
3393 with_pps_lock(intel_dp, wakeref)
3394 edp_panel_off(intel_dp);
3397 /* Enable backlight in the panel power control. */
3398 static void _intel_edp_backlight_on(struct intel_dp *intel_dp)
3400 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
3401 intel_wakeref_t wakeref;
3404 * If we enable the backlight right away following a panel power
3405 * on, we may see slight flicker as the panel syncs with the eDP
3406 * link. So delay a bit to make sure the image is solid before
3407 * allowing it to appear.
3409 wait_backlight_on(intel_dp);
3411 with_pps_lock(intel_dp, wakeref) {
3412 i915_reg_t pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
3415 pp = ilk_get_pp_control(intel_dp);
3416 pp |= EDP_BLC_ENABLE;
3418 intel_de_write(dev_priv, pp_ctrl_reg, pp);
3419 intel_de_posting_read(dev_priv, pp_ctrl_reg);
3423 /* Enable backlight PWM and backlight PP control. */
3424 void intel_edp_backlight_on(const struct intel_crtc_state *crtc_state,
3425 const struct drm_connector_state *conn_state)
3427 struct intel_dp *intel_dp = enc_to_intel_dp(to_intel_encoder(conn_state->best_encoder));
3428 struct drm_i915_private *i915 = dp_to_i915(intel_dp);
3430 if (!intel_dp_is_edp(intel_dp))
3433 drm_dbg_kms(&i915->drm, "\n");
3435 intel_panel_enable_backlight(crtc_state, conn_state);
3436 _intel_edp_backlight_on(intel_dp);
3439 /* Disable backlight in the panel power control. */
3440 static void _intel_edp_backlight_off(struct intel_dp *intel_dp)
3442 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
3443 intel_wakeref_t wakeref;
3445 if (!intel_dp_is_edp(intel_dp))
3448 with_pps_lock(intel_dp, wakeref) {
3449 i915_reg_t pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
3452 pp = ilk_get_pp_control(intel_dp);
3453 pp &= ~EDP_BLC_ENABLE;
3455 intel_de_write(dev_priv, pp_ctrl_reg, pp);
3456 intel_de_posting_read(dev_priv, pp_ctrl_reg);
3459 intel_dp->last_backlight_off = jiffies;
3460 edp_wait_backlight_off(intel_dp);
3463 /* Disable backlight PP control and backlight PWM. */
3464 void intel_edp_backlight_off(const struct drm_connector_state *old_conn_state)
3466 struct intel_dp *intel_dp = enc_to_intel_dp(to_intel_encoder(old_conn_state->best_encoder));
3467 struct drm_i915_private *i915 = dp_to_i915(intel_dp);
3469 if (!intel_dp_is_edp(intel_dp))
3472 drm_dbg_kms(&i915->drm, "\n");
3474 _intel_edp_backlight_off(intel_dp);
3475 intel_panel_disable_backlight(old_conn_state);
3479 * Hook for controlling the panel power control backlight through the bl_power
3480 * sysfs attribute. Take care to handle multiple calls.
3482 static void intel_edp_backlight_power(struct intel_connector *connector,
3485 struct drm_i915_private *i915 = to_i915(connector->base.dev);
3486 struct intel_dp *intel_dp = intel_attached_dp(connector);
3487 intel_wakeref_t wakeref;
3491 with_pps_lock(intel_dp, wakeref)
3492 is_enabled = ilk_get_pp_control(intel_dp) & EDP_BLC_ENABLE;
3493 if (is_enabled == enable)
3496 drm_dbg_kms(&i915->drm, "panel power control backlight %s\n",
3497 enable ? "enable" : "disable");
3500 _intel_edp_backlight_on(intel_dp);
3502 _intel_edp_backlight_off(intel_dp);
3505 static void assert_dp_port(struct intel_dp *intel_dp, bool state)
3507 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
3508 struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
3509 bool cur_state = intel_de_read(dev_priv, intel_dp->output_reg) & DP_PORT_EN;
3511 I915_STATE_WARN(cur_state != state,
3512 "[ENCODER:%d:%s] state assertion failure (expected %s, current %s)\n",
3513 dig_port->base.base.base.id, dig_port->base.base.name,
3514 onoff(state), onoff(cur_state));
3516 #define assert_dp_port_disabled(d) assert_dp_port((d), false)
3518 static void assert_edp_pll(struct drm_i915_private *dev_priv, bool state)
3520 bool cur_state = intel_de_read(dev_priv, DP_A) & DP_PLL_ENABLE;
3522 I915_STATE_WARN(cur_state != state,
3523 "eDP PLL state assertion failure (expected %s, current %s)\n",
3524 onoff(state), onoff(cur_state));
3526 #define assert_edp_pll_enabled(d) assert_edp_pll((d), true)
3527 #define assert_edp_pll_disabled(d) assert_edp_pll((d), false)
3529 static void ilk_edp_pll_on(struct intel_dp *intel_dp,
3530 const struct intel_crtc_state *pipe_config)
3532 struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
3533 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
3535 assert_pipe_disabled(dev_priv, pipe_config->cpu_transcoder);
3536 assert_dp_port_disabled(intel_dp);
3537 assert_edp_pll_disabled(dev_priv);
3539 drm_dbg_kms(&dev_priv->drm, "enabling eDP PLL for clock %d\n",
3540 pipe_config->port_clock);
3542 intel_dp->DP &= ~DP_PLL_FREQ_MASK;
3544 if (pipe_config->port_clock == 162000)
3545 intel_dp->DP |= DP_PLL_FREQ_162MHZ;
3547 intel_dp->DP |= DP_PLL_FREQ_270MHZ;
3549 intel_de_write(dev_priv, DP_A, intel_dp->DP);
3550 intel_de_posting_read(dev_priv, DP_A);
3554 * [DevILK] Work around required when enabling DP PLL
3555 * while a pipe is enabled going to FDI:
3556 * 1. Wait for the start of vertical blank on the enabled pipe going to FDI
3557 * 2. Program DP PLL enable
3559 if (IS_GEN(dev_priv, 5))
3560 intel_wait_for_vblank_if_active(dev_priv, !crtc->pipe);
3562 intel_dp->DP |= DP_PLL_ENABLE;
3564 intel_de_write(dev_priv, DP_A, intel_dp->DP);
3565 intel_de_posting_read(dev_priv, DP_A);
3569 static void ilk_edp_pll_off(struct intel_dp *intel_dp,
3570 const struct intel_crtc_state *old_crtc_state)
3572 struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
3573 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
3575 assert_pipe_disabled(dev_priv, old_crtc_state->cpu_transcoder);
3576 assert_dp_port_disabled(intel_dp);
3577 assert_edp_pll_enabled(dev_priv);
3579 drm_dbg_kms(&dev_priv->drm, "disabling eDP PLL\n");
3581 intel_dp->DP &= ~DP_PLL_ENABLE;
3583 intel_de_write(dev_priv, DP_A, intel_dp->DP);
3584 intel_de_posting_read(dev_priv, DP_A);
3588 static bool downstream_hpd_needs_d0(struct intel_dp *intel_dp)
3591 * DPCD 1.2+ should support BRANCH_DEVICE_CTRL, and thus
3592 * be capable of signalling downstream hpd with a long pulse.
3593 * Whether or not that means D3 is safe to use is not clear,
3594 * but let's assume so until proven otherwise.
3596 * FIXME should really check all downstream ports...
3598 return intel_dp->dpcd[DP_DPCD_REV] == 0x11 &&
3599 drm_dp_is_branch(intel_dp->dpcd) &&
3600 intel_dp->downstream_ports[0] & DP_DS_PORT_HPD;
3603 void intel_dp_sink_set_decompression_state(struct intel_dp *intel_dp,
3604 const struct intel_crtc_state *crtc_state,
3607 struct drm_i915_private *i915 = dp_to_i915(intel_dp);
3610 if (!crtc_state->dsc.compression_enable)
3613 ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_DSC_ENABLE,
3614 enable ? DP_DECOMPRESSION_EN : 0);
3616 drm_dbg_kms(&i915->drm,
3617 "Failed to %s sink decompression state\n",
3618 enable ? "enable" : "disable");
3622 intel_edp_init_source_oui(struct intel_dp *intel_dp, bool careful)
3624 struct drm_i915_private *i915 = dp_to_i915(intel_dp);
3625 u8 oui[] = { 0x00, 0xaa, 0x01 };
3629 * During driver init, we want to be careful and avoid changing the source OUI if it's
3630 * already set to what we want, so as to avoid clearing any state by accident
3633 if (drm_dp_dpcd_read(&intel_dp->aux, DP_SOURCE_OUI, buf, sizeof(buf)) < 0)
3634 drm_err(&i915->drm, "Failed to read source OUI\n");
3636 if (memcmp(oui, buf, sizeof(oui)) == 0)
3640 if (drm_dp_dpcd_write(&intel_dp->aux, DP_SOURCE_OUI, oui, sizeof(oui)) < 0)
3641 drm_err(&i915->drm, "Failed to write source OUI\n");
3644 /* If the device supports it, try to set the power state appropriately */
3645 void intel_dp_set_power(struct intel_dp *intel_dp, u8 mode)
3647 struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
3648 struct drm_i915_private *i915 = to_i915(encoder->base.dev);
3651 /* Should have a valid DPCD by this point */
3652 if (intel_dp->dpcd[DP_DPCD_REV] < 0x11)
3655 if (mode != DP_SET_POWER_D0) {
3656 if (downstream_hpd_needs_d0(intel_dp))
3659 ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER, mode);
3661 struct intel_lspcon *lspcon = dp_to_lspcon(intel_dp);
3663 lspcon_resume(dp_to_dig_port(intel_dp));
3665 /* Write the source OUI as early as possible */
3666 if (intel_dp_is_edp(intel_dp))
3667 intel_edp_init_source_oui(intel_dp, false);
3670 * When turning on, we need to retry for 1ms to give the sink time to wake up.
3673 for (i = 0; i < 3; i++) {
3674 ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER, mode);
3680 if (ret == 1 && lspcon->active)
3681 lspcon_wait_pcon_mode(lspcon);
3685 drm_dbg_kms(&i915->drm, "[ENCODER:%d:%s] Set power to %s failed\n",
3686 encoder->base.base.id, encoder->base.name,
3687 mode == DP_SET_POWER_D0 ? "D0" : "D3");
3690 static bool cpt_dp_port_selected(struct drm_i915_private *dev_priv,
3691 enum port port, enum pipe *pipe)
3695 for_each_pipe(dev_priv, p) {
3696 u32 val = intel_de_read(dev_priv, TRANS_DP_CTL(p));
3698 if ((val & TRANS_DP_PORT_SEL_MASK) == TRANS_DP_PORT_SEL(port)) {
3704 drm_dbg_kms(&dev_priv->drm, "No pipe for DP port %c found\n",
3707 /* must initialize pipe to something for the asserts */
3713 bool intel_dp_port_enabled(struct drm_i915_private *dev_priv,
3714 i915_reg_t dp_reg, enum port port,
3720 val = intel_de_read(dev_priv, dp_reg);
3722 ret = val & DP_PORT_EN;
3724 /* asserts want to know the pipe even if the port is disabled */
3725 if (IS_IVYBRIDGE(dev_priv) && port == PORT_A)
3726 *pipe = (val & DP_PIPE_SEL_MASK_IVB) >> DP_PIPE_SEL_SHIFT_IVB;
3727 else if (HAS_PCH_CPT(dev_priv) && port != PORT_A)
3728 ret &= cpt_dp_port_selected(dev_priv, port, pipe);
3729 else if (IS_CHERRYVIEW(dev_priv))
3730 *pipe = (val & DP_PIPE_SEL_MASK_CHV) >> DP_PIPE_SEL_SHIFT_CHV;
3732 *pipe = (val & DP_PIPE_SEL_MASK) >> DP_PIPE_SEL_SHIFT;
3737 static bool intel_dp_get_hw_state(struct intel_encoder *encoder,
3740 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
3741 struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
3742 intel_wakeref_t wakeref;
3745 wakeref = intel_display_power_get_if_enabled(dev_priv,
3746 encoder->power_domain);
3750 ret = intel_dp_port_enabled(dev_priv, intel_dp->output_reg,
3751 encoder->port, pipe);
3753 intel_display_power_put(dev_priv, encoder->power_domain, wakeref);
3758 static void intel_dp_get_config(struct intel_encoder *encoder,
3759 struct intel_crtc_state *pipe_config)
3761 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
3762 struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
3764 enum port port = encoder->port;
3765 struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
3767 if (encoder->type == INTEL_OUTPUT_EDP)
3768 pipe_config->output_types |= BIT(INTEL_OUTPUT_EDP);
3770 pipe_config->output_types |= BIT(INTEL_OUTPUT_DP);
3772 tmp = intel_de_read(dev_priv, intel_dp->output_reg);
3774 pipe_config->has_audio = tmp & DP_AUDIO_OUTPUT_ENABLE && port != PORT_A;
3776 if (HAS_PCH_CPT(dev_priv) && port != PORT_A) {
3777 u32 trans_dp = intel_de_read(dev_priv,
3778 TRANS_DP_CTL(crtc->pipe));
3780 if (trans_dp & TRANS_DP_HSYNC_ACTIVE_HIGH)
3781 flags |= DRM_MODE_FLAG_PHSYNC;
3783 flags |= DRM_MODE_FLAG_NHSYNC;
3785 if (trans_dp & TRANS_DP_VSYNC_ACTIVE_HIGH)
3786 flags |= DRM_MODE_FLAG_PVSYNC;
3788 flags |= DRM_MODE_FLAG_NVSYNC;
3790 if (tmp & DP_SYNC_HS_HIGH)
3791 flags |= DRM_MODE_FLAG_PHSYNC;
3793 flags |= DRM_MODE_FLAG_NHSYNC;
3795 if (tmp & DP_SYNC_VS_HIGH)
3796 flags |= DRM_MODE_FLAG_PVSYNC;
3798 flags |= DRM_MODE_FLAG_NVSYNC;
3801 pipe_config->hw.adjusted_mode.flags |= flags;
3803 if (IS_G4X(dev_priv) && tmp & DP_COLOR_RANGE_16_235)
3804 pipe_config->limited_color_range = true;
3806 pipe_config->lane_count =
3807 ((tmp & DP_PORT_WIDTH_MASK) >> DP_PORT_WIDTH_SHIFT) + 1;
3809 intel_dp_get_m_n(crtc, pipe_config);
3811 if (port == PORT_A) {
3812 if ((intel_de_read(dev_priv, DP_A) & DP_PLL_FREQ_MASK) == DP_PLL_FREQ_162MHZ)
3813 pipe_config->port_clock = 162000;
3815 pipe_config->port_clock = 270000;
3818 pipe_config->hw.adjusted_mode.crtc_clock =
3819 intel_dotclock_calculate(pipe_config->port_clock,
3820 &pipe_config->dp_m_n);
3822 if (intel_dp_is_edp(intel_dp) && dev_priv->vbt.edp.bpp &&
3823 pipe_config->pipe_bpp > dev_priv->vbt.edp.bpp) {
3825 * This is a big fat ugly hack.
3827 * Some machines in UEFI boot mode provide us a VBT that has 18
3828 * bpp and 1.62 GHz link bandwidth for eDP, which for reasons
3829 * unknown we fail to light up. Yet the same BIOS boots up with
3830 * 24 bpp and 2.7 GHz link. Use the same bpp as the BIOS uses as
3831 * max, not what it tells us to use.
3833 * Note: This will still be broken if the eDP panel is not lit
3834 * up by the BIOS, and thus we can't get the mode at module load time.
3837 drm_dbg_kms(&dev_priv->drm,
3838 "pipe has %d bpp for eDP panel, overriding BIOS-provided max %d bpp\n",
3839 pipe_config->pipe_bpp, dev_priv->vbt.edp.bpp);
3840 dev_priv->vbt.edp.bpp = pipe_config->pipe_bpp;
3845 intel_dp_get_dpcd(struct intel_dp *intel_dp);
3848 * intel_dp_sync_state - sync the encoder state during init/resume
3849 * @encoder: intel encoder to sync
3850 * @crtc_state: state for the CRTC connected to the encoder
3852 * Sync any state stored in the encoder wrt. HW state during driver init
3853 * and system resume.
3855 void intel_dp_sync_state(struct intel_encoder *encoder,
3856 const struct intel_crtc_state *crtc_state)
3858 struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
3861 * Don't clobber DPCD if it's been already read out during output
3862 * setup (eDP) or detect.
3864 if (intel_dp->dpcd[DP_DPCD_REV] == 0)
3865 intel_dp_get_dpcd(intel_dp);
3867 intel_dp->max_link_lane_count = intel_dp_max_common_lane_count(intel_dp);
3868 intel_dp->max_link_rate = intel_dp_max_common_rate(intel_dp);
3871 bool intel_dp_initial_fastset_check(struct intel_encoder *encoder,
3872 struct intel_crtc_state *crtc_state)
3874 struct drm_i915_private *i915 = to_i915(encoder->base.dev);
3875 struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
3878 * If the BIOS has set an unsupported or non-standard link rate for
3879 * some reason, force an encoder recompute and a full modeset.
3881 if (intel_dp_rate_index(intel_dp->source_rates, intel_dp->num_source_rates,
3882 crtc_state->port_clock) < 0) {
3883 drm_dbg_kms(&i915->drm, "Forcing full modeset due to unsupported link rate\n");
3884 crtc_state->uapi.connectors_changed = true;
3889 * FIXME hack to force full modeset when DSC is being used.
3891 * As long as we do not have full state readout and config comparison
3892 * of crtc_state->dsc, we have no way to ensure reliable fastset.
3893 * Remove once we have readout for DSC.
3895 if (crtc_state->dsc.compression_enable) {
3896 drm_dbg_kms(&i915->drm, "Forcing full modeset due to DSC being enabled\n");
3897 crtc_state->uapi.mode_changed = true;
3901 if (CAN_PSR(i915) && intel_dp_is_edp(intel_dp)) {
3902 drm_dbg_kms(&i915->drm, "Forcing full modeset to compute PSR state\n");
3903 crtc_state->uapi.mode_changed = true;
3910 static void intel_disable_dp(struct intel_atomic_state *state,
3911 struct intel_encoder *encoder,
3912 const struct intel_crtc_state *old_crtc_state,
3913 const struct drm_connector_state *old_conn_state)
3915 struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
3917 intel_dp->link_trained = false;
3919 if (old_crtc_state->has_audio)
3920 intel_audio_codec_disable(encoder,
3921 old_crtc_state, old_conn_state);
3923 /* Make sure the panel is off before trying to change the mode. But also
3924 * ensure that we have vdd while we switch off the panel. */
3925 intel_edp_panel_vdd_on(intel_dp);
3926 intel_edp_backlight_off(old_conn_state);
3927 intel_dp_set_power(intel_dp, DP_SET_POWER_D3);
3928 intel_edp_panel_off(intel_dp);
3929 intel_dp->frl.is_trained = false;
3930 intel_dp->frl.trained_rate_gbps = 0;
3933 static void g4x_disable_dp(struct intel_atomic_state *state,
3934 struct intel_encoder *encoder,
3935 const struct intel_crtc_state *old_crtc_state,
3936 const struct drm_connector_state *old_conn_state)
3938 intel_disable_dp(state, encoder, old_crtc_state, old_conn_state);
3941 static void vlv_disable_dp(struct intel_atomic_state *state,
3942 struct intel_encoder *encoder,
3943 const struct intel_crtc_state *old_crtc_state,
3944 const struct drm_connector_state *old_conn_state)
3946 intel_disable_dp(state, encoder, old_crtc_state, old_conn_state);
3949 static void g4x_post_disable_dp(struct intel_atomic_state *state,
3950 struct intel_encoder *encoder,
3951 const struct intel_crtc_state *old_crtc_state,
3952 const struct drm_connector_state *old_conn_state)
3954 struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
3955 enum port port = encoder->port;
3958 * Bspec does not list a specific disable sequence for g4x DP.
3959 * Follow the ilk+ sequence (disable pipe before the port) for
3960 * g4x DP as it does not suffer from underruns like the normal
3961 * g4x modeset sequence (disable pipe after the port).
3963 intel_dp_link_down(encoder, old_crtc_state);
3965 /* Only ilk+ has port A */
3967 ilk_edp_pll_off(intel_dp, old_crtc_state);
3970 static void vlv_post_disable_dp(struct intel_atomic_state *state,
3971 struct intel_encoder *encoder,
3972 const struct intel_crtc_state *old_crtc_state,
3973 const struct drm_connector_state *old_conn_state)
3975 intel_dp_link_down(encoder, old_crtc_state);
3978 static void chv_post_disable_dp(struct intel_atomic_state *state,
3979 struct intel_encoder *encoder,
3980 const struct intel_crtc_state *old_crtc_state,
3981 const struct drm_connector_state *old_conn_state)
3983 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
3985 intel_dp_link_down(encoder, old_crtc_state);
3987 vlv_dpio_get(dev_priv);
3989 /* Assert data lane reset */
3990 chv_data_lane_soft_reset(encoder, old_crtc_state, true);
3992 vlv_dpio_put(dev_priv);
3996 cpt_set_link_train(struct intel_dp *intel_dp,
3997 const struct intel_crtc_state *crtc_state,
4000 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
4001 u32 *DP = &intel_dp->DP;
4003 *DP &= ~DP_LINK_TRAIN_MASK_CPT;
4005 switch (intel_dp_training_pattern_symbol(dp_train_pat)) {
4006 case DP_TRAINING_PATTERN_DISABLE:
4007 *DP |= DP_LINK_TRAIN_OFF_CPT;
4009 case DP_TRAINING_PATTERN_1:
4010 *DP |= DP_LINK_TRAIN_PAT_1_CPT;
4012 case DP_TRAINING_PATTERN_2:
4013 *DP |= DP_LINK_TRAIN_PAT_2_CPT;
4015 case DP_TRAINING_PATTERN_3:
4016 drm_dbg_kms(&dev_priv->drm,
4017 "TPS3 not supported, using TPS2 instead\n");
4018 *DP |= DP_LINK_TRAIN_PAT_2_CPT;
4022 intel_de_write(dev_priv, intel_dp->output_reg, intel_dp->DP);
4023 intel_de_posting_read(dev_priv, intel_dp->output_reg);
4026 static void intel_dp_get_pcon_dsc_cap(struct intel_dp *intel_dp)
4028 struct drm_i915_private *i915 = dp_to_i915(intel_dp);
4030 /* Clear the cached register set to avoid using stale values */
4032 memset(intel_dp->pcon_dsc_dpcd, 0, sizeof(intel_dp->pcon_dsc_dpcd));
4034 if (drm_dp_dpcd_read(&intel_dp->aux, DP_PCON_DSC_ENCODER,
4035 intel_dp->pcon_dsc_dpcd,
4036 sizeof(intel_dp->pcon_dsc_dpcd)) < 0)
4037 drm_err(&i915->drm, "Failed to read DPCD register 0x%x\n",
4038 DP_PCON_DSC_ENCODER);
4040 drm_dbg_kms(&i915->drm, "PCON ENCODER DSC DPCD: %*ph\n",
4041 (int)sizeof(intel_dp->pcon_dsc_dpcd), intel_dp->pcon_dsc_dpcd);
4044 static int intel_dp_pcon_get_frl_mask(u8 frl_bw_mask)
4046 int bw_gbps[] = {9, 18, 24, 32, 40, 48};
4049 for (i = ARRAY_SIZE(bw_gbps) - 1; i >= 0; i--) {
4050 if (frl_bw_mask & (1 << i))
4056 static int intel_dp_pcon_set_frl_mask(int max_frl)
4060 return DP_PCON_FRL_BW_MASK_48GBPS;
4062 return DP_PCON_FRL_BW_MASK_40GBPS;
4064 return DP_PCON_FRL_BW_MASK_32GBPS;
4066 return DP_PCON_FRL_BW_MASK_24GBPS;
4068 return DP_PCON_FRL_BW_MASK_18GBPS;
4070 return DP_PCON_FRL_BW_MASK_9GBPS;
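/*
 * Illustrative: intel_dp_pcon_set_frl_mask(40) returns
 * DP_PCON_FRL_BW_MASK_40GBPS, and feeding a trained mask with the
 * 40 Gbps bit set back into intel_dp_pcon_get_frl_mask() yields 40.
 */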
4076 static int intel_dp_hdmi_sink_max_frl(struct intel_dp *intel_dp)
4078 struct intel_connector *intel_connector = intel_dp->attached_connector;
4079 struct drm_connector *connector = &intel_connector->base;
4081 int max_lanes, rate_per_lane;
4082 int max_dsc_lanes, dsc_rate_per_lane;
4084 max_lanes = connector->display_info.hdmi.max_lanes;
4085 rate_per_lane = connector->display_info.hdmi.max_frl_rate_per_lane;
4086 max_frl_rate = max_lanes * rate_per_lane;
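/*
 * Illustrative: an HDMI 2.1 sink advertising 4 lanes at 12 Gbps per
 * lane yields a 48 Gbps max FRL rate, possibly clamped further by its
 * DSC capabilities below.
 */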
4088 if (connector->display_info.hdmi.dsc_cap.v_1p2) {
4089 max_dsc_lanes = connector->display_info.hdmi.dsc_cap.max_lanes;
4090 dsc_rate_per_lane = connector->display_info.hdmi.dsc_cap.max_frl_rate_per_lane;
4091 if (max_dsc_lanes && dsc_rate_per_lane)
4092 max_frl_rate = min(max_frl_rate, max_dsc_lanes * dsc_rate_per_lane);
4095 return max_frl_rate;
4098 static int intel_dp_pcon_start_frl_training(struct intel_dp *intel_dp)
4100 #define PCON_EXTENDED_TRAIN_MODE (1 > 0)
4101 #define PCON_CONCURRENT_MODE (1 > 0)
4102 #define PCON_SEQUENTIAL_MODE !PCON_CONCURRENT_MODE
4103 #define PCON_NORMAL_TRAIN_MODE !PCON_EXTENDED_TRAIN_MODE
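/*
 * The (1 > 0) idiom above simply evaluates to true; these macros act as
 * boolean selectors between extended/normal training and
 * concurrent/sequential link bring-up.
 */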
4104 #define TIMEOUT_FRL_READY_MS 500
4105 #define TIMEOUT_HDMI_LINK_ACTIVE_MS 1000
4107 struct drm_i915_private *i915 = dp_to_i915(intel_dp);
4108 int max_frl_bw, max_pcon_frl_bw, max_edid_frl_bw, ret;
4109 u8 max_frl_bw_mask = 0, frl_trained_mask;
4112 ret = drm_dp_pcon_reset_frl_config(&intel_dp->aux);
4116 max_pcon_frl_bw = intel_dp->dfp.pcon_max_frl_bw;
4117 drm_dbg(&i915->drm, "PCON max rate = %d Gbps\n", max_pcon_frl_bw);
4119 max_edid_frl_bw = intel_dp_hdmi_sink_max_frl(intel_dp);
4120 drm_dbg(&i915->drm, "Sink max rate from EDID = %d Gbps\n", max_edid_frl_bw);
4122 max_frl_bw = min(max_edid_frl_bw, max_pcon_frl_bw);
4124 if (max_frl_bw <= 0)
4127 ret = drm_dp_pcon_frl_prepare(&intel_dp->aux, false);
4130 /* Wait for PCON to be FRL Ready */
4131 wait_for(is_active = drm_dp_pcon_is_frl_ready(&intel_dp->aux) == true, TIMEOUT_FRL_READY_MS);
4136 max_frl_bw_mask = intel_dp_pcon_set_frl_mask(max_frl_bw);
4137 ret = drm_dp_pcon_frl_configure_1(&intel_dp->aux, max_frl_bw, PCON_SEQUENTIAL_MODE);
4140 ret = drm_dp_pcon_frl_configure_2(&intel_dp->aux, max_frl_bw_mask, PCON_NORMAL_TRAIN_MODE);
4143 ret = drm_dp_pcon_frl_enable(&intel_dp->aux);
4147	 * Wait for FRL training to complete, then
4148	 * check whether the HDMI link is up and active.
4150 wait_for(is_active = drm_dp_pcon_hdmi_link_active(&intel_dp->aux) == true, TIMEOUT_HDMI_LINK_ACTIVE_MS);
4155 /* Verify HDMI Link configuration shows FRL Mode */
4156 if (drm_dp_pcon_hdmi_link_mode(&intel_dp->aux, &frl_trained_mask) !=
4157 DP_PCON_HDMI_MODE_FRL) {
4158 drm_dbg(&i915->drm, "HDMI couldn't be trained in FRL Mode\n");
4161 drm_dbg(&i915->drm, "MAX_FRL_MASK = %u, FRL_TRAINED_MASK = %u\n", max_frl_bw_mask, frl_trained_mask);
4163 intel_dp->frl.trained_rate_gbps = intel_dp_pcon_get_frl_mask(frl_trained_mask);
4164 intel_dp->frl.is_trained = true;
4165	drm_dbg(&i915->drm, "FRL trained with: %d Gbps\n", intel_dp->frl.trained_rate_gbps);
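/*
 * To recap the sequence above: reset the FRL config, take the usable
 * bandwidth as min(PCON cap, EDID cap), prepare the PCON and poll until it
 * reports FRL-ready, issue configure_1/configure_2, enable FRL, then poll
 * for an active HDMI link and verify that the trained link mode is FRL.
 * Any failure is reported to the caller, which can fall back to TMDS.
 */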
4170 static bool intel_dp_is_hdmi_2_1_sink(struct intel_dp *intel_dp)
4172 if (drm_dp_is_branch(intel_dp->dpcd) &&
4173 intel_dp->has_hdmi_sink &&
4174 intel_dp_hdmi_sink_max_frl(intel_dp) > 0)
4180 void intel_dp_check_frl_training(struct intel_dp *intel_dp)
4182 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
4184 /* Always go for FRL training if supported */
4185 if (!intel_dp_is_hdmi_2_1_sink(intel_dp) ||
4186 intel_dp->frl.is_trained)
4189 if (intel_dp_pcon_start_frl_training(intel_dp) < 0) {
4192	drm_dbg(&dev_priv->drm, "Couldn't set FRL mode, continuing with TMDS mode\n");
4193 ret = drm_dp_pcon_reset_frl_config(&intel_dp->aux);
4194 mode = drm_dp_pcon_hdmi_link_mode(&intel_dp->aux, NULL);
4196 if (ret < 0 || mode != DP_PCON_HDMI_MODE_TMDS)
4197 drm_dbg(&dev_priv->drm, "Issue with PCON, cannot set TMDS mode\n");
4199	drm_dbg(&dev_priv->drm, "FRL training completed\n");
4204 intel_dp_pcon_dsc_enc_slice_height(const struct intel_crtc_state *crtc_state)
4206 int vactive = crtc_state->hw.adjusted_mode.vdisplay;
4208 return intel_hdmi_dsc_get_slice_height(vactive);
4212 intel_dp_pcon_dsc_enc_slices(struct intel_dp *intel_dp,
4213 const struct intel_crtc_state *crtc_state)
4215 struct intel_connector *intel_connector = intel_dp->attached_connector;
4216 struct drm_connector *connector = &intel_connector->base;
4217 int hdmi_throughput = connector->display_info.hdmi.dsc_cap.clk_per_slice;
4218 int hdmi_max_slices = connector->display_info.hdmi.dsc_cap.max_slices;
4219 int pcon_max_slices = drm_dp_pcon_dsc_max_slices(intel_dp->pcon_dsc_dpcd);
4220 int pcon_max_slice_width = drm_dp_pcon_dsc_max_slice_width(intel_dp->pcon_dsc_dpcd);
4222 return intel_hdmi_dsc_get_num_slices(crtc_state, pcon_max_slices,
4223 pcon_max_slice_width,
4224 hdmi_max_slices, hdmi_throughput);
4228 intel_dp_pcon_dsc_enc_bpp(struct intel_dp *intel_dp,
4229 const struct intel_crtc_state *crtc_state,
4230 int num_slices, int slice_width)
4232 struct intel_connector *intel_connector = intel_dp->attached_connector;
4233 struct drm_connector *connector = &intel_connector->base;
4234 int output_format = crtc_state->output_format;
4235 bool hdmi_all_bpp = connector->display_info.hdmi.dsc_cap.all_bpp;
4236 int pcon_fractional_bpp = drm_dp_pcon_dsc_bpp_incr(intel_dp->pcon_dsc_dpcd);
4237 int hdmi_max_chunk_bytes =
4238 connector->display_info.hdmi.dsc_cap.total_chunk_kbytes * 1024;
4240 return intel_hdmi_dsc_get_bpp(pcon_fractional_bpp, slice_width,
4241 num_slices, output_format, hdmi_all_bpp,
4242 hdmi_max_chunk_bytes);
4246 intel_dp_pcon_dsc_configure(struct intel_dp *intel_dp,
4247 const struct intel_crtc_state *crtc_state)
4255 struct intel_connector *intel_connector = intel_dp->attached_connector;
4256 struct drm_i915_private *i915 = dp_to_i915(intel_dp);
4257 struct drm_connector *connector;
4258 bool hdmi_is_dsc_1_2;
4260 if (!intel_dp_is_hdmi_2_1_sink(intel_dp))
4263 if (!intel_connector)
4265 connector = &intel_connector->base;
4266 hdmi_is_dsc_1_2 = connector->display_info.hdmi.dsc_cap.v_1p2;
4268 if (!drm_dp_pcon_enc_is_dsc_1_2(intel_dp->pcon_dsc_dpcd) ||
4272 slice_height = intel_dp_pcon_dsc_enc_slice_height(crtc_state);
4276 num_slices = intel_dp_pcon_dsc_enc_slices(intel_dp, crtc_state);
4280 slice_width = DIV_ROUND_UP(crtc_state->hw.adjusted_mode.hdisplay,
4283 bits_per_pixel = intel_dp_pcon_dsc_enc_bpp(intel_dp, crtc_state,
4284 num_slices, slice_width);
4285 if (!bits_per_pixel)
4288 pps_param[0] = slice_height & 0xFF;
4289 pps_param[1] = slice_height >> 8;
4290 pps_param[2] = slice_width & 0xFF;
4291 pps_param[3] = slice_width >> 8;
4292 pps_param[4] = bits_per_pixel & 0xFF;
4293 pps_param[5] = (bits_per_pixel >> 8) & 0x3;
4295 ret = drm_dp_pcon_pps_override_param(&intel_dp->aux, pps_param);
4297 drm_dbg_kms(&i915->drm, "Failed to set pcon DSC\n");
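/*
 * Worked example of the pps_param[] packing above (illustrative values):
 * slice_height = 108 (0x006c), slice_width = 1920 (0x0780) and
 * bits_per_pixel = 0x180 yield
 *
 *	pps_param[0] = 0x6c;	// slice_height low byte
 *	pps_param[1] = 0x00;	// slice_height high byte
 *	pps_param[2] = 0x80;	// slice_width low byte
 *	pps_param[3] = 0x07;	// slice_width high byte
 *	pps_param[4] = 0x80;	// bpp low byte
 *	pps_param[5] = 0x01;	// bpp bits [9:8], masked to two bits
 */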
4301 g4x_set_link_train(struct intel_dp *intel_dp,
4302 const struct intel_crtc_state *crtc_state,
4305 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
4306 u32 *DP = &intel_dp->DP;
4308 *DP &= ~DP_LINK_TRAIN_MASK;
4310 switch (intel_dp_training_pattern_symbol(dp_train_pat)) {
4311 case DP_TRAINING_PATTERN_DISABLE:
4312 *DP |= DP_LINK_TRAIN_OFF;
4314 case DP_TRAINING_PATTERN_1:
4315 *DP |= DP_LINK_TRAIN_PAT_1;
4317 case DP_TRAINING_PATTERN_2:
4318 *DP |= DP_LINK_TRAIN_PAT_2;
4320 case DP_TRAINING_PATTERN_3:
4321 drm_dbg_kms(&dev_priv->drm,
4322 "TPS3 not supported, using TPS2 instead\n");
4323 *DP |= DP_LINK_TRAIN_PAT_2;
4327 intel_de_write(dev_priv, intel_dp->output_reg, intel_dp->DP);
4328 intel_de_posting_read(dev_priv, intel_dp->output_reg);
4331 static void intel_dp_enable_port(struct intel_dp *intel_dp,
4332 const struct intel_crtc_state *crtc_state)
4334 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
4336 /* enable with pattern 1 (as per spec) */
4338 intel_dp_program_link_training_pattern(intel_dp, crtc_state,
4339 DP_TRAINING_PATTERN_1);
4342 * Magic for VLV/CHV. We _must_ first set up the register
4343 * without actually enabling the port, and then do another
4344 * write to enable the port. Otherwise link training will
4345 * fail when the power sequencer is freshly used for this port.
4347 intel_dp->DP |= DP_PORT_EN;
4348 if (crtc_state->has_audio)
4349 intel_dp->DP |= DP_AUDIO_OUTPUT_ENABLE;
4351 intel_de_write(dev_priv, intel_dp->output_reg, intel_dp->DP);
4352 intel_de_posting_read(dev_priv, intel_dp->output_reg);
4355 void intel_dp_configure_protocol_converter(struct intel_dp *intel_dp,
4356 const struct intel_crtc_state *crtc_state)
4358 struct drm_i915_private *i915 = dp_to_i915(intel_dp);
4361 if (intel_dp->dpcd[DP_DPCD_REV] < 0x13)
4364 if (!drm_dp_is_branch(intel_dp->dpcd))
4367 tmp = intel_dp->has_hdmi_sink ?
4368 DP_HDMI_DVI_OUTPUT_CONFIG : 0;
4370 if (drm_dp_dpcd_writeb(&intel_dp->aux,
4371 DP_PROTOCOL_CONVERTER_CONTROL_0, tmp) != 1)
4372 drm_dbg_kms(&i915->drm, "Failed to set protocol converter HDMI mode to %s\n",
4373 enableddisabled(intel_dp->has_hdmi_sink));
4375 tmp = intel_dp->dfp.ycbcr_444_to_420 ?
4376 DP_CONVERSION_TO_YCBCR420_ENABLE : 0;
4378 if (drm_dp_dpcd_writeb(&intel_dp->aux,
4379 DP_PROTOCOL_CONVERTER_CONTROL_1, tmp) != 1)
4380 drm_dbg_kms(&i915->drm,
4381 "Failed to set protocol converter YCbCr 4:2:0 conversion mode to %s\n",
4382 enableddisabled(intel_dp->dfp.ycbcr_444_to_420));
4385 if (intel_dp->dfp.rgb_to_ycbcr) {
4389	 * FIXME: Currently, if userspace selects BT2020 or BT709 but the PCON supports
4390	 * RGB->YCbCr only for the BT601 colorspace, we go ahead with BT601 as the default.
4393 tmp = DP_CONVERSION_BT601_RGB_YCBCR_ENABLE;
4395 bt2020 = drm_dp_downstream_rgb_to_ycbcr_conversion(intel_dp->dpcd,
4396 intel_dp->downstream_ports,
4397 DP_DS_HDMI_BT2020_RGB_YCBCR_CONV);
4398 bt709 = drm_dp_downstream_rgb_to_ycbcr_conversion(intel_dp->dpcd,
4399 intel_dp->downstream_ports,
4400 DP_DS_HDMI_BT709_RGB_YCBCR_CONV);
4401 switch (crtc_state->infoframes.vsc.colorimetry) {
4402 case DP_COLORIMETRY_BT2020_RGB:
4403 case DP_COLORIMETRY_BT2020_YCC:
4405 tmp = DP_CONVERSION_BT2020_RGB_YCBCR_ENABLE;
4407 case DP_COLORIMETRY_BT709_YCC:
4408 case DP_COLORIMETRY_XVYCC_709:
4410 tmp = DP_CONVERSION_BT709_RGB_YCBCR_ENABLE;
4417 if (drm_dp_pcon_convert_rgb_to_ycbcr(&intel_dp->aux, tmp) < 0)
4418 drm_dbg_kms(&i915->drm,
4419 "Failed to set protocol converter RGB->YCbCr conversion mode to %s\n",
4420 enableddisabled(tmp ? true : false));
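/*
 * For example (illustrative, not an exhaustive description of the DPCD
 * protocol): with an HDMI sink behind the branch device, CONTROL_0 is
 * written with DP_HDMI_DVI_OUTPUT_CONFIG to select HDMI output mode, and
 * CONTROL_1 with DP_CONVERSION_TO_YCBCR420_ENABLE only when the DFP is
 * doing the 4:4:4 -> 4:2:0 conversion; a DVI sink gets 0 in both.
 */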
4423 static void intel_enable_dp(struct intel_atomic_state *state,
4424 struct intel_encoder *encoder,
4425 const struct intel_crtc_state *pipe_config,
4426 const struct drm_connector_state *conn_state)
4428 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
4429 struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
4430 struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
4431 u32 dp_reg = intel_de_read(dev_priv, intel_dp->output_reg);
4432 enum pipe pipe = crtc->pipe;
4433 intel_wakeref_t wakeref;
4435 if (drm_WARN_ON(&dev_priv->drm, dp_reg & DP_PORT_EN))
4438 with_pps_lock(intel_dp, wakeref) {
4439 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
4440 vlv_init_panel_power_sequencer(encoder, pipe_config);
4442 intel_dp_enable_port(intel_dp, pipe_config);
4444 edp_panel_vdd_on(intel_dp);
4445 edp_panel_on(intel_dp);
4446 edp_panel_vdd_off(intel_dp, true);
4449 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
4450 unsigned int lane_mask = 0x0;
4452 if (IS_CHERRYVIEW(dev_priv))
4453 lane_mask = intel_dp_unused_lane_mask(pipe_config->lane_count);
4455 vlv_wait_port_ready(dev_priv, dp_to_dig_port(intel_dp),
4459 intel_dp_set_power(intel_dp, DP_SET_POWER_D0);
4460 intel_dp_configure_protocol_converter(intel_dp, pipe_config);
4461 intel_dp_check_frl_training(intel_dp);
4462 intel_dp_pcon_dsc_configure(intel_dp, pipe_config);
4463 intel_dp_start_link_train(intel_dp, pipe_config);
4464 intel_dp_stop_link_train(intel_dp, pipe_config);
4466 if (pipe_config->has_audio) {
4467 drm_dbg(&dev_priv->drm, "Enabling DP audio on pipe %c\n",
4469 intel_audio_codec_enable(encoder, pipe_config, conn_state);
4473 static void g4x_enable_dp(struct intel_atomic_state *state,
4474 struct intel_encoder *encoder,
4475 const struct intel_crtc_state *pipe_config,
4476 const struct drm_connector_state *conn_state)
4478 intel_enable_dp(state, encoder, pipe_config, conn_state);
4479 intel_edp_backlight_on(pipe_config, conn_state);
4482 static void vlv_enable_dp(struct intel_atomic_state *state,
4483 struct intel_encoder *encoder,
4484 const struct intel_crtc_state *pipe_config,
4485 const struct drm_connector_state *conn_state)
4487 intel_edp_backlight_on(pipe_config, conn_state);
4490 static void g4x_pre_enable_dp(struct intel_atomic_state *state,
4491 struct intel_encoder *encoder,
4492 const struct intel_crtc_state *pipe_config,
4493 const struct drm_connector_state *conn_state)
4495 struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
4496 enum port port = encoder->port;
4498 intel_dp_prepare(encoder, pipe_config);
4500 /* Only ilk+ has port A */
4502 ilk_edp_pll_on(intel_dp, pipe_config);
4505 static void vlv_detach_power_sequencer(struct intel_dp *intel_dp)
4507 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
4508 struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
4509 enum pipe pipe = intel_dp->pps_pipe;
4510 i915_reg_t pp_on_reg = PP_ON_DELAYS(pipe);
4512 drm_WARN_ON(&dev_priv->drm, intel_dp->active_pipe != INVALID_PIPE);
4514 if (drm_WARN_ON(&dev_priv->drm, pipe != PIPE_A && pipe != PIPE_B))
4517 edp_panel_vdd_off_sync(intel_dp);
4520	 * VLV seems to get confused when multiple power sequencers
4521	 * have the same port selected (even if only one has power/vdd
4522	 * enabled). The failure manifests as vlv_wait_port_ready() failing.
4523	 * CHV, on the other hand, doesn't seem to mind having the same port
4524	 * selected in multiple power sequencers, but let's clear the
4525	 * port select always when logically disconnecting a power sequencer
4528 drm_dbg_kms(&dev_priv->drm,
4529 "detaching pipe %c power sequencer from [ENCODER:%d:%s]\n",
4530 pipe_name(pipe), dig_port->base.base.base.id,
4531 dig_port->base.base.name);
4532 intel_de_write(dev_priv, pp_on_reg, 0);
4533 intel_de_posting_read(dev_priv, pp_on_reg);
4535 intel_dp->pps_pipe = INVALID_PIPE;
4538 static void vlv_steal_power_sequencer(struct drm_i915_private *dev_priv,
4541 struct intel_encoder *encoder;
4543 lockdep_assert_held(&dev_priv->pps_mutex);
4545 for_each_intel_dp(&dev_priv->drm, encoder) {
4546 struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
4548 drm_WARN(&dev_priv->drm, intel_dp->active_pipe == pipe,
4549 "stealing pipe %c power sequencer from active [ENCODER:%d:%s]\n",
4550 pipe_name(pipe), encoder->base.base.id,
4551 encoder->base.name);
4553 if (intel_dp->pps_pipe != pipe)
4556 drm_dbg_kms(&dev_priv->drm,
4557 "stealing pipe %c power sequencer from [ENCODER:%d:%s]\n",
4558 pipe_name(pipe), encoder->base.base.id,
4559 encoder->base.name);
4561 /* make sure vdd is off before we steal it */
4562 vlv_detach_power_sequencer(intel_dp);
4566 static void vlv_init_panel_power_sequencer(struct intel_encoder *encoder,
4567 const struct intel_crtc_state *crtc_state)
4569 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
4570 struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
4571 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
4573 lockdep_assert_held(&dev_priv->pps_mutex);
4575 drm_WARN_ON(&dev_priv->drm, intel_dp->active_pipe != INVALID_PIPE);
4577 if (intel_dp->pps_pipe != INVALID_PIPE &&
4578 intel_dp->pps_pipe != crtc->pipe) {
4580	 * If another power sequencer was being used on this
4581	 * port previously, make sure to turn off vdd there while
4582	 * we still have control of it.
4584 vlv_detach_power_sequencer(intel_dp);
4588 * We may be stealing the power
4589 * sequencer from another port.
4591 vlv_steal_power_sequencer(dev_priv, crtc->pipe);
4593 intel_dp->active_pipe = crtc->pipe;
4595 if (!intel_dp_is_edp(intel_dp))
4598 /* now it's all ours */
4599 intel_dp->pps_pipe = crtc->pipe;
4601 drm_dbg_kms(&dev_priv->drm,
4602 "initializing pipe %c power sequencer for [ENCODER:%d:%s]\n",
4603 pipe_name(intel_dp->pps_pipe), encoder->base.base.id,
4604 encoder->base.name);
4606 /* init power sequencer on this pipe and port */
4607 intel_dp_init_panel_power_sequencer(intel_dp);
4608 intel_dp_init_panel_power_sequencer_registers(intel_dp, true);
4611 static void vlv_pre_enable_dp(struct intel_atomic_state *state,
4612 struct intel_encoder *encoder,
4613 const struct intel_crtc_state *pipe_config,
4614 const struct drm_connector_state *conn_state)
4616 vlv_phy_pre_encoder_enable(encoder, pipe_config);
4618 intel_enable_dp(state, encoder, pipe_config, conn_state);
4621 static void vlv_dp_pre_pll_enable(struct intel_atomic_state *state,
4622 struct intel_encoder *encoder,
4623 const struct intel_crtc_state *pipe_config,
4624 const struct drm_connector_state *conn_state)
4626 intel_dp_prepare(encoder, pipe_config);
4628 vlv_phy_pre_pll_enable(encoder, pipe_config);
4631 static void chv_pre_enable_dp(struct intel_atomic_state *state,
4632 struct intel_encoder *encoder,
4633 const struct intel_crtc_state *pipe_config,
4634 const struct drm_connector_state *conn_state)
4636 chv_phy_pre_encoder_enable(encoder, pipe_config);
4638 intel_enable_dp(state, encoder, pipe_config, conn_state);
4640 /* Second common lane will stay alive on its own now */
4641 chv_phy_release_cl2_override(encoder);
4644 static void chv_dp_pre_pll_enable(struct intel_atomic_state *state,
4645 struct intel_encoder *encoder,
4646 const struct intel_crtc_state *pipe_config,
4647 const struct drm_connector_state *conn_state)
4649 intel_dp_prepare(encoder, pipe_config);
4651 chv_phy_pre_pll_enable(encoder, pipe_config);
4654 static void chv_dp_post_pll_disable(struct intel_atomic_state *state,
4655 struct intel_encoder *encoder,
4656 const struct intel_crtc_state *old_crtc_state,
4657 const struct drm_connector_state *old_conn_state)
4659 chv_phy_post_pll_disable(encoder, old_crtc_state);
4662 static u8 intel_dp_voltage_max_2(struct intel_dp *intel_dp,
4663 const struct intel_crtc_state *crtc_state)
4665 return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
4668 static u8 intel_dp_voltage_max_3(struct intel_dp *intel_dp,
4669 const struct intel_crtc_state *crtc_state)
4671 return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
4674 static u8 intel_dp_preemph_max_2(struct intel_dp *intel_dp)
4676 return DP_TRAIN_PRE_EMPH_LEVEL_2;
4679 static u8 intel_dp_preemph_max_3(struct intel_dp *intel_dp)
4681 return DP_TRAIN_PRE_EMPH_LEVEL_3;
4684 static void vlv_set_signal_levels(struct intel_dp *intel_dp,
4685 const struct intel_crtc_state *crtc_state)
4687 struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
4688 unsigned long demph_reg_value, preemph_reg_value,
4689 uniqtranscale_reg_value;
4690 u8 train_set = intel_dp->train_set[0];
4692 switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
4693 case DP_TRAIN_PRE_EMPH_LEVEL_0:
4694 preemph_reg_value = 0x0004000;
4695 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
4696 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
4697 demph_reg_value = 0x2B405555;
4698 uniqtranscale_reg_value = 0x552AB83A;
4700 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
4701 demph_reg_value = 0x2B404040;
4702 uniqtranscale_reg_value = 0x5548B83A;
4704 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
4705 demph_reg_value = 0x2B245555;
4706 uniqtranscale_reg_value = 0x5560B83A;
4708 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
4709 demph_reg_value = 0x2B405555;
4710 uniqtranscale_reg_value = 0x5598DA3A;
4716 case DP_TRAIN_PRE_EMPH_LEVEL_1:
4717 preemph_reg_value = 0x0002000;
4718 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
4719 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
4720 demph_reg_value = 0x2B404040;
4721 uniqtranscale_reg_value = 0x5552B83A;
4723 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
4724 demph_reg_value = 0x2B404848;
4725 uniqtranscale_reg_value = 0x5580B83A;
4727 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
4728 demph_reg_value = 0x2B404040;
4729 uniqtranscale_reg_value = 0x55ADDA3A;
4735 case DP_TRAIN_PRE_EMPH_LEVEL_2:
4736 preemph_reg_value = 0x0000000;
4737 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
4738 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
4739 demph_reg_value = 0x2B305555;
4740 uniqtranscale_reg_value = 0x5570B83A;
4742 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
4743 demph_reg_value = 0x2B2B4040;
4744 uniqtranscale_reg_value = 0x55ADDA3A;
4750 case DP_TRAIN_PRE_EMPH_LEVEL_3:
4751 preemph_reg_value = 0x0006000;
4752 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
4753 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
4754 demph_reg_value = 0x1B405555;
4755 uniqtranscale_reg_value = 0x55ADDA3A;
4765 vlv_set_phy_signal_level(encoder, crtc_state,
4766 demph_reg_value, preemph_reg_value,
4767 uniqtranscale_reg_value, 0);
4770 static void chv_set_signal_levels(struct intel_dp *intel_dp,
4771 const struct intel_crtc_state *crtc_state)
4773 struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
4774 u32 deemph_reg_value, margin_reg_value;
4775 bool uniq_trans_scale = false;
4776 u8 train_set = intel_dp->train_set[0];
4778 switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
4779 case DP_TRAIN_PRE_EMPH_LEVEL_0:
4780 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
4781 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
4782 deemph_reg_value = 128;
4783 margin_reg_value = 52;
4785 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
4786 deemph_reg_value = 128;
4787 margin_reg_value = 77;
4789 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
4790 deemph_reg_value = 128;
4791 margin_reg_value = 102;
4793 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
4794 deemph_reg_value = 128;
4795 margin_reg_value = 154;
4796 uniq_trans_scale = true;
4802 case DP_TRAIN_PRE_EMPH_LEVEL_1:
4803 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
4804 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
4805 deemph_reg_value = 85;
4806 margin_reg_value = 78;
4808 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
4809 deemph_reg_value = 85;
4810 margin_reg_value = 116;
4812 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
4813 deemph_reg_value = 85;
4814 margin_reg_value = 154;
4820 case DP_TRAIN_PRE_EMPH_LEVEL_2:
4821 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
4822 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
4823 deemph_reg_value = 64;
4824 margin_reg_value = 104;
4826 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
4827 deemph_reg_value = 64;
4828 margin_reg_value = 154;
4834 case DP_TRAIN_PRE_EMPH_LEVEL_3:
4835 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
4836 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
4837 deemph_reg_value = 43;
4838 margin_reg_value = 154;
4848 chv_set_phy_signal_level(encoder, crtc_state,
4849 deemph_reg_value, margin_reg_value,
4853 static u32 g4x_signal_levels(u8 train_set)
4855 u32 signal_levels = 0;
4857 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
4858 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
4860 signal_levels |= DP_VOLTAGE_0_4;
4862 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
4863 signal_levels |= DP_VOLTAGE_0_6;
4865 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
4866 signal_levels |= DP_VOLTAGE_0_8;
4868 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
4869 signal_levels |= DP_VOLTAGE_1_2;
4872 switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
4873 case DP_TRAIN_PRE_EMPH_LEVEL_0:
4875 signal_levels |= DP_PRE_EMPHASIS_0;
4877 case DP_TRAIN_PRE_EMPH_LEVEL_1:
4878 signal_levels |= DP_PRE_EMPHASIS_3_5;
4880 case DP_TRAIN_PRE_EMPH_LEVEL_2:
4881 signal_levels |= DP_PRE_EMPHASIS_6;
4883 case DP_TRAIN_PRE_EMPH_LEVEL_3:
4884 signal_levels |= DP_PRE_EMPHASIS_9_5;
4887 return signal_levels;
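/*
 * For example (hypothetical train_set request): voltage swing level 1 with
 * pre-emphasis level 1 composes to
 *
 *	g4x_signal_levels(DP_TRAIN_VOLTAGE_SWING_LEVEL_1 |
 *			  DP_TRAIN_PRE_EMPH_LEVEL_1)
 *		== (DP_VOLTAGE_0_6 | DP_PRE_EMPHASIS_3_5)
 *
 * i.e. 0.6V swing with 3.5dB pre-emphasis programmed into the port register.
 */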
4891 g4x_set_signal_levels(struct intel_dp *intel_dp,
4892 const struct intel_crtc_state *crtc_state)
4894 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
4895 u8 train_set = intel_dp->train_set[0];
4898 signal_levels = g4x_signal_levels(train_set);
4900 drm_dbg_kms(&dev_priv->drm, "Using signal levels %08x\n",
4903 intel_dp->DP &= ~(DP_VOLTAGE_MASK | DP_PRE_EMPHASIS_MASK);
4904 intel_dp->DP |= signal_levels;
4906 intel_de_write(dev_priv, intel_dp->output_reg, intel_dp->DP);
4907 intel_de_posting_read(dev_priv, intel_dp->output_reg);
4910 /* SNB CPU eDP voltage swing and pre-emphasis control */
4911 static u32 snb_cpu_edp_signal_levels(u8 train_set)
4913 u8 signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
4914 DP_TRAIN_PRE_EMPHASIS_MASK);
4916 switch (signal_levels) {
4917 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0:
4918 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0:
4919 return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B;
4920 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1:
4921 return EDP_LINK_TRAIN_400MV_3_5DB_SNB_B;
4922 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2:
4923 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_2:
4924 return EDP_LINK_TRAIN_400_600MV_6DB_SNB_B;
4925 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1:
4926 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1:
4927 return EDP_LINK_TRAIN_600_800MV_3_5DB_SNB_B;
4928 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0:
4929 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3 | DP_TRAIN_PRE_EMPH_LEVEL_0:
4930 return EDP_LINK_TRAIN_800_1200MV_0DB_SNB_B;
4932		DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level: "
4933			      "0x%x\n", signal_levels);
4934 return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B;
4939 snb_cpu_edp_set_signal_levels(struct intel_dp *intel_dp,
4940 const struct intel_crtc_state *crtc_state)
4942 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
4943 u8 train_set = intel_dp->train_set[0];
4946 signal_levels = snb_cpu_edp_signal_levels(train_set);
4948 drm_dbg_kms(&dev_priv->drm, "Using signal levels %08x\n",
4951 intel_dp->DP &= ~EDP_LINK_TRAIN_VOL_EMP_MASK_SNB;
4952 intel_dp->DP |= signal_levels;
4954 intel_de_write(dev_priv, intel_dp->output_reg, intel_dp->DP);
4955 intel_de_posting_read(dev_priv, intel_dp->output_reg);
4958 /* IVB CPU eDP voltage swing and pre-emphasis control */
4959 static u32 ivb_cpu_edp_signal_levels(u8 train_set)
4961 u8 signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
4962 DP_TRAIN_PRE_EMPHASIS_MASK);
4964 switch (signal_levels) {
4965 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0:
4966 return EDP_LINK_TRAIN_400MV_0DB_IVB;
4967 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1:
4968 return EDP_LINK_TRAIN_400MV_3_5DB_IVB;
4969 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2:
4970 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_2:
4971 return EDP_LINK_TRAIN_400MV_6DB_IVB;
4973 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0:
4974 return EDP_LINK_TRAIN_600MV_0DB_IVB;
4975 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1:
4976 return EDP_LINK_TRAIN_600MV_3_5DB_IVB;
4978 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0:
4979 return EDP_LINK_TRAIN_800MV_0DB_IVB;
4980 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1:
4981 return EDP_LINK_TRAIN_800MV_3_5DB_IVB;
4984		DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level: "
4985			      "0x%x\n", signal_levels);
4986 return EDP_LINK_TRAIN_500MV_0DB_IVB;
4991 ivb_cpu_edp_set_signal_levels(struct intel_dp *intel_dp,
4992 const struct intel_crtc_state *crtc_state)
4994 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
4995 u8 train_set = intel_dp->train_set[0];
4998 signal_levels = ivb_cpu_edp_signal_levels(train_set);
5000 drm_dbg_kms(&dev_priv->drm, "Using signal levels %08x\n",
5003 intel_dp->DP &= ~EDP_LINK_TRAIN_VOL_EMP_MASK_IVB;
5004 intel_dp->DP |= signal_levels;
5006 intel_de_write(dev_priv, intel_dp->output_reg, intel_dp->DP);
5007 intel_de_posting_read(dev_priv, intel_dp->output_reg);
5010 void intel_dp_set_signal_levels(struct intel_dp *intel_dp,
5011 const struct intel_crtc_state *crtc_state)
5013 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
5014 u8 train_set = intel_dp->train_set[0];
5016 drm_dbg_kms(&dev_priv->drm, "Using vswing level %d%s\n",
5017 train_set & DP_TRAIN_VOLTAGE_SWING_MASK,
5018 train_set & DP_TRAIN_MAX_SWING_REACHED ? " (max)" : "");
5019 drm_dbg_kms(&dev_priv->drm, "Using pre-emphasis level %d%s\n",
5020 (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) >>
5021 DP_TRAIN_PRE_EMPHASIS_SHIFT,
5022 train_set & DP_TRAIN_MAX_PRE_EMPHASIS_REACHED ?
5025 intel_dp->set_signal_levels(intel_dp, crtc_state);
5029 intel_dp_program_link_training_pattern(struct intel_dp *intel_dp,
5030 const struct intel_crtc_state *crtc_state,
5033 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
5035 if ((intel_dp_training_pattern_symbol(dp_train_pat)) !=
5036 DP_TRAINING_PATTERN_DISABLE)
5037 drm_dbg_kms(&dev_priv->drm,
5038 "Using DP training pattern TPS%d\n",
5039 intel_dp_training_pattern_symbol(dp_train_pat));
5041 intel_dp->set_link_train(intel_dp, crtc_state, dp_train_pat);
5045 intel_dp_link_down(struct intel_encoder *encoder,
5046 const struct intel_crtc_state *old_crtc_state)
5048 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
5049 struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
5050 struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
5051 enum port port = encoder->port;
5052 u32 DP = intel_dp->DP;
5054 if (drm_WARN_ON(&dev_priv->drm,
5055 (intel_de_read(dev_priv, intel_dp->output_reg) &
5059 drm_dbg_kms(&dev_priv->drm, "\n");
5061 if ((IS_IVYBRIDGE(dev_priv) && port == PORT_A) ||
5062 (HAS_PCH_CPT(dev_priv) && port != PORT_A)) {
5063 DP &= ~DP_LINK_TRAIN_MASK_CPT;
5064 DP |= DP_LINK_TRAIN_PAT_IDLE_CPT;
5066 DP &= ~DP_LINK_TRAIN_MASK;
5067 DP |= DP_LINK_TRAIN_PAT_IDLE;
5069 intel_de_write(dev_priv, intel_dp->output_reg, DP);
5070 intel_de_posting_read(dev_priv, intel_dp->output_reg);
5072 DP &= ~(DP_PORT_EN | DP_AUDIO_OUTPUT_ENABLE);
5073 intel_de_write(dev_priv, intel_dp->output_reg, DP);
5074 intel_de_posting_read(dev_priv, intel_dp->output_reg);
5077 * HW workaround for IBX, we need to move the port
5078 * to transcoder A after disabling it to allow the
5079 * matching HDMI port to be enabled on transcoder A.
5081 if (HAS_PCH_IBX(dev_priv) && crtc->pipe == PIPE_B && port != PORT_A) {
5083 * We get CPU/PCH FIFO underruns on the other pipe when
5084 * doing the workaround. Sweep them under the rug.
5086 intel_set_cpu_fifo_underrun_reporting(dev_priv, PIPE_A, false);
5087 intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, false);
5089 /* always enable with pattern 1 (as per spec) */
5090 DP &= ~(DP_PIPE_SEL_MASK | DP_LINK_TRAIN_MASK);
5091 DP |= DP_PORT_EN | DP_PIPE_SEL(PIPE_A) |
5092 DP_LINK_TRAIN_PAT_1;
5093 intel_de_write(dev_priv, intel_dp->output_reg, DP);
5094 intel_de_posting_read(dev_priv, intel_dp->output_reg);
5097 intel_de_write(dev_priv, intel_dp->output_reg, DP);
5098 intel_de_posting_read(dev_priv, intel_dp->output_reg);
5100 intel_wait_for_vblank_if_active(dev_priv, PIPE_A);
5101 intel_set_cpu_fifo_underrun_reporting(dev_priv, PIPE_A, true);
5102 intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, true);
5105 msleep(intel_dp->panel_power_down_delay);
5109 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
5110 intel_wakeref_t wakeref;
5112 with_pps_lock(intel_dp, wakeref)
5113 intel_dp->active_pipe = INVALID_PIPE;
5117 bool intel_dp_get_colorimetry_status(struct intel_dp *intel_dp)
5121 if (drm_dp_dpcd_readb(&intel_dp->aux, DP_DPRX_FEATURE_ENUMERATION_LIST,
5124 return dprx & DP_VSC_SDP_EXT_FOR_COLORIMETRY_SUPPORTED;
5127 static void intel_dp_get_dsc_sink_cap(struct intel_dp *intel_dp)
5129 struct drm_i915_private *i915 = dp_to_i915(intel_dp);
5132 * Clear the cached register set to avoid using stale values
5133 * for the sinks that do not support DSC.
5135 memset(intel_dp->dsc_dpcd, 0, sizeof(intel_dp->dsc_dpcd));
5137 /* Clear fec_capable to avoid using stale values */
5138 intel_dp->fec_capable = 0;
5140 /* Cache the DSC DPCD if eDP or DP rev >= 1.4 */
5141 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x14 ||
5142 intel_dp->edp_dpcd[0] >= DP_EDP_14) {
5143 if (drm_dp_dpcd_read(&intel_dp->aux, DP_DSC_SUPPORT,
5145 sizeof(intel_dp->dsc_dpcd)) < 0)
5147 "Failed to read DPCD register 0x%x\n",
5150 drm_dbg_kms(&i915->drm, "DSC DPCD: %*ph\n",
5151 (int)sizeof(intel_dp->dsc_dpcd),
5152 intel_dp->dsc_dpcd);
5154 /* FEC is supported only on DP 1.4 */
5155 if (!intel_dp_is_edp(intel_dp) &&
5156 drm_dp_dpcd_readb(&intel_dp->aux, DP_FEC_CAPABILITY,
5157 &intel_dp->fec_capable) < 0)
5159 "Failed to read FEC DPCD register\n");
5161 drm_dbg_kms(&i915->drm, "FEC CAPABILITY: %x\n",
5162 intel_dp->fec_capable);
5167 intel_edp_init_dpcd(struct intel_dp *intel_dp)
5169 struct drm_i915_private *dev_priv =
5170 to_i915(dp_to_dig_port(intel_dp)->base.base.dev);
5172 /* this function is meant to be called only once */
5173 drm_WARN_ON(&dev_priv->drm, intel_dp->dpcd[DP_DPCD_REV] != 0);
5175 if (drm_dp_read_dpcd_caps(&intel_dp->aux, intel_dp->dpcd) != 0)
5178 drm_dp_read_desc(&intel_dp->aux, &intel_dp->desc,
5179 drm_dp_is_branch(intel_dp->dpcd));
5182 * Read the eDP display control registers.
5184 * Do this independent of DP_DPCD_DISPLAY_CONTROL_CAPABLE bit in
5185 * DP_EDP_CONFIGURATION_CAP, because some buggy displays do not have it
5186 * set, but require eDP 1.4+ detection (e.g. for supported link rates
5187 * method). The display control registers should read zero if they're
5188 * not supported anyway.
5190 if (drm_dp_dpcd_read(&intel_dp->aux, DP_EDP_DPCD_REV,
5191 intel_dp->edp_dpcd, sizeof(intel_dp->edp_dpcd)) ==
5192 sizeof(intel_dp->edp_dpcd))
5193 drm_dbg_kms(&dev_priv->drm, "eDP DPCD: %*ph\n",
5194 (int)sizeof(intel_dp->edp_dpcd),
5195 intel_dp->edp_dpcd);
5198 * This has to be called after intel_dp->edp_dpcd is filled, PSR checks
5199 * for SET_POWER_CAPABLE bit in intel_dp->edp_dpcd[1]
5201 intel_psr_init_dpcd(intel_dp);
5203 /* Read the eDP 1.4+ supported link rates. */
5204 if (intel_dp->edp_dpcd[0] >= DP_EDP_14) {
5205 __le16 sink_rates[DP_MAX_SUPPORTED_RATES];
5208 drm_dp_dpcd_read(&intel_dp->aux, DP_SUPPORTED_LINK_RATES,
5209 sink_rates, sizeof(sink_rates));
5211 for (i = 0; i < ARRAY_SIZE(sink_rates); i++) {
5212 int val = le16_to_cpu(sink_rates[i]);
5217 /* Value read multiplied by 200kHz gives the per-lane
5218 * link rate in kHz. The source rates are, however,
5219 * stored in terms of LS_Clk kHz. The full conversion
5220 * back to symbols is
5221 * (val * 200kHz)*(8/10 ch. encoding)*(1/8 bit to Byte)
5223 intel_dp->sink_rates[i] = (val * 200) / 10;
5225 intel_dp->num_sink_rates = i;
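/*
 * Worked example of the conversion above (illustrative sink value): a sink
 * advertising 1.62 Gbps per lane stores 8100 in DP_SUPPORTED_LINK_RATES;
 * 8100 * 200 kHz = 1,620,000 kHz link rate, and (8100 * 200) / 10 = 162000
 * is the LS_Clk value cached in sink_rates[]. Likewise 13500 encodes
 * 2.7 Gbps -> 270000.
 */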
5229 * Use DP_LINK_RATE_SET if DP_SUPPORTED_LINK_RATES are available,
5230 * default to DP_MAX_LINK_RATE and DP_LINK_BW_SET otherwise.
5232 if (intel_dp->num_sink_rates)
5233 intel_dp->use_rate_select = true;
5235 intel_dp_set_sink_rates(intel_dp);
5237 intel_dp_set_common_rates(intel_dp);
5239 /* Read the eDP DSC DPCD registers */
5240 if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
5241 intel_dp_get_dsc_sink_cap(intel_dp);
5244 * If needed, program our source OUI so we can make various Intel-specific AUX services
5245 * available (such as HDR backlight controls)
5247 intel_edp_init_source_oui(intel_dp, true);
5253 intel_dp_has_sink_count(struct intel_dp *intel_dp)
5255 if (!intel_dp->attached_connector)
5258 return drm_dp_read_sink_count_cap(&intel_dp->attached_connector->base,
5264 intel_dp_get_dpcd(struct intel_dp *intel_dp)
5268 intel_dp_lttpr_init(intel_dp);
5270 if (drm_dp_read_dpcd_caps(&intel_dp->aux, intel_dp->dpcd))
5274 * Don't clobber cached eDP rates. Also skip re-reading
5275 * the OUI/ID since we know it won't change.
5277 if (!intel_dp_is_edp(intel_dp)) {
5278 drm_dp_read_desc(&intel_dp->aux, &intel_dp->desc,
5279 drm_dp_is_branch(intel_dp->dpcd));
5281 intel_dp_set_sink_rates(intel_dp);
5282 intel_dp_set_common_rates(intel_dp);
5285 if (intel_dp_has_sink_count(intel_dp)) {
5286 ret = drm_dp_read_sink_count(&intel_dp->aux);
5291	 * The sink count can change between short-pulse HPD interrupts,
5292	 * so a member variable in intel_dp tracks any changes
5293	 * between them.
5295 intel_dp->sink_count = ret;
5298	 * SINK_COUNT == 0 and DOWNSTREAM_PORT_PRESENT == 1 implies that
5299	 * a dongle is present but no display. Unless we need to know
5300	 * whether a dongle is present, we don't need to update the
5301	 * downstream port information, so an early return here avoids
5302	 * unnecessary work.
5304 if (!intel_dp->sink_count)
5308 return drm_dp_read_downstream_info(&intel_dp->aux, intel_dp->dpcd,
5309 intel_dp->downstream_ports) == 0;
5313 intel_dp_can_mst(struct intel_dp *intel_dp)
5315 struct drm_i915_private *i915 = dp_to_i915(intel_dp);
5317 return i915->params.enable_dp_mst &&
5318 intel_dp->can_mst &&
5319 drm_dp_read_mst_cap(&intel_dp->aux, intel_dp->dpcd);
5323 intel_dp_configure_mst(struct intel_dp *intel_dp)
5325 struct drm_i915_private *i915 = dp_to_i915(intel_dp);
5326 struct intel_encoder *encoder =
5327 &dp_to_dig_port(intel_dp)->base;
5328 bool sink_can_mst = drm_dp_read_mst_cap(&intel_dp->aux, intel_dp->dpcd);
5330 drm_dbg_kms(&i915->drm,
5331 "[ENCODER:%d:%s] MST support: port: %s, sink: %s, modparam: %s\n",
5332 encoder->base.base.id, encoder->base.name,
5333 yesno(intel_dp->can_mst), yesno(sink_can_mst),
5334 yesno(i915->params.enable_dp_mst));
5336 if (!intel_dp->can_mst)
5339 intel_dp->is_mst = sink_can_mst &&
5340 i915->params.enable_dp_mst;
5342 drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr,
5347 intel_dp_get_sink_irq_esi(struct intel_dp *intel_dp, u8 *sink_irq_vector)
5349 return drm_dp_dpcd_read(&intel_dp->aux, DP_SINK_COUNT_ESI,
5350 sink_irq_vector, DP_DPRX_ESI_LEN) ==
5355 intel_dp_needs_vsc_sdp(const struct intel_crtc_state *crtc_state,
5356 const struct drm_connector_state *conn_state)
5359	 * As per DP 1.4a spec section 2.2.4.3 [MSA Field for Indication
5360	 * of Color Encoding Format and Content Color Gamut], to send
5361	 * YCBCR 420 or HDR BT.2020 signals we should use a DP VSC SDP.
5363 if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420)
5366 switch (conn_state->colorspace) {
5367 case DRM_MODE_COLORIMETRY_SYCC_601:
5368 case DRM_MODE_COLORIMETRY_OPYCC_601:
5369 case DRM_MODE_COLORIMETRY_BT2020_YCC:
5370 case DRM_MODE_COLORIMETRY_BT2020_RGB:
5371 case DRM_MODE_COLORIMETRY_BT2020_CYCC:
5380 static ssize_t intel_dp_vsc_sdp_pack(const struct drm_dp_vsc_sdp *vsc,
5381 struct dp_sdp *sdp, size_t size)
5383 size_t length = sizeof(struct dp_sdp);
5388 memset(sdp, 0, size);
5391 * Prepare VSC Header for SU as per DP 1.4a spec, Table 2-119
5392 * VSC SDP Header Bytes
5394 sdp->sdp_header.HB0 = 0; /* Secondary-Data Packet ID = 0 */
5395 sdp->sdp_header.HB1 = vsc->sdp_type; /* Secondary-data Packet Type */
5396 sdp->sdp_header.HB2 = vsc->revision; /* Revision Number */
5397 sdp->sdp_header.HB3 = vsc->length; /* Number of Valid Data Bytes */
5400	 * Only revision 0x5 supports Pixel Encoding/Colorimetry Format as
5401	 * per DP 1.4a spec.
5402	 */
5403 if (vsc->revision != 0x5)
5406 /* VSC SDP Payload for DB16 through DB18 */
5407 /* Pixel Encoding and Colorimetry Formats */
5408 sdp->db[16] = (vsc->pixelformat & 0xf) << 4; /* DB16[7:4] */
5409 sdp->db[16] |= vsc->colorimetry & 0xf; /* DB16[3:0] */
5416 sdp->db[17] = 0x1; /* DB17[3:0] */
5428 MISSING_CASE(vsc->bpc);
5431 /* Dynamic Range and Component Bit Depth */
5432 if (vsc->dynamic_range == DP_DYNAMIC_RANGE_CTA)
5433 sdp->db[17] |= 0x80; /* DB17[7] */
5436 sdp->db[18] = vsc->content_type & 0x7;
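/*
 * For example (hypothetical VSC contents): pixelformat = 0x1 (YCbCr 4:4:4)
 * and colorimetry = 0x3 pack into DB16 as
 *
 *	sdp->db[16] = (0x1 << 4) | 0x3;	// == 0x13
 *
 * with bpc/dynamic range ORed into DB17 and the content type into DB18.
 */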
5443 intel_dp_hdr_metadata_infoframe_sdp_pack(const struct hdmi_drm_infoframe *drm_infoframe,
5447 size_t length = sizeof(struct dp_sdp);
5448 const int infoframe_size = HDMI_INFOFRAME_HEADER_SIZE + HDMI_DRM_INFOFRAME_SIZE;
5449 unsigned char buf[HDMI_INFOFRAME_HEADER_SIZE + HDMI_DRM_INFOFRAME_SIZE];
5455 memset(sdp, 0, size);
5457 len = hdmi_drm_infoframe_pack_only(drm_infoframe, buf, sizeof(buf));
5459 DRM_DEBUG_KMS("buffer size is smaller than hdr metadata infoframe\n");
5463 if (len != infoframe_size) {
5464 DRM_DEBUG_KMS("wrong static hdr metadata size\n");
5469	 * Set up the infoframe sdp packet for HDR static metadata.
5470	 * Prepare the infoframe SDP header as per DP 1.4a spec,
5471	 * Table 2-100 and Table 2-101.
5474 /* Secondary-Data Packet ID, 00h for non-Audio INFOFRAME */
5475 sdp->sdp_header.HB0 = 0;
5477 * Packet Type 80h + Non-audio INFOFRAME Type value
5478 * HDMI_INFOFRAME_TYPE_DRM: 0x87
5479 * - 80h + Non-audio INFOFRAME Type value
5480 * - InfoFrame Type: 0x07
5481 * [CTA-861-G Table-42 Dynamic Range and Mastering InfoFrame]
5483 sdp->sdp_header.HB1 = drm_infoframe->type;
5485 * Least Significant Eight Bits of (Data Byte Count – 1)
5486 * infoframe_size - 1
5488 sdp->sdp_header.HB2 = 0x1D;
5489 /* INFOFRAME SDP Version Number */
5490 sdp->sdp_header.HB3 = (0x13 << 2);
5491 /* CTA Header Byte 2 (INFOFRAME Version Number) */
5492 sdp->db[0] = drm_infoframe->version;
5493 /* CTA Header Byte 3 (Length of INFOFRAME): HDMI_DRM_INFOFRAME_SIZE */
5494 sdp->db[1] = drm_infoframe->length;
5496 * Copy HDMI_DRM_INFOFRAME_SIZE size from a buffer after
5497 * HDMI_INFOFRAME_HEADER_SIZE
5499 BUILD_BUG_ON(sizeof(sdp->db) < HDMI_DRM_INFOFRAME_SIZE + 2);
5500 memcpy(&sdp->db[2], &buf[HDMI_INFOFRAME_HEADER_SIZE],
5501 HDMI_DRM_INFOFRAME_SIZE);
5504 * Size of DP infoframe sdp packet for HDR static metadata consists of
5505 * - DP SDP Header(struct dp_sdp_header): 4 bytes
5506 * - Two Data Blocks: 2 bytes
5507 * CTA Header Byte2 (INFOFRAME Version Number)
5508 * CTA Header Byte3 (Length of INFOFRAME)
5509 * - HDMI_DRM_INFOFRAME_SIZE: 26 bytes
5511	 * Prior to GEN11, the GMP register size is identical to the DP HDR static
5512	 * metadata infoframe size. GEN11+ has a larger size, and write_infoframe
5513	 * will pad the rest.
5515 return sizeof(struct dp_sdp_header) + 2 + HDMI_DRM_INFOFRAME_SIZE;
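/*
 * I.e. the returned size works out to 4 + 2 + 26 = 32 bytes: the 4-byte DP
 * SDP header, the two CTA header bytes kept in db[0..1], and the 26-byte
 * Dynamic Range and Mastering infoframe payload.
 */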
5518 static void intel_write_dp_sdp(struct intel_encoder *encoder,
5519 const struct intel_crtc_state *crtc_state,
5522 struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
5523 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
5524 struct dp_sdp sdp = {};
5527 if ((crtc_state->infoframes.enable &
5528 intel_hdmi_infoframe_enable(type)) == 0)
5533 len = intel_dp_vsc_sdp_pack(&crtc_state->infoframes.vsc, &sdp,
5536 case HDMI_PACKET_TYPE_GAMUT_METADATA:
5537 len = intel_dp_hdr_metadata_infoframe_sdp_pack(&crtc_state->infoframes.drm.drm,
5545 if (drm_WARN_ON(&dev_priv->drm, len < 0))
5548 dig_port->write_infoframe(encoder, crtc_state, type, &sdp, len);
5551 void intel_write_dp_vsc_sdp(struct intel_encoder *encoder,
5552 const struct intel_crtc_state *crtc_state,
5553 struct drm_dp_vsc_sdp *vsc)
5555 struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
5556 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
5557 struct dp_sdp sdp = {};
5560 len = intel_dp_vsc_sdp_pack(vsc, &sdp, sizeof(sdp));
5562 if (drm_WARN_ON(&dev_priv->drm, len < 0))
5565 dig_port->write_infoframe(encoder, crtc_state, DP_SDP_VSC,
5569 void intel_dp_set_infoframes(struct intel_encoder *encoder,
5571 const struct intel_crtc_state *crtc_state,
5572 const struct drm_connector_state *conn_state)
5574 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
5575 struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
5576 i915_reg_t reg = HSW_TVIDEO_DIP_CTL(crtc_state->cpu_transcoder);
5577 u32 dip_enable = VIDEO_DIP_ENABLE_AVI_HSW | VIDEO_DIP_ENABLE_GCP_HSW |
5578 VIDEO_DIP_ENABLE_VS_HSW | VIDEO_DIP_ENABLE_GMP_HSW |
5579 VIDEO_DIP_ENABLE_SPD_HSW | VIDEO_DIP_ENABLE_DRM_GLK;
5580 u32 val = intel_de_read(dev_priv, reg);
5582 /* TODO: Add DSC case (DIP_ENABLE_PPS) */
5583 /* When PSR is enabled, this routine doesn't disable VSC DIP */
5584 if (intel_psr_enabled(intel_dp))
5587 val &= ~(dip_enable | VIDEO_DIP_ENABLE_VSC_HSW);
5590 intel_de_write(dev_priv, reg, val);
5591 intel_de_posting_read(dev_priv, reg);
5595 intel_de_write(dev_priv, reg, val);
5596 intel_de_posting_read(dev_priv, reg);
5598 /* When PSR is enabled, VSC SDP is handled by PSR routine */
5599 if (!intel_psr_enabled(intel_dp))
5600 intel_write_dp_sdp(encoder, crtc_state, DP_SDP_VSC);
5602 intel_write_dp_sdp(encoder, crtc_state, HDMI_PACKET_TYPE_GAMUT_METADATA);
5605 static int intel_dp_vsc_sdp_unpack(struct drm_dp_vsc_sdp *vsc,
5606 const void *buffer, size_t size)
5608 const struct dp_sdp *sdp = buffer;
5610 if (size < sizeof(struct dp_sdp))
5613 memset(vsc, 0, size);
5615 if (sdp->sdp_header.HB0 != 0)
5618 if (sdp->sdp_header.HB1 != DP_SDP_VSC)
5621 vsc->sdp_type = sdp->sdp_header.HB1;
5622 vsc->revision = sdp->sdp_header.HB2;
5623 vsc->length = sdp->sdp_header.HB3;
5625 if ((sdp->sdp_header.HB2 == 0x2 && sdp->sdp_header.HB3 == 0x8) ||
5626 (sdp->sdp_header.HB2 == 0x4 && sdp->sdp_header.HB3 == 0xe)) {
5628 * - HB2 = 0x2, HB3 = 0x8
5629 * VSC SDP supporting 3D stereo + PSR
5630 * - HB2 = 0x4, HB3 = 0xe
5631 * VSC SDP supporting 3D stereo + PSR2 with Y-coordinate of
5632	 * first scan line of the SU region (applies to eDP v1.4b
5633	 * and higher)
5634	 */
5636 } else if (sdp->sdp_header.HB2 == 0x5 && sdp->sdp_header.HB3 == 0x13) {
5638 * - HB2 = 0x5, HB3 = 0x13
5639	 * VSC SDP supporting 3D stereo + PSR2 + Pixel Encoding/Colorimetry
5640	 * Format
5641	 */
5642 vsc->pixelformat = (sdp->db[16] >> 4) & 0xf;
5643 vsc->colorimetry = sdp->db[16] & 0xf;
5644 vsc->dynamic_range = (sdp->db[17] >> 7) & 0x1;
5646 switch (sdp->db[17] & 0x7) {
5663 MISSING_CASE(sdp->db[17] & 0x7);
5667 vsc->content_type = sdp->db[18] & 0x7;
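/*
 * Summary of the header signatures handled above:
 *
 *	HB2 = 0x2, HB3 = 0x08: 3D stereo + PSR
 *	HB2 = 0x4, HB3 = 0x0e: 3D stereo + PSR2 (+ SU region Y-coordinate)
 *	HB2 = 0x5, HB3 = 0x13: 3D stereo + PSR2 + Pixel Encoding/Colorimetry
 *
 * Only the last carries the pixel format/colorimetry/bpc payload unpacked
 * here; other combinations are not unpacked further.
 */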
5676 intel_dp_hdr_metadata_infoframe_sdp_unpack(struct hdmi_drm_infoframe *drm_infoframe,
5677 const void *buffer, size_t size)
5681 const struct dp_sdp *sdp = buffer;
5683 if (size < sizeof(struct dp_sdp))
5686 if (sdp->sdp_header.HB0 != 0)
5689 if (sdp->sdp_header.HB1 != HDMI_INFOFRAME_TYPE_DRM)
5693 * Least Significant Eight Bits of (Data Byte Count – 1)
5694 * 1Dh (i.e., Data Byte Count = 30 bytes).
5696 if (sdp->sdp_header.HB2 != 0x1D)
5699 /* Most Significant Two Bits of (Data Byte Count – 1), Clear to 00b. */
5700 if ((sdp->sdp_header.HB3 & 0x3) != 0)
5703 /* INFOFRAME SDP Version Number */
5704 if (((sdp->sdp_header.HB3 >> 2) & 0x3f) != 0x13)
5707 /* CTA Header Byte 2 (INFOFRAME Version Number) */
5708 if (sdp->db[0] != 1)
5711 /* CTA Header Byte 3 (Length of INFOFRAME): HDMI_DRM_INFOFRAME_SIZE */
5712 if (sdp->db[1] != HDMI_DRM_INFOFRAME_SIZE)
5715 ret = hdmi_drm_infoframe_unpack_only(drm_infoframe, &sdp->db[2],
5716 HDMI_DRM_INFOFRAME_SIZE);
5721 static void intel_read_dp_vsc_sdp(struct intel_encoder *encoder,
5722 struct intel_crtc_state *crtc_state,
5723 struct drm_dp_vsc_sdp *vsc)
5725 struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
5726 struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
5727 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
5728 unsigned int type = DP_SDP_VSC;
5729 struct dp_sdp sdp = {};
5732 /* When PSR is enabled, VSC SDP is handled by PSR routine */
5733 if (intel_psr_enabled(intel_dp))
5736 if ((crtc_state->infoframes.enable &
5737 intel_hdmi_infoframe_enable(type)) == 0)
5740 dig_port->read_infoframe(encoder, crtc_state, type, &sdp, sizeof(sdp));
5742 ret = intel_dp_vsc_sdp_unpack(vsc, &sdp, sizeof(sdp));
5745 drm_dbg_kms(&dev_priv->drm, "Failed to unpack DP VSC SDP\n");
5748 static void intel_read_dp_hdr_metadata_infoframe_sdp(struct intel_encoder *encoder,
5749 struct intel_crtc_state *crtc_state,
5750 struct hdmi_drm_infoframe *drm_infoframe)
5752 struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
5753 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
5754 unsigned int type = HDMI_PACKET_TYPE_GAMUT_METADATA;
5755 struct dp_sdp sdp = {};
5758 if ((crtc_state->infoframes.enable &
5759 intel_hdmi_infoframe_enable(type)) == 0)
5762 dig_port->read_infoframe(encoder, crtc_state, type, &sdp,
5765 ret = intel_dp_hdr_metadata_infoframe_sdp_unpack(drm_infoframe, &sdp,
5769 drm_dbg_kms(&dev_priv->drm,
5770 "Failed to unpack DP HDR Metadata Infoframe SDP\n");
5773 void intel_read_dp_sdp(struct intel_encoder *encoder,
5774 struct intel_crtc_state *crtc_state,
5777 if (encoder->type != INTEL_OUTPUT_DDI)
5782 intel_read_dp_vsc_sdp(encoder, crtc_state,
5783 &crtc_state->infoframes.vsc);
5785 case HDMI_PACKET_TYPE_GAMUT_METADATA:
5786 intel_read_dp_hdr_metadata_infoframe_sdp(encoder, crtc_state,
5787 &crtc_state->infoframes.drm.drm);
5795 static u8 intel_dp_autotest_link_training(struct intel_dp *intel_dp)
5797 struct drm_i915_private *i915 = dp_to_i915(intel_dp);
5800 u8 test_lane_count, test_link_bw;
5804	/* Read the TEST_LANE_COUNT and TEST_LINK_RATE fields (DP CTS 3.1.4) */
5805 status = drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_LANE_COUNT,
5809 drm_dbg_kms(&i915->drm, "Lane count read failed\n");
5812 test_lane_count &= DP_MAX_LANE_COUNT_MASK;
5814 status = drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_LINK_RATE,
5817 drm_dbg_kms(&i915->drm, "Link Rate read failed\n");
5820 test_link_rate = drm_dp_bw_code_to_link_rate(test_link_bw);
5822 /* Validate the requested link rate and lane count */
5823 if (!intel_dp_link_params_valid(intel_dp, test_link_rate,
5827 intel_dp->compliance.test_lane_count = test_lane_count;
5828 intel_dp->compliance.test_link_rate = test_link_rate;
5833 static u8 intel_dp_autotest_video_pattern(struct intel_dp *intel_dp)
5835 struct drm_i915_private *i915 = dp_to_i915(intel_dp);
5838 __be16 h_width, v_height;
5841 /* Read the TEST_PATTERN (DP CTS 3.1.5) */
5842 status = drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_PATTERN,
5845 drm_dbg_kms(&i915->drm, "Test pattern read failed\n");
5848 if (test_pattern != DP_COLOR_RAMP)
5851 status = drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_H_WIDTH_HI,
5854 drm_dbg_kms(&i915->drm, "H Width read failed\n");
5858 status = drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_V_HEIGHT_HI,
5861 drm_dbg_kms(&i915->drm, "V Height read failed\n");
5865 status = drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_MISC0,
5868 drm_dbg_kms(&i915->drm, "TEST MISC read failed\n");
5871 if ((test_misc & DP_TEST_COLOR_FORMAT_MASK) != DP_COLOR_FORMAT_RGB)
5873 if (test_misc & DP_TEST_DYNAMIC_RANGE_CEA)
5875 switch (test_misc & DP_TEST_BIT_DEPTH_MASK) {
5876 case DP_TEST_BIT_DEPTH_6:
5877 intel_dp->compliance.test_data.bpc = 6;
5879 case DP_TEST_BIT_DEPTH_8:
5880 intel_dp->compliance.test_data.bpc = 8;
5886 intel_dp->compliance.test_data.video_pattern = test_pattern;
5887 intel_dp->compliance.test_data.hdisplay = be16_to_cpu(h_width);
5888 intel_dp->compliance.test_data.vdisplay = be16_to_cpu(v_height);
5889 /* Set test active flag here so userspace doesn't interrupt things */
5890 intel_dp->compliance.test_active = true;
5895 static u8 intel_dp_autotest_edid(struct intel_dp *intel_dp)
5897 struct drm_i915_private *i915 = dp_to_i915(intel_dp);
5898 u8 test_result = DP_TEST_ACK;
5899 struct intel_connector *intel_connector = intel_dp->attached_connector;
5900 struct drm_connector *connector = &intel_connector->base;
5902 if (intel_connector->detect_edid == NULL ||
5903 connector->edid_corrupt ||
5904 intel_dp->aux.i2c_defer_count > 6) {
5905 /* Check EDID read for NACKs, DEFERs and corruption
5906 * (DP CTS 1.2 Core r1.1)
5907 * 4.2.2.4 : Failed EDID read, I2C_NAK
5908 * 4.2.2.5 : Failed EDID read, I2C_DEFER
5909 * 4.2.2.6 : EDID corruption detected
5910 * Use failsafe mode for all cases
5912 if (intel_dp->aux.i2c_nack_count > 0 ||
5913 intel_dp->aux.i2c_defer_count > 0)
5914 drm_dbg_kms(&i915->drm,
5915 "EDID read had %d NACKs, %d DEFERs\n",
5916 intel_dp->aux.i2c_nack_count,
5917 intel_dp->aux.i2c_defer_count);
5918 intel_dp->compliance.test_data.edid = INTEL_DP_RESOLUTION_FAILSAFE;
5920 struct edid *block = intel_connector->detect_edid;
5922 /* We have to write the checksum
5923 * of the last block read
5925 block += intel_connector->detect_edid->extensions;
5927 if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_EDID_CHECKSUM,
5928 block->checksum) <= 0)
5929 drm_dbg_kms(&i915->drm,
5930 "Failed to write EDID checksum\n");
5932 test_result = DP_TEST_ACK | DP_TEST_EDID_CHECKSUM_WRITE;
5933 intel_dp->compliance.test_data.edid = INTEL_DP_RESOLUTION_PREFERRED;
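/*
 * Note on the pointer arithmetic above: detect_edid points at the base
 * EDID block and ->extensions counts the 128-byte extension blocks that
 * follow it, so "block += extensions" lands on the last block read, whose
 * trailing checksum byte is what DP CTS expects in DP_TEST_EDID_CHECKSUM.
 */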
5936 /* Set test active flag here so userspace doesn't interrupt things */
5937 intel_dp->compliance.test_active = true;
5942 static void intel_dp_phy_pattern_update(struct intel_dp *intel_dp,
5943 const struct intel_crtc_state *crtc_state)
5945 struct drm_i915_private *dev_priv =
5946 to_i915(dp_to_dig_port(intel_dp)->base.base.dev);
5947 struct drm_dp_phy_test_params *data =
5948 &intel_dp->compliance.test_data.phytest;
5949 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
5950 enum pipe pipe = crtc->pipe;
5953 switch (data->phy_pattern) {
5954 case DP_PHY_TEST_PATTERN_NONE:
5955 DRM_DEBUG_KMS("Disable Phy Test Pattern\n");
5956 intel_de_write(dev_priv, DDI_DP_COMP_CTL(pipe), 0x0);
5958 case DP_PHY_TEST_PATTERN_D10_2:
5959 DRM_DEBUG_KMS("Set D10.2 Phy Test Pattern\n");
5960 intel_de_write(dev_priv, DDI_DP_COMP_CTL(pipe),
5961 DDI_DP_COMP_CTL_ENABLE | DDI_DP_COMP_CTL_D10_2);
5963 case DP_PHY_TEST_PATTERN_ERROR_COUNT:
5964 DRM_DEBUG_KMS("Set Error Count Phy Test Pattern\n");
5965 intel_de_write(dev_priv, DDI_DP_COMP_CTL(pipe),
5966 DDI_DP_COMP_CTL_ENABLE |
5967 DDI_DP_COMP_CTL_SCRAMBLED_0);
5969 case DP_PHY_TEST_PATTERN_PRBS7:
5970 DRM_DEBUG_KMS("Set PRBS7 Phy Test Pattern\n");
5971 intel_de_write(dev_priv, DDI_DP_COMP_CTL(pipe),
5972 DDI_DP_COMP_CTL_ENABLE | DDI_DP_COMP_CTL_PRBS7);
5974 case DP_PHY_TEST_PATTERN_80BIT_CUSTOM:
5976	 * FIXME: Ideally the pattern should come from DPCD 0x250. Since
5977	 * current DPR-100 firmware cannot set it, hardcode it
5978	 * for now for the compliance test.
5980 DRM_DEBUG_KMS("Set 80Bit Custom Phy Test Pattern 0x3e0f83e0 0x0f83e0f8 0x0000f83e\n");
5981 pattern_val = 0x3e0f83e0;
5982 intel_de_write(dev_priv, DDI_DP_COMP_PAT(pipe, 0), pattern_val);
5983 pattern_val = 0x0f83e0f8;
5984 intel_de_write(dev_priv, DDI_DP_COMP_PAT(pipe, 1), pattern_val);
5985 pattern_val = 0x0000f83e;
5986 intel_de_write(dev_priv, DDI_DP_COMP_PAT(pipe, 2), pattern_val);
5987 intel_de_write(dev_priv, DDI_DP_COMP_CTL(pipe),
5988 DDI_DP_COMP_CTL_ENABLE |
5989 DDI_DP_COMP_CTL_CUSTOM80);
5991 case DP_PHY_TEST_PATTERN_CP2520:
5993	 * FIXME: Ideally the pattern should come from DPCD 0x24A. Since
5994	 * current DPR-100 firmware cannot set it, hardcode it
5995	 * for now for the compliance test.
5997 DRM_DEBUG_KMS("Set HBR2 compliance Phy Test Pattern\n");
5999 intel_de_write(dev_priv, DDI_DP_COMP_CTL(pipe),
6000 DDI_DP_COMP_CTL_ENABLE | DDI_DP_COMP_CTL_HBR2 |
6004 WARN(1, "Invalid Phy Test Pattern\n");
6009 intel_dp_autotest_phy_ddi_disable(struct intel_dp *intel_dp,
6010 const struct intel_crtc_state *crtc_state)
6012 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
6013 struct drm_device *dev = dig_port->base.base.dev;
6014 struct drm_i915_private *dev_priv = to_i915(dev);
6015 struct intel_crtc *crtc = to_intel_crtc(dig_port->base.base.crtc);
6016 enum pipe pipe = crtc->pipe;
6017 u32 trans_ddi_func_ctl_value, trans_conf_value, dp_tp_ctl_value;
6019 trans_ddi_func_ctl_value = intel_de_read(dev_priv,
6020 TRANS_DDI_FUNC_CTL(pipe));
6021 trans_conf_value = intel_de_read(dev_priv, PIPECONF(pipe));
6022 dp_tp_ctl_value = intel_de_read(dev_priv, TGL_DP_TP_CTL(pipe));
6024 trans_ddi_func_ctl_value &= ~(TRANS_DDI_FUNC_ENABLE |
6025 TGL_TRANS_DDI_PORT_MASK);
6026 trans_conf_value &= ~PIPECONF_ENABLE;
6027 dp_tp_ctl_value &= ~DP_TP_CTL_ENABLE;
6029 intel_de_write(dev_priv, PIPECONF(pipe), trans_conf_value);
6030 intel_de_write(dev_priv, TRANS_DDI_FUNC_CTL(pipe),
6031 trans_ddi_func_ctl_value);
6032 intel_de_write(dev_priv, TGL_DP_TP_CTL(pipe), dp_tp_ctl_value);
6036 intel_dp_autotest_phy_ddi_enable(struct intel_dp *intel_dp,
6037 const struct intel_crtc_state *crtc_state)
6039 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
6040 struct drm_device *dev = dig_port->base.base.dev;
6041 struct drm_i915_private *dev_priv = to_i915(dev);
6042 enum port port = dig_port->base.port;
6043 struct intel_crtc *crtc = to_intel_crtc(dig_port->base.base.crtc);
6044 enum pipe pipe = crtc->pipe;
6045 u32 trans_ddi_func_ctl_value, trans_conf_value, dp_tp_ctl_value;
6047 trans_ddi_func_ctl_value = intel_de_read(dev_priv,
6048 TRANS_DDI_FUNC_CTL(pipe));
6049 trans_conf_value = intel_de_read(dev_priv, PIPECONF(pipe));
6050 dp_tp_ctl_value = intel_de_read(dev_priv, TGL_DP_TP_CTL(pipe));
6052 trans_ddi_func_ctl_value |= TRANS_DDI_FUNC_ENABLE |
6053 TGL_TRANS_DDI_SELECT_PORT(port);
6054 trans_conf_value |= PIPECONF_ENABLE;
6055 dp_tp_ctl_value |= DP_TP_CTL_ENABLE;
6057 intel_de_write(dev_priv, PIPECONF(pipe), trans_conf_value);
6058 intel_de_write(dev_priv, TGL_DP_TP_CTL(pipe), dp_tp_ctl_value);
6059 intel_de_write(dev_priv, TRANS_DDI_FUNC_CTL(pipe),
6060 trans_ddi_func_ctl_value);
6063 static void intel_dp_process_phy_request(struct intel_dp *intel_dp,
6064 const struct intel_crtc_state *crtc_state)
6066 struct drm_dp_phy_test_params *data =
6067 &intel_dp->compliance.test_data.phytest;
6068 u8 link_status[DP_LINK_STATUS_SIZE];
6070 if (drm_dp_dpcd_read_phy_link_status(&intel_dp->aux, DP_PHY_DPRX,
6072 DRM_DEBUG_KMS("failed to get link status\n");
6076 /* retrieve vswing & pre-emphasis setting */
6077 intel_dp_get_adjust_train(intel_dp, crtc_state, DP_PHY_DPRX,
6080 intel_dp_autotest_phy_ddi_disable(intel_dp, crtc_state);
6082 intel_dp_set_signal_levels(intel_dp, crtc_state);
6084 intel_dp_phy_pattern_update(intel_dp, crtc_state);
6086 intel_dp_autotest_phy_ddi_enable(intel_dp, crtc_state);
6088 drm_dp_set_phy_test_pattern(&intel_dp->aux, data,
6089 link_status[DP_DPCD_REV]);
6092 static u8 intel_dp_autotest_phy_pattern(struct intel_dp *intel_dp)
6094 struct drm_dp_phy_test_params *data =
6095 &intel_dp->compliance.test_data.phytest;
6097 if (drm_dp_get_phy_test_pattern(&intel_dp->aux, data)) {
6098 DRM_DEBUG_KMS("DP Phy Test pattern AUX read failure\n");
6102 /* Set test active flag here so userspace doesn't interrupt things */
6103 intel_dp->compliance.test_active = true;
6108 static void intel_dp_handle_test_request(struct intel_dp *intel_dp)
6110 struct drm_i915_private *i915 = dp_to_i915(intel_dp);
6111 u8 response = DP_TEST_NAK;
6115 status = drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_REQUEST, &request);
6117 drm_dbg_kms(&i915->drm,
6118 "Could not read test request from sink\n");
6123 case DP_TEST_LINK_TRAINING:
6124 drm_dbg_kms(&i915->drm, "LINK_TRAINING test requested\n");
6125 response = intel_dp_autotest_link_training(intel_dp);
6127 case DP_TEST_LINK_VIDEO_PATTERN:
6128 drm_dbg_kms(&i915->drm, "TEST_PATTERN test requested\n");
6129 response = intel_dp_autotest_video_pattern(intel_dp);
6131 case DP_TEST_LINK_EDID_READ:
6132 drm_dbg_kms(&i915->drm, "EDID test requested\n");
6133 response = intel_dp_autotest_edid(intel_dp);
6135 case DP_TEST_LINK_PHY_TEST_PATTERN:
6136 drm_dbg_kms(&i915->drm, "PHY_PATTERN test requested\n");
6137 response = intel_dp_autotest_phy_pattern(intel_dp);
6140 drm_dbg_kms(&i915->drm, "Invalid test request '%02x'\n",
6145 if (response & DP_TEST_ACK)
6146 intel_dp->compliance.test_type = request;
6149 status = drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_RESPONSE, response);
6151 drm_dbg_kms(&i915->drm,
6152 "Could not write test response to sink\n");
/**
 * intel_dp_check_mst_status - service any pending MST interrupts, check link status
 * @intel_dp: Intel DP struct
 *
 * Read any pending MST interrupts, call MST core to handle these and ack the
 * interrupts. Check if the main and AUX link state is ok.
 *
 * Returns:
 * - %true if pending interrupts were serviced (or no interrupts were
 *   pending) w/o detecting an error condition.
 * - %false if an error condition - like AUX failure or a loss of link - is
 *   detected, which needs servicing from the hotplug work.
 */
static bool
intel_dp_check_mst_status(struct intel_dp *intel_dp)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	bool link_ok = true;

	drm_WARN_ON_ONCE(&i915->drm, intel_dp->active_mst_links < 0);

	for (;;) {
		u8 esi[DP_DPRX_ESI_LEN] = {};
		bool handled;
		int retry;

		if (!intel_dp_get_sink_irq_esi(intel_dp, esi)) {
			drm_dbg_kms(&i915->drm,
				    "failed to get ESI - device may have failed\n");
			link_ok = false;

			break;
		}

		/* check link status - esi[10] = 0x200c */
		if (intel_dp->active_mst_links > 0 && link_ok &&
		    !drm_dp_channel_eq_ok(&esi[10], intel_dp->lane_count)) {
			drm_dbg_kms(&i915->drm,
				    "channel EQ not ok, retraining\n");
			link_ok = false;
		}

		drm_dbg_kms(&i915->drm, "got esi %3ph\n", esi);

		drm_dp_mst_hpd_irq(&intel_dp->mst_mgr, esi, &handled);
		if (!handled)
			break;

		for (retry = 0; retry < 3; retry++) {
			int wret;

			wret = drm_dp_dpcd_write(&intel_dp->aux,
						 DP_SINK_COUNT_ESI + 1,
						 &esi[1], 3);
			if (wret == 3)
				break;
		}
	}

	return link_ok;
}
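
/*
 * Illustrative caller sketch (mirrors the short-pulse handling further
 * below): a %false return is treated as "needs the hotplug work", e.g.:
 *
 *	if (intel_dp->is_mst) {
 *		if (!intel_dp_check_mst_status(intel_dp))
 *			return IRQ_NONE; // let the hotplug work service it
 *	}
 */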
static void
intel_dp_handle_hdmi_link_status_change(struct intel_dp *intel_dp)
{
	bool is_active;
	u8 buf = 0;

	is_active = drm_dp_pcon_hdmi_link_active(&intel_dp->aux);
	if (intel_dp->frl.is_trained && !is_active) {
		if (drm_dp_dpcd_readb(&intel_dp->aux, DP_PCON_HDMI_LINK_CONFIG_1, &buf) < 0)
			return;

		buf &= ~DP_PCON_ENABLE_HDMI_LINK;
		if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_PCON_HDMI_LINK_CONFIG_1, buf) < 0)
			return;

		drm_dp_pcon_hdmi_frl_link_error_count(&intel_dp->aux, &intel_dp->attached_connector->base);

		/* Restart FRL training or fall back to TMDS mode */
		intel_dp_check_frl_training(intel_dp);
	}
}
static bool
intel_dp_needs_link_retrain(struct intel_dp *intel_dp)
{
	u8 link_status[DP_LINK_STATUS_SIZE];

	if (!intel_dp->link_trained)
		return false;

	/*
	 * While PSR source HW is enabled, it will control main-link sending
	 * frames, enabling and disabling it, so trying to do a retrain will
	 * fail as the link may or may not be on, or it could mix training
	 * patterns and frame data at the same time, causing the retrain to
	 * fail. Also when exiting PSR, HW will retrain the link anyway,
	 * fixing any link status error.
	 */
	if (intel_psr_enabled(intel_dp))
		return false;

	if (drm_dp_dpcd_read_phy_link_status(&intel_dp->aux, DP_PHY_DPRX,
					     link_status) < 0)
		return false;

	/*
	 * Validate the cached values of intel_dp->link_rate and
	 * intel_dp->lane_count before attempting to retrain.
	 *
	 * FIXME would be nice to use the crtc state here, but since
	 * we need to call this from the short HPD handler that seems
	 * a bit hard.
	 */
	if (!intel_dp_link_params_valid(intel_dp, intel_dp->link_rate,
					intel_dp->lane_count))
		return false;

	/* Retrain if Channel EQ or CR not ok */
	return !drm_dp_channel_eq_ok(link_status, intel_dp->lane_count);
}
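
/*
 * For reference, drm_dp_channel_eq_ok() (a drm core helper) only reports a
 * healthy link when the lanes are interlane aligned and every active lane
 * has DP_LANE_CR_DONE, DP_LANE_CHANNEL_EQ_DONE and DP_LANE_SYMBOL_LOCKED
 * set. A minimal stand-alone sketch of the same check:
 *
 *	u8 status[DP_LINK_STATUS_SIZE];
 *
 *	if (drm_dp_dpcd_read_link_status(&intel_dp->aux, status) > 0 &&
 *	    drm_dp_channel_eq_ok(status, intel_dp->lane_count))
 *		return false; // link still trained, no retrain needed
 */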
static bool intel_dp_has_connector(struct intel_dp *intel_dp,
				   const struct drm_connector_state *conn_state)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	struct intel_encoder *encoder;
	enum pipe pipe;

	if (!conn_state->best_encoder)
		return false;

	/* SST */
	encoder = &dp_to_dig_port(intel_dp)->base;
	if (conn_state->best_encoder == &encoder->base)
		return true;

	/* MST */
	for_each_pipe(i915, pipe) {
		encoder = &intel_dp->mst_encoders[pipe]->base;
		if (conn_state->best_encoder == &encoder->base)
			return true;
	}

	return false;
}
static int intel_dp_prep_link_retrain(struct intel_dp *intel_dp,
				      struct drm_modeset_acquire_ctx *ctx,
				      u32 *crtc_mask)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	struct drm_connector_list_iter conn_iter;
	struct intel_connector *connector;
	int ret = 0;

	*crtc_mask = 0;

	if (!intel_dp_needs_link_retrain(intel_dp))
		return 0;

	drm_connector_list_iter_begin(&i915->drm, &conn_iter);
	for_each_intel_connector_iter(connector, &conn_iter) {
		struct drm_connector_state *conn_state =
			connector->base.state;
		struct intel_crtc_state *crtc_state;
		struct intel_crtc *crtc;

		if (!intel_dp_has_connector(intel_dp, conn_state))
			continue;

		crtc = to_intel_crtc(conn_state->crtc);
		if (!crtc)
			continue;

		ret = drm_modeset_lock(&crtc->base.mutex, ctx);
		if (ret)
			break;

		crtc_state = to_intel_crtc_state(crtc->base.state);

		drm_WARN_ON(&i915->drm, !intel_crtc_has_dp_encoder(crtc_state));

		if (!crtc_state->hw.active)
			continue;

		if (conn_state->commit &&
		    !try_wait_for_completion(&conn_state->commit->hw_done))
			continue;

		*crtc_mask |= drm_crtc_mask(&crtc->base);
	}
	drm_connector_list_iter_end(&conn_iter);

	if (!intel_dp_needs_link_retrain(intel_dp))
		*crtc_mask = 0;

	return ret;
}
static bool intel_dp_is_connected(struct intel_dp *intel_dp)
{
	struct intel_connector *connector = intel_dp->attached_connector;

	return connector->base.status == connector_status_connected ||
		intel_dp->is_mst;
}
int intel_dp_retrain_link(struct intel_encoder *encoder,
			  struct drm_modeset_acquire_ctx *ctx)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
	struct intel_crtc *crtc;
	u32 crtc_mask;
	int ret;

	if (!intel_dp_is_connected(intel_dp))
		return 0;

	ret = drm_modeset_lock(&dev_priv->drm.mode_config.connection_mutex,
			       ctx);
	if (ret)
		return ret;

	ret = intel_dp_prep_link_retrain(intel_dp, ctx, &crtc_mask);
	if (ret)
		return ret;

	if (crtc_mask == 0)
		return 0;

	drm_dbg_kms(&dev_priv->drm, "[ENCODER:%d:%s] retraining link\n",
		    encoder->base.base.id, encoder->base.name);

	for_each_intel_crtc_mask(&dev_priv->drm, crtc, crtc_mask) {
		const struct intel_crtc_state *crtc_state =
			to_intel_crtc_state(crtc->base.state);

		/* Suppress underruns caused by re-training */
		intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, false);
		if (crtc_state->has_pch_encoder)
			intel_set_pch_fifo_underrun_reporting(dev_priv,
							      intel_crtc_pch_transcoder(crtc), false);
	}

	for_each_intel_crtc_mask(&dev_priv->drm, crtc, crtc_mask) {
		const struct intel_crtc_state *crtc_state =
			to_intel_crtc_state(crtc->base.state);

		/* retrain on the MST master transcoder */
		if (INTEL_GEN(dev_priv) >= 12 &&
		    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP_MST) &&
		    !intel_dp_mst_is_master_trans(crtc_state))
			continue;

		intel_dp_check_frl_training(intel_dp);
		intel_dp_pcon_dsc_configure(intel_dp, crtc_state);
		intel_dp_start_link_train(intel_dp, crtc_state);
		intel_dp_stop_link_train(intel_dp, crtc_state);
		break;
	}

	for_each_intel_crtc_mask(&dev_priv->drm, crtc, crtc_mask) {
		const struct intel_crtc_state *crtc_state =
			to_intel_crtc_state(crtc->base.state);

		/* Keep underrun reporting disabled until things are stable */
		intel_wait_for_vblank(dev_priv, crtc->pipe);

		intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, true);
		if (crtc_state->has_pch_encoder)
			intel_set_pch_fifo_underrun_reporting(dev_priv,
							      intel_crtc_pch_transcoder(crtc), true);
	}

	return 0;
}
static int intel_dp_prep_phy_test(struct intel_dp *intel_dp,
				  struct drm_modeset_acquire_ctx *ctx,
				  u32 *crtc_mask)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	struct drm_connector_list_iter conn_iter;
	struct intel_connector *connector;
	int ret = 0;

	*crtc_mask = 0;

	drm_connector_list_iter_begin(&i915->drm, &conn_iter);
	for_each_intel_connector_iter(connector, &conn_iter) {
		struct drm_connector_state *conn_state =
			connector->base.state;
		struct intel_crtc_state *crtc_state;
		struct intel_crtc *crtc;

		if (!intel_dp_has_connector(intel_dp, conn_state))
			continue;

		crtc = to_intel_crtc(conn_state->crtc);
		if (!crtc)
			continue;

		ret = drm_modeset_lock(&crtc->base.mutex, ctx);
		if (ret)
			break;

		crtc_state = to_intel_crtc_state(crtc->base.state);

		drm_WARN_ON(&i915->drm, !intel_crtc_has_dp_encoder(crtc_state));

		if (!crtc_state->hw.active)
			continue;

		if (conn_state->commit &&
		    !try_wait_for_completion(&conn_state->commit->hw_done))
			continue;

		*crtc_mask |= drm_crtc_mask(&crtc->base);
	}
	drm_connector_list_iter_end(&conn_iter);

	return ret;
}
static int intel_dp_do_phy_test(struct intel_encoder *encoder,
				struct drm_modeset_acquire_ctx *ctx)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
	struct intel_crtc *crtc;
	u32 crtc_mask;
	int ret;

	ret = drm_modeset_lock(&dev_priv->drm.mode_config.connection_mutex,
			       ctx);
	if (ret)
		return ret;

	ret = intel_dp_prep_phy_test(intel_dp, ctx, &crtc_mask);
	if (ret)
		return ret;

	if (crtc_mask == 0)
		return 0;

	drm_dbg_kms(&dev_priv->drm, "[ENCODER:%d:%s] PHY test\n",
		    encoder->base.base.id, encoder->base.name);

	for_each_intel_crtc_mask(&dev_priv->drm, crtc, crtc_mask) {
		const struct intel_crtc_state *crtc_state =
			to_intel_crtc_state(crtc->base.state);

		/* test on the MST master transcoder */
		if (INTEL_GEN(dev_priv) >= 12 &&
		    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP_MST) &&
		    !intel_dp_mst_is_master_trans(crtc_state))
			continue;

		intel_dp_process_phy_request(intel_dp, crtc_state);
		break;
	}

	return 0;
}
static void intel_dp_phy_test(struct intel_encoder *encoder)
{
	struct drm_modeset_acquire_ctx ctx;
	int ret;

	drm_modeset_acquire_init(&ctx, 0);

	for (;;) {
		ret = intel_dp_do_phy_test(encoder, &ctx);

		if (ret == -EDEADLK) {
			drm_modeset_backoff(&ctx);
			continue;
		}

		break;
	}

	drm_modeset_drop_locks(&ctx);
	drm_modeset_acquire_fini(&ctx);
	drm_WARN(encoder->base.dev, ret,
		 "Acquiring modeset locks failed with %i\n", ret);
}
/*
 * If the display is now connected, check the link status; there have been
 * known issues of link loss triggering long pulses.
 *
 * Some sinks (eg. ASUS PB287Q) seem to perform some
 * weird HPD ping pong during modesets. So we can apparently
 * end up with HPD going low during a modeset, and then
 * going back up soon after. And once that happens we must
 * retrain the link to get a picture. That's in case no
 * userspace component reacted to intermittent HPD dip.
 */
static enum intel_hotplug_state
intel_dp_hotplug(struct intel_encoder *encoder,
		 struct intel_connector *connector)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
	struct drm_modeset_acquire_ctx ctx;
	enum intel_hotplug_state state;
	int ret;

	if (intel_dp->compliance.test_active &&
	    intel_dp->compliance.test_type == DP_TEST_LINK_PHY_TEST_PATTERN) {
		intel_dp_phy_test(encoder);
		/* just do the PHY test and nothing else */
		return INTEL_HOTPLUG_UNCHANGED;
	}

	state = intel_encoder_hotplug(encoder, connector);

	drm_modeset_acquire_init(&ctx, 0);

	for (;;) {
		ret = intel_dp_retrain_link(encoder, &ctx);

		if (ret == -EDEADLK) {
			drm_modeset_backoff(&ctx);
			continue;
		}

		break;
	}

	drm_modeset_drop_locks(&ctx);
	drm_modeset_acquire_fini(&ctx);
	drm_WARN(encoder->base.dev, ret,
		 "Acquiring modeset locks failed with %i\n", ret);

	/*
	 * Keeping it consistent with intel_ddi_hotplug() and
	 * intel_hdmi_hotplug().
	 */
	if (state == INTEL_HOTPLUG_UNCHANGED && !connector->hotplug_retries)
		state = INTEL_HOTPLUG_RETRY;

	return state;
}
static void intel_dp_check_device_service_irq(struct intel_dp *intel_dp)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	u8 val;

	if (intel_dp->dpcd[DP_DPCD_REV] < 0x11)
		return;

	if (drm_dp_dpcd_readb(&intel_dp->aux,
			      DP_DEVICE_SERVICE_IRQ_VECTOR, &val) != 1 || !val)
		return;

	drm_dp_dpcd_writeb(&intel_dp->aux, DP_DEVICE_SERVICE_IRQ_VECTOR, val);

	if (val & DP_AUTOMATED_TEST_REQUEST)
		intel_dp_handle_test_request(intel_dp);

	if (val & DP_CP_IRQ)
		intel_hdcp_handle_cp_irq(intel_dp->attached_connector);

	if (val & DP_SINK_SPECIFIC_IRQ)
		drm_dbg_kms(&i915->drm, "Sink specific irq unhandled\n");
}
static void intel_dp_check_link_service_irq(struct intel_dp *intel_dp)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	u8 val;

	if (intel_dp->dpcd[DP_DPCD_REV] < 0x11)
		return;

	if (drm_dp_dpcd_readb(&intel_dp->aux,
			      DP_LINK_SERVICE_IRQ_VECTOR_ESI0, &val) != 1 || !val) {
		drm_dbg_kms(&i915->drm, "Error in reading link service irq vector\n");
		return;
	}

	if (drm_dp_dpcd_writeb(&intel_dp->aux,
			       DP_LINK_SERVICE_IRQ_VECTOR_ESI0, val) != 1) {
		drm_dbg_kms(&i915->drm, "Error in writing link service irq vector\n");
		return;
	}

	if (val & HDMI_LINK_STATUS_CHANGED)
		intel_dp_handle_hdmi_link_status_change(intel_dp);
}
/*
 * According to DP spec
 * 5.1.2:
 *  1. Read DPCD
 *  2. Configure link according to Receiver Capabilities
 *  3. Use Link Training from 2.5.3.3 and 3.5.1.3
 *  4. Check link status on receipt of hot-plug interrupt
 *
 * intel_dp_short_pulse - handles short pulse interrupts
 * when full detection is not required.
 * Returns %true if the short pulse is handled and full detection
 * is NOT required, %false otherwise.
 */
static bool
intel_dp_short_pulse(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	u8 old_sink_count = intel_dp->sink_count;
	bool ret;

	/*
	 * Clearing compliance test variables to allow capturing
	 * of values for next automated test request.
	 */
	memset(&intel_dp->compliance, 0, sizeof(intel_dp->compliance));

	/*
	 * Now read the DPCD to see if it's actually running.
	 * If the current value of sink count doesn't match with
	 * the value that was stored earlier, or the dpcd read failed,
	 * we need to do full detection.
	 */
	ret = intel_dp_get_dpcd(intel_dp);

	if ((old_sink_count != intel_dp->sink_count) || !ret) {
		/* No need to proceed if we are going to do full detect */
		return false;
	}

	intel_dp_check_device_service_irq(intel_dp);
	intel_dp_check_link_service_irq(intel_dp);

	/* Handle CEC interrupts, if any */
	drm_dp_cec_irq(&intel_dp->aux);

	/* defer to the hotplug work for link retraining if needed */
	if (intel_dp_needs_link_retrain(intel_dp))
		return false;

	intel_psr_short_pulse(intel_dp);

	switch (intel_dp->compliance.test_type) {
	case DP_TEST_LINK_TRAINING:
		drm_dbg_kms(&dev_priv->drm,
			    "Link Training Compliance Test requested\n");
		/* Send a Hotplug Uevent to userspace to start modeset */
		drm_kms_helper_hotplug_event(&dev_priv->drm);
		break;
	case DP_TEST_LINK_PHY_TEST_PATTERN:
		drm_dbg_kms(&dev_priv->drm,
			    "PHY test pattern Compliance Test requested\n");
		/*
		 * Schedule long hpd to do the test
		 *
		 * FIXME get rid of the ad-hoc phy test modeset code
		 * and properly incorporate it into the normal modeset.
		 */
		return false;
	}

	return true;
}
/* XXX this is probably wrong for multiple downstream ports */
static enum drm_connector_status
intel_dp_detect_dpcd(struct intel_dp *intel_dp)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	u8 *dpcd = intel_dp->dpcd;
	u8 type;

	if (drm_WARN_ON(&i915->drm, intel_dp_is_edp(intel_dp)))
		return connector_status_connected;

	lspcon_resume(dig_port);

	if (!intel_dp_get_dpcd(intel_dp))
		return connector_status_disconnected;

	/* if there's no downstream port, we're done */
	if (!drm_dp_is_branch(dpcd))
		return connector_status_connected;

	/* If we're HPD-aware, SINK_COUNT changes dynamically */
	if (intel_dp_has_sink_count(intel_dp) &&
	    intel_dp->downstream_ports[0] & DP_DS_PORT_HPD) {
		return intel_dp->sink_count ?
			connector_status_connected :
			connector_status_disconnected;
	}

	if (intel_dp_can_mst(intel_dp))
		return connector_status_connected;

	/* If no HPD, poke DDC gently */
	if (drm_probe_ddc(&intel_dp->aux.ddc))
		return connector_status_connected;

	/* Well we tried, say unknown for unreliable port types */
	if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11) {
		type = intel_dp->downstream_ports[0] & DP_DS_PORT_TYPE_MASK;
		if (type == DP_DS_PORT_TYPE_VGA ||
		    type == DP_DS_PORT_TYPE_NON_EDID)
			return connector_status_unknown;
	} else {
		type = intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
			DP_DWN_STRM_PORT_TYPE_MASK;
		if (type == DP_DWN_STRM_PORT_TYPE_ANALOG ||
		    type == DP_DWN_STRM_PORT_TYPE_OTHER)
			return connector_status_unknown;
	}

	/* Anything else is out of spec, warn and ignore */
	drm_dbg_kms(&i915->drm, "Broken DP branch device, ignoring\n");
	return connector_status_disconnected;
}
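
/*
 * For reference, the sink count consulted above comes from the
 * DP_SINK_COUNT DPCD field; a minimal stand-alone read would look like:
 *
 *	u8 count;
 *
 *	if (drm_dp_dpcd_readb(&intel_dp->aux, DP_SINK_COUNT, &count) == 1)
 *		return DP_GET_SINK_COUNT(count) ?
 *			connector_status_connected :
 *			connector_status_disconnected;
 *
 * This is only an illustrative sketch; intel_dp->sink_count is actually
 * cached earlier, as part of intel_dp_get_dpcd().
 */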
static enum drm_connector_status
edp_detect(struct intel_dp *intel_dp)
{
	return connector_status_connected;
}
static bool ibx_digital_port_connected(struct intel_encoder *encoder)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	u32 bit = dev_priv->hotplug.pch_hpd[encoder->hpd_pin];

	return intel_de_read(dev_priv, SDEISR) & bit;
}

static bool g4x_digital_port_connected(struct intel_encoder *encoder)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	u32 bit;

	switch (encoder->hpd_pin) {
	case HPD_PORT_B:
		bit = PORTB_HOTPLUG_LIVE_STATUS_G4X;
		break;
	case HPD_PORT_C:
		bit = PORTC_HOTPLUG_LIVE_STATUS_G4X;
		break;
	case HPD_PORT_D:
		bit = PORTD_HOTPLUG_LIVE_STATUS_G4X;
		break;
	default:
		MISSING_CASE(encoder->hpd_pin);
		return false;
	}

	return intel_de_read(dev_priv, PORT_HOTPLUG_STAT) & bit;
}

static bool gm45_digital_port_connected(struct intel_encoder *encoder)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	u32 bit;

	switch (encoder->hpd_pin) {
	case HPD_PORT_B:
		bit = PORTB_HOTPLUG_LIVE_STATUS_GM45;
		break;
	case HPD_PORT_C:
		bit = PORTC_HOTPLUG_LIVE_STATUS_GM45;
		break;
	case HPD_PORT_D:
		bit = PORTD_HOTPLUG_LIVE_STATUS_GM45;
		break;
	default:
		MISSING_CASE(encoder->hpd_pin);
		return false;
	}

	return intel_de_read(dev_priv, PORT_HOTPLUG_STAT) & bit;
}

static bool ilk_digital_port_connected(struct intel_encoder *encoder)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	u32 bit = dev_priv->hotplug.hpd[encoder->hpd_pin];

	return intel_de_read(dev_priv, DEISR) & bit;
}
/**
 * intel_digital_port_connected - is the specified port connected?
 * @encoder: intel_encoder
 *
 * In cases where there's a connector physically connected but it can't be used
 * by our hardware we also return false, since the rest of the driver should
 * pretty much treat the port as disconnected. This is relevant for type-C
 * (starting on ICL) where there's ownership involved.
 *
 * Return %true if port is connected, %false otherwise.
 */
bool intel_digital_port_connected(struct intel_encoder *encoder)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
	bool is_connected = false;
	intel_wakeref_t wakeref;

	with_intel_display_power(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref)
		is_connected = dig_port->connected(encoder);

	return is_connected;
}
static struct edid *
intel_dp_get_edid(struct intel_dp *intel_dp)
{
	struct intel_connector *intel_connector = intel_dp->attached_connector;

	/* use cached edid if we have one */
	if (intel_connector->edid) {
		/* invalid edid */
		if (IS_ERR(intel_connector->edid))
			return NULL;

		return drm_edid_duplicate(intel_connector->edid);
	} else {
		return drm_get_edid(&intel_connector->base,
				    &intel_dp->aux.ddc);
	}
}
static void
intel_dp_update_dfp(struct intel_dp *intel_dp,
		    const struct edid *edid)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	struct intel_connector *connector = intel_dp->attached_connector;

	intel_dp->dfp.max_bpc =
		drm_dp_downstream_max_bpc(intel_dp->dpcd,
					  intel_dp->downstream_ports, edid);

	intel_dp->dfp.max_dotclock =
		drm_dp_downstream_max_dotclock(intel_dp->dpcd,
					       intel_dp->downstream_ports);

	intel_dp->dfp.min_tmds_clock =
		drm_dp_downstream_min_tmds_clock(intel_dp->dpcd,
						 intel_dp->downstream_ports,
						 edid);
	intel_dp->dfp.max_tmds_clock =
		drm_dp_downstream_max_tmds_clock(intel_dp->dpcd,
						 intel_dp->downstream_ports,
						 edid);

	intel_dp->dfp.pcon_max_frl_bw =
		drm_dp_get_pcon_max_frl_bw(intel_dp->dpcd,
					   intel_dp->downstream_ports);

	drm_dbg_kms(&i915->drm,
		    "[CONNECTOR:%d:%s] DFP max bpc %d, max dotclock %d, TMDS clock %d-%d, PCON Max FRL BW %dGbps\n",
		    connector->base.base.id, connector->base.name,
		    intel_dp->dfp.max_bpc,
		    intel_dp->dfp.max_dotclock,
		    intel_dp->dfp.min_tmds_clock,
		    intel_dp->dfp.max_tmds_clock,
		    intel_dp->dfp.pcon_max_frl_bw);

	intel_dp_get_pcon_dsc_cap(intel_dp);
}
static void
intel_dp_update_420(struct intel_dp *intel_dp)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	struct intel_connector *connector = intel_dp->attached_connector;
	bool is_branch, ycbcr_420_passthrough, ycbcr_444_to_420, rgb_to_ycbcr;

	/* No YCbCr output support on gmch platforms */
	if (HAS_GMCH(i915))
		return;

	/*
	 * ILK doesn't seem capable of DP YCbCr output. The
	 * displayed image is severely corrupted. SNB+ is fine.
	 */
	if (IS_GEN(i915, 5))
		return;

	is_branch = drm_dp_is_branch(intel_dp->dpcd);
	ycbcr_420_passthrough =
		drm_dp_downstream_420_passthrough(intel_dp->dpcd,
						  intel_dp->downstream_ports);
	/* on-board LSPCON always assumed to support 4:4:4->4:2:0 conversion */
	ycbcr_444_to_420 =
		dp_to_dig_port(intel_dp)->lspcon.active ||
		drm_dp_downstream_444_to_420_conversion(intel_dp->dpcd,
							intel_dp->downstream_ports);
	rgb_to_ycbcr = drm_dp_downstream_rgb_to_ycbcr_conversion(intel_dp->dpcd,
								 intel_dp->downstream_ports,
								 DP_DS_HDMI_BT601_RGB_YCBCR_CONV |
								 DP_DS_HDMI_BT709_RGB_YCBCR_CONV |
								 DP_DS_HDMI_BT2020_RGB_YCBCR_CONV);

	if (INTEL_GEN(i915) >= 11) {
		/* Let PCON convert from RGB->YCbCr if possible */
		if (is_branch && rgb_to_ycbcr && ycbcr_444_to_420) {
			intel_dp->dfp.rgb_to_ycbcr = true;
			intel_dp->dfp.ycbcr_444_to_420 = true;
			connector->base.ycbcr_420_allowed = true;
		} else {
			/* Prefer 4:2:0 passthrough over 4:4:4->4:2:0 conversion */
			intel_dp->dfp.ycbcr_444_to_420 =
				ycbcr_444_to_420 && !ycbcr_420_passthrough;

			connector->base.ycbcr_420_allowed =
				!is_branch || ycbcr_444_to_420 || ycbcr_420_passthrough;
		}
	} else {
		/* 4:4:4->4:2:0 conversion is the only way */
		intel_dp->dfp.ycbcr_444_to_420 = ycbcr_444_to_420;

		connector->base.ycbcr_420_allowed = ycbcr_444_to_420;
	}

	drm_dbg_kms(&i915->drm,
		    "[CONNECTOR:%d:%s] RGB->YcbCr conversion? %s, YCbCr 4:2:0 allowed? %s, YCbCr 4:4:4->4:2:0 conversion? %s\n",
		    connector->base.base.id, connector->base.name,
		    yesno(intel_dp->dfp.rgb_to_ycbcr),
		    yesno(connector->base.ycbcr_420_allowed),
		    yesno(intel_dp->dfp.ycbcr_444_to_420));
}
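
/*
 * Note the conversion-capability argument passed to
 * drm_dp_downstream_rgb_to_ycbcr_conversion() above is a *bitmask*, which
 * is why the DP_DS_HDMI_* flags must be combined with the bitwise OR (the
 * code previously used the logical OR here, which collapses to 1).
 * Illustration of the difference:
 *
 *	u8 good = DP_DS_HDMI_BT601_RGB_YCBCR_CONV |
 *		  DP_DS_HDMI_BT709_RGB_YCBCR_CONV;	// two cap bits set
 *	u8 bad  = DP_DS_HDMI_BT601_RGB_YCBCR_CONV ||
 *		  DP_DS_HDMI_BT709_RGB_YCBCR_CONV;	// always just 1
 */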
static void
intel_dp_set_edid(struct intel_dp *intel_dp)
{
	struct intel_connector *connector = intel_dp->attached_connector;
	struct edid *edid;

	intel_dp_unset_edid(intel_dp);
	edid = intel_dp_get_edid(intel_dp);
	connector->detect_edid = edid;

	intel_dp_update_dfp(intel_dp, edid);
	intel_dp_update_420(intel_dp);

	if (edid && edid->input & DRM_EDID_INPUT_DIGITAL) {
		intel_dp->has_hdmi_sink = drm_detect_hdmi_monitor(edid);
		intel_dp->has_audio = drm_detect_monitor_audio(edid);
	}

	drm_dp_cec_set_edid(&intel_dp->aux, edid);
	intel_dp->edid_quirks = drm_dp_get_edid_quirks(edid);
}
static void
intel_dp_unset_edid(struct intel_dp *intel_dp)
{
	struct intel_connector *connector = intel_dp->attached_connector;

	drm_dp_cec_unset_edid(&intel_dp->aux);
	kfree(connector->detect_edid);
	connector->detect_edid = NULL;

	intel_dp->has_hdmi_sink = false;
	intel_dp->has_audio = false;
	intel_dp->edid_quirks = 0;

	intel_dp->dfp.max_bpc = 0;
	intel_dp->dfp.max_dotclock = 0;
	intel_dp->dfp.min_tmds_clock = 0;
	intel_dp->dfp.max_tmds_clock = 0;

	intel_dp->dfp.pcon_max_frl_bw = 0;

	intel_dp->dfp.ycbcr_444_to_420 = false;
	connector->base.ycbcr_420_allowed = false;
}
static int
intel_dp_detect(struct drm_connector *connector,
		struct drm_modeset_acquire_ctx *ctx,
		bool force)
{
	struct drm_i915_private *dev_priv = to_i915(connector->dev);
	struct intel_dp *intel_dp = intel_attached_dp(to_intel_connector(connector));
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct intel_encoder *encoder = &dig_port->base;
	enum drm_connector_status status;

	drm_dbg_kms(&dev_priv->drm, "[CONNECTOR:%d:%s]\n",
		    connector->base.id, connector->name);
	drm_WARN_ON(&dev_priv->drm,
		    !drm_modeset_is_locked(&dev_priv->drm.mode_config.connection_mutex));

	if (!INTEL_DISPLAY_ENABLED(dev_priv))
		return connector_status_disconnected;

	/* Can't disconnect eDP */
	if (intel_dp_is_edp(intel_dp))
		status = edp_detect(intel_dp);
	else if (intel_digital_port_connected(encoder))
		status = intel_dp_detect_dpcd(intel_dp);
	else
		status = connector_status_disconnected;

	if (status == connector_status_disconnected) {
		memset(&intel_dp->compliance, 0, sizeof(intel_dp->compliance));
		memset(intel_dp->dsc_dpcd, 0, sizeof(intel_dp->dsc_dpcd));

		if (intel_dp->is_mst) {
			drm_dbg_kms(&dev_priv->drm,
				    "MST device may have disappeared %d vs %d\n",
				    intel_dp->is_mst,
				    intel_dp->mst_mgr.mst_state);
			intel_dp->is_mst = false;
			drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr,
							intel_dp->is_mst);
		}

		goto out;
	}

	/* Read DP Sink DSC Cap DPCD regs for DP v1.4 */
	if (INTEL_GEN(dev_priv) >= 11)
		intel_dp_get_dsc_sink_cap(intel_dp);

	intel_dp_configure_mst(intel_dp);

	/*
	 * TODO: Reset link params when switching to MST mode, until MST
	 * supports link training fallback params.
	 */
	if (intel_dp->reset_link_params || intel_dp->is_mst) {
		/* Initial max link lane count */
		intel_dp->max_link_lane_count = intel_dp_max_common_lane_count(intel_dp);

		/* Initial max link rate */
		intel_dp->max_link_rate = intel_dp_max_common_rate(intel_dp);

		intel_dp->reset_link_params = false;
	}

	intel_dp_print_rates(intel_dp);

	if (intel_dp->is_mst) {
		/*
		 * If we are in MST mode then this connector
		 * won't appear connected or have anything
		 * with EDID on it
		 */
		status = connector_status_disconnected;
		goto out;
	}

	/*
	 * Some external monitors do not signal loss of link synchronization
	 * with an IRQ_HPD, so force a link status check.
	 */
	if (!intel_dp_is_edp(intel_dp)) {
		int ret;

		ret = intel_dp_retrain_link(encoder, ctx);
		if (ret)
			return ret;
	}

	/*
	 * Clearing NACK and defer counts to get their exact values
	 * while reading EDID, which are required by Compliance tests
	 * 4.2.2.4 and 4.2.2.5
	 */
	intel_dp->aux.i2c_nack_count = 0;
	intel_dp->aux.i2c_defer_count = 0;

	intel_dp_set_edid(intel_dp);
	if (intel_dp_is_edp(intel_dp) ||
	    to_intel_connector(connector)->detect_edid)
		status = connector_status_connected;

	intel_dp_check_device_service_irq(intel_dp);

out:
	if (status != connector_status_connected && !intel_dp->is_mst)
		intel_dp_unset_edid(intel_dp);

	/*
	 * Make sure the refs for power wells enabled during detect are
	 * dropped to avoid a new detect cycle triggered by HPD polling.
	 */
	intel_display_power_flush_work(dev_priv);

	if (!intel_dp_is_edp(intel_dp))
		drm_dp_set_subconnector_property(connector,
						 status,
						 intel_dp->dpcd,
						 intel_dp->downstream_ports);
	return status;
}
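
/*
 * Note on the .detect_ctx contract: besides a connector_status value, the
 * function above may return a negative error (e.g. -EDEADLK propagated
 * from intel_dp_retrain_link()), which the drm probe helper uses to back
 * off and retry under the modeset acquire context. Rough caller-side
 * sketch:
 *
 *	ret = funcs->detect_ctx(connector, ctx, force);
 *	if (ret == -EDEADLK) {
 *		drm_modeset_backoff(ctx);
 *		// ... and retry the detect ...
 *	}
 */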
static void
intel_dp_force(struct drm_connector *connector)
{
	struct intel_dp *intel_dp = intel_attached_dp(to_intel_connector(connector));
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct intel_encoder *intel_encoder = &dig_port->base;
	struct drm_i915_private *dev_priv = to_i915(intel_encoder->base.dev);
	enum intel_display_power_domain aux_domain =
		intel_aux_power_domain(dig_port);
	intel_wakeref_t wakeref;

	drm_dbg_kms(&dev_priv->drm, "[CONNECTOR:%d:%s]\n",
		    connector->base.id, connector->name);
	intel_dp_unset_edid(intel_dp);

	if (connector->status != connector_status_connected)
		return;

	wakeref = intel_display_power_get(dev_priv, aux_domain);

	intel_dp_set_edid(intel_dp);

	intel_display_power_put(dev_priv, aux_domain, wakeref);
}
static int intel_dp_get_modes(struct drm_connector *connector)
{
	struct intel_connector *intel_connector = to_intel_connector(connector);
	struct edid *edid;

	edid = intel_connector->detect_edid;
	if (edid) {
		int ret = intel_connector_update_modes(connector, edid);

		if (ret)
			return ret;
	}

	/* if eDP has no EDID, fall back to fixed mode */
	if (intel_dp_is_edp(intel_attached_dp(intel_connector)) &&
	    intel_connector->panel.fixed_mode) {
		struct drm_display_mode *mode;

		mode = drm_mode_duplicate(connector->dev,
					  intel_connector->panel.fixed_mode);
		if (mode) {
			drm_mode_probed_add(connector, mode);
			return 1;
		}
	}

	if (!edid) {
		struct intel_dp *intel_dp = intel_attached_dp(intel_connector);
		struct drm_display_mode *mode;

		mode = drm_dp_downstream_mode(connector->dev,
					      intel_dp->dpcd,
					      intel_dp->downstream_ports);
		if (mode) {
			drm_mode_probed_add(connector, mode);
			return 1;
		}
	}

	return 0;
}
static int
intel_dp_connector_register(struct drm_connector *connector)
{
	struct drm_i915_private *i915 = to_i915(connector->dev);
	struct intel_dp *intel_dp = intel_attached_dp(to_intel_connector(connector));
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct intel_lspcon *lspcon = &dig_port->lspcon;
	int ret;

	ret = intel_connector_register(connector);
	if (ret)
		return ret;

	drm_dbg_kms(&i915->drm, "registering %s bus for %s\n",
		    intel_dp->aux.name, connector->kdev->kobj.name);

	intel_dp->aux.dev = connector->kdev;
	ret = drm_dp_aux_register(&intel_dp->aux);
	if (!ret)
		drm_dp_cec_register_connector(&intel_dp->aux, connector);

	if (!intel_bios_is_lspcon_present(i915, dig_port->base.port))
		return ret;

	/*
	 * TODO: Clean this up to handle lspcon init and resume more
	 * efficiently and in a more streamlined fashion.
	 */
	if (lspcon_init(dig_port)) {
		lspcon_detect_hdr_capability(lspcon);
		if (lspcon->hdr_supported)
			drm_object_attach_property(&connector->base,
						   connector->dev->mode_config.hdr_output_metadata_property,
						   0);
	}

	return ret;
}
static void
intel_dp_connector_unregister(struct drm_connector *connector)
{
	struct intel_dp *intel_dp = intel_attached_dp(to_intel_connector(connector));

	drm_dp_cec_unregister_connector(&intel_dp->aux);
	drm_dp_aux_unregister(&intel_dp->aux);
	intel_connector_unregister(connector);
}
void intel_dp_encoder_flush_work(struct drm_encoder *encoder)
{
	struct intel_digital_port *dig_port = enc_to_dig_port(to_intel_encoder(encoder));
	struct intel_dp *intel_dp = &dig_port->dp;

	intel_dp_mst_encoder_cleanup(dig_port);
	if (intel_dp_is_edp(intel_dp)) {
		intel_wakeref_t wakeref;

		cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
		/*
		 * vdd might still be enabled due to the delayed vdd off.
		 * Make sure vdd is actually turned off here.
		 */
		with_pps_lock(intel_dp, wakeref)
			edp_panel_vdd_off_sync(intel_dp);
	}

	intel_dp_aux_fini(intel_dp);
}

static void intel_dp_encoder_destroy(struct drm_encoder *encoder)
{
	intel_dp_encoder_flush_work(encoder);

	drm_encoder_cleanup(encoder);
	kfree(enc_to_dig_port(to_intel_encoder(encoder)));
}
void intel_dp_encoder_suspend(struct intel_encoder *intel_encoder)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(intel_encoder);
	intel_wakeref_t wakeref;

	if (!intel_dp_is_edp(intel_dp))
		return;

	/*
	 * vdd might still be enabled due to the delayed vdd off.
	 * Make sure vdd is actually turned off here.
	 */
	cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
	with_pps_lock(intel_dp, wakeref)
		edp_panel_vdd_off_sync(intel_dp);
}
void intel_dp_encoder_shutdown(struct intel_encoder *intel_encoder)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(intel_encoder);
	intel_wakeref_t wakeref;

	if (!intel_dp_is_edp(intel_dp))
		return;

	with_pps_lock(intel_dp, wakeref)
		wait_panel_power_cycle(intel_dp);
}
static void intel_edp_panel_vdd_sanitize(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);

	lockdep_assert_held(&dev_priv->pps_mutex);

	if (!edp_have_panel_vdd(intel_dp))
		return;

	/*
	 * The VDD bit needs a power domain reference, so if the bit is
	 * already enabled when we boot or resume, grab this reference and
	 * schedule a vdd off, so we don't hold on to the reference
	 * indefinitely.
	 */
	drm_dbg_kms(&dev_priv->drm,
		    "VDD left on by BIOS, adjusting state tracking\n");
	drm_WARN_ON(&dev_priv->drm, intel_dp->vdd_wakeref);
	intel_dp->vdd_wakeref = intel_display_power_get(dev_priv,
							intel_aux_power_domain(dig_port));

	edp_panel_vdd_schedule_off(intel_dp);
}
static enum pipe vlv_active_pipe(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
	enum pipe pipe;

	if (intel_dp_port_enabled(dev_priv, intel_dp->output_reg,
				  encoder->port, &pipe))
		return pipe;

	return INVALID_PIPE;
}
void intel_dp_encoder_reset(struct drm_encoder *encoder)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->dev);
	struct intel_dp *intel_dp = enc_to_intel_dp(to_intel_encoder(encoder));
	intel_wakeref_t wakeref;

	if (!HAS_DDI(dev_priv))
		intel_dp->DP = intel_de_read(dev_priv, intel_dp->output_reg);

	intel_dp->reset_link_params = true;

	if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv) &&
	    !intel_dp_is_edp(intel_dp))
		return;

	with_pps_lock(intel_dp, wakeref) {
		if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
			intel_dp->active_pipe = vlv_active_pipe(intel_dp);

		if (intel_dp_is_edp(intel_dp)) {
			/*
			 * Reinit the power sequencer, in case BIOS did
			 * something nasty with it.
			 */
			intel_dp_pps_init(intel_dp);
			intel_edp_panel_vdd_sanitize(intel_dp);
		}
	}
}
static int intel_modeset_tile_group(struct intel_atomic_state *state,
				    int tile_group_id)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	struct drm_connector_list_iter conn_iter;
	struct drm_connector *connector;
	int ret = 0;

	drm_connector_list_iter_begin(&dev_priv->drm, &conn_iter);
	drm_for_each_connector_iter(connector, &conn_iter) {
		struct drm_connector_state *conn_state;
		struct intel_crtc_state *crtc_state;
		struct intel_crtc *crtc;

		if (!connector->has_tile ||
		    connector->tile_group->id != tile_group_id)
			continue;

		conn_state = drm_atomic_get_connector_state(&state->base,
							    connector);
		if (IS_ERR(conn_state)) {
			ret = PTR_ERR(conn_state);
			break;
		}

		crtc = to_intel_crtc(conn_state->crtc);
		if (!crtc)
			continue;

		crtc_state = intel_atomic_get_new_crtc_state(state, crtc);
		crtc_state->uapi.mode_changed = true;

		ret = drm_atomic_add_affected_planes(&state->base, &crtc->base);
		if (ret)
			break;
	}
	drm_connector_list_iter_end(&conn_iter);

	return ret;
}
static int intel_modeset_affected_transcoders(struct intel_atomic_state *state, u8 transcoders)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	struct intel_crtc *crtc;

	if (transcoders == 0)
		return 0;

	for_each_intel_crtc(&dev_priv->drm, crtc) {
		struct intel_crtc_state *crtc_state;
		int ret;

		crtc_state = intel_atomic_get_crtc_state(&state->base, crtc);
		if (IS_ERR(crtc_state))
			return PTR_ERR(crtc_state);

		if (!crtc_state->hw.enable)
			continue;

		if (!(transcoders & BIT(crtc_state->cpu_transcoder)))
			continue;

		crtc_state->uapi.mode_changed = true;

		ret = drm_atomic_add_affected_connectors(&state->base, &crtc->base);
		if (ret)
			return ret;

		ret = drm_atomic_add_affected_planes(&state->base, &crtc->base);
		if (ret)
			return ret;

		transcoders &= ~BIT(crtc_state->cpu_transcoder);
	}

	drm_WARN_ON(&dev_priv->drm, transcoders != 0);

	return 0;
}
static int intel_modeset_synced_crtcs(struct intel_atomic_state *state,
				      struct drm_connector *connector)
{
	const struct drm_connector_state *old_conn_state =
		drm_atomic_get_old_connector_state(&state->base, connector);
	const struct intel_crtc_state *old_crtc_state;
	struct intel_crtc *crtc;
	u8 transcoders;

	crtc = to_intel_crtc(old_conn_state->crtc);
	if (!crtc)
		return 0;

	old_crtc_state = intel_atomic_get_old_crtc_state(state, crtc);

	if (!old_crtc_state->hw.active)
		return 0;

	transcoders = old_crtc_state->sync_mode_slaves_mask;
	if (old_crtc_state->master_transcoder != INVALID_TRANSCODER)
		transcoders |= BIT(old_crtc_state->master_transcoder);

	return intel_modeset_affected_transcoders(state,
						  transcoders);
}
static int intel_dp_connector_atomic_check(struct drm_connector *conn,
					   struct drm_atomic_state *_state)
{
	struct drm_i915_private *dev_priv = to_i915(conn->dev);
	struct intel_atomic_state *state = to_intel_atomic_state(_state);
	int ret;

	ret = intel_digital_connector_atomic_check(conn, &state->base);
	if (ret)
		return ret;

	/*
	 * We don't enable port sync on BDW due to missing w/as and
	 * due to not having adjusted the modeset sequence appropriately.
	 */
	if (INTEL_GEN(dev_priv) < 9)
		return 0;

	if (!intel_connector_needs_modeset(state, conn))
		return 0;

	if (conn->has_tile) {
		ret = intel_modeset_tile_group(state, conn->tile_group->id);
		if (ret)
			return ret;
	}

	return intel_modeset_synced_crtcs(state, conn);
}
static const struct drm_connector_funcs intel_dp_connector_funcs = {
	.force = intel_dp_force,
	.fill_modes = drm_helper_probe_single_connector_modes,
	.atomic_get_property = intel_digital_connector_atomic_get_property,
	.atomic_set_property = intel_digital_connector_atomic_set_property,
	.late_register = intel_dp_connector_register,
	.early_unregister = intel_dp_connector_unregister,
	.destroy = intel_connector_destroy,
	.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
	.atomic_duplicate_state = intel_digital_connector_duplicate_state,
};

static const struct drm_connector_helper_funcs intel_dp_connector_helper_funcs = {
	.detect_ctx = intel_dp_detect,
	.get_modes = intel_dp_get_modes,
	.mode_valid = intel_dp_mode_valid,
	.atomic_check = intel_dp_connector_atomic_check,
};

static const struct drm_encoder_funcs intel_dp_enc_funcs = {
	.reset = intel_dp_encoder_reset,
	.destroy = intel_dp_encoder_destroy,
};
static bool intel_edp_have_power(struct intel_dp *intel_dp)
{
	intel_wakeref_t wakeref;
	bool have_power = false;

	with_pps_lock(intel_dp, wakeref) {
		have_power = edp_have_panel_power(intel_dp) &&
			     edp_have_panel_vdd(intel_dp);
	}

	return have_power;
}
enum irqreturn
intel_dp_hpd_pulse(struct intel_digital_port *dig_port, bool long_hpd)
{
	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
	struct intel_dp *intel_dp = &dig_port->dp;

	if (dig_port->base.type == INTEL_OUTPUT_EDP &&
	    (long_hpd || !intel_edp_have_power(intel_dp))) {
		/*
		 * vdd off can generate a long/short pulse on eDP which
		 * would require vdd on to handle it, and thus we
		 * would end up in an endless cycle of
		 * "vdd off -> long/short hpd -> vdd on -> detect -> vdd off -> ..."
		 */
		drm_dbg_kms(&i915->drm,
			    "ignoring %s hpd on eDP [ENCODER:%d:%s]\n",
			    long_hpd ? "long" : "short",
			    dig_port->base.base.base.id,
			    dig_port->base.base.name);
		return IRQ_HANDLED;
	}

	drm_dbg_kms(&i915->drm, "got hpd irq on [ENCODER:%d:%s] - %s\n",
		    dig_port->base.base.base.id,
		    dig_port->base.base.name,
		    long_hpd ? "long" : "short");

	if (long_hpd) {
		intel_dp->reset_link_params = true;
		return IRQ_NONE;
	}

	if (intel_dp->is_mst) {
		if (!intel_dp_check_mst_status(intel_dp))
			return IRQ_NONE;
	} else if (!intel_dp_short_pulse(intel_dp)) {
		return IRQ_NONE;
	}

	return IRQ_HANDLED;
}
/* check the VBT to see whether the eDP is on another port */
bool intel_dp_is_port_edp(struct drm_i915_private *dev_priv, enum port port)
{
	/*
	 * eDP is not supported on g4x, so bail out early just
	 * for a bit of extra safety in case the VBT is bonkers.
	 */
	if (INTEL_GEN(dev_priv) < 5)
		return false;

	if (INTEL_GEN(dev_priv) < 9 && port == PORT_A)
		return true;

	return intel_bios_is_port_edp(dev_priv, port);
}
static void
intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connector)
{
	struct drm_i915_private *dev_priv = to_i915(connector->dev);
	enum port port = dp_to_dig_port(intel_dp)->base.port;

	if (!intel_dp_is_edp(intel_dp))
		drm_connector_attach_dp_subconnector_property(connector);

	if (!IS_G4X(dev_priv) && port != PORT_A)
		intel_attach_force_audio_property(connector);

	intel_attach_broadcast_rgb_property(connector);
	if (HAS_GMCH(dev_priv))
		drm_connector_attach_max_bpc_property(connector, 6, 10);
	else if (INTEL_GEN(dev_priv) >= 5)
		drm_connector_attach_max_bpc_property(connector, 6, 12);

	/* Register HDMI colorspace for case of lspcon */
	if (intel_bios_is_lspcon_present(dev_priv, port)) {
		drm_connector_attach_content_type_property(connector);
		intel_attach_hdmi_colorspace_property(connector);
	} else {
		intel_attach_dp_colorspace_property(connector);
	}

	if (IS_GEMINILAKE(dev_priv) || INTEL_GEN(dev_priv) >= 11)
		drm_object_attach_property(&connector->base,
					   connector->dev->mode_config.hdr_output_metadata_property,
					   0);

	if (intel_dp_is_edp(intel_dp)) {
		u32 allowed_scalers;

		allowed_scalers = BIT(DRM_MODE_SCALE_ASPECT) | BIT(DRM_MODE_SCALE_FULLSCREEN);
		if (!HAS_GMCH(dev_priv))
			allowed_scalers |= BIT(DRM_MODE_SCALE_CENTER);

		drm_connector_attach_scaling_mode_property(connector, allowed_scalers);

		connector->state->scaling_mode = DRM_MODE_SCALE_ASPECT;
	}
}
static void intel_dp_init_panel_power_timestamps(struct intel_dp *intel_dp)
{
	intel_dp->panel_power_off_time = ktime_get_boottime();
	intel_dp->last_power_on = jiffies;
	intel_dp->last_backlight_off = jiffies;
}
static void
intel_pps_readout_hw_state(struct intel_dp *intel_dp, struct edp_power_seq *seq)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	u32 pp_on, pp_off, pp_ctl;
	struct pps_registers regs;

	intel_pps_get_registers(intel_dp, &regs);

	pp_ctl = ilk_get_pp_control(intel_dp);

	/* Ensure PPS is unlocked */
	if (!HAS_DDI(dev_priv))
		intel_de_write(dev_priv, regs.pp_ctrl, pp_ctl);

	pp_on = intel_de_read(dev_priv, regs.pp_on);
	pp_off = intel_de_read(dev_priv, regs.pp_off);

	/* Pull timing values out of registers */
	seq->t1_t3 = REG_FIELD_GET(PANEL_POWER_UP_DELAY_MASK, pp_on);
	seq->t8 = REG_FIELD_GET(PANEL_LIGHT_ON_DELAY_MASK, pp_on);
	seq->t9 = REG_FIELD_GET(PANEL_LIGHT_OFF_DELAY_MASK, pp_off);
	seq->t10 = REG_FIELD_GET(PANEL_POWER_DOWN_DELAY_MASK, pp_off);

	if (i915_mmio_reg_valid(regs.pp_div)) {
		u32 pp_div;

		pp_div = intel_de_read(dev_priv, regs.pp_div);

		seq->t11_t12 = REG_FIELD_GET(PANEL_POWER_CYCLE_DELAY_MASK, pp_div) * 1000;
	} else {
		seq->t11_t12 = REG_FIELD_GET(BXT_POWER_CYCLE_DELAY_MASK, pp_ctl) * 1000;
	}
}
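
/*
 * Worked example of the scaling above, assuming a hypothetical register
 * value: the power-cycle field is stored in units of 100 ms, while the sw
 * state keeps all delays in units of 100 us, hence the * 1000. A field
 * value of 6 therefore yields:
 *
 *	seq->t11_t12 = 6 * 1000;	// 6000 * 100 us == 600 ms
 */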
static void
intel_pps_dump_state(const char *state_name, const struct edp_power_seq *seq)
{
	DRM_DEBUG_KMS("%s t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
		      state_name,
		      seq->t1_t3, seq->t8, seq->t9, seq->t10, seq->t11_t12);
}

static void
intel_pps_verify_state(struct intel_dp *intel_dp)
{
	struct edp_power_seq hw;
	struct edp_power_seq *sw = &intel_dp->pps_delays;

	intel_pps_readout_hw_state(intel_dp, &hw);

	if (hw.t1_t3 != sw->t1_t3 || hw.t8 != sw->t8 || hw.t9 != sw->t9 ||
	    hw.t10 != sw->t10 || hw.t11_t12 != sw->t11_t12) {
		DRM_ERROR("PPS state mismatch\n");
		intel_pps_dump_state("sw", sw);
		intel_pps_dump_state("hw", &hw);
	}
}
static void
intel_dp_init_panel_power_sequencer(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct edp_power_seq cur, vbt, spec,
		*final = &intel_dp->pps_delays;

	lockdep_assert_held(&dev_priv->pps_mutex);

	/* already initialized? */
	if (final->t11_t12 != 0)
		return;

	intel_pps_readout_hw_state(intel_dp, &cur);

	intel_pps_dump_state("cur", &cur);

	vbt = dev_priv->vbt.edp.pps;
	/*
	 * On Toshiba Satellite P50-C-18C systems the VBT T12 delay
	 * of 500ms appears to be too short. Occasionally the panel
	 * just fails to power back on. Increasing the delay to 800ms
	 * seems sufficient to avoid this problem.
	 */
	if (dev_priv->quirks & QUIRK_INCREASE_T12_DELAY) {
		vbt.t11_t12 = max_t(u16, vbt.t11_t12, 1300 * 10);
		drm_dbg_kms(&dev_priv->drm,
			    "Increasing T12 panel delay as per the quirk to %d\n",
			    vbt.t11_t12);
	}
	/*
	 * T11_T12 delay is special and actually in units of 100ms, but zero
	 * based in the hw (so we need to add 100 ms). But the sw vbt
	 * table multiplies it with 1000 to make it in units of 100usec,
	 * too.
	 */
	vbt.t11_t12 += 100 * 10;

	/*
	 * Upper limits from eDP 1.3 spec. Note that we use the clunky units of
	 * our hw here, which are all in 100usec.
	 */
	spec.t1_t3 = 210 * 10;
	spec.t8 = 50 * 10; /* no limit for t8, use t7 instead */
	spec.t9 = 50 * 10; /* no limit for t9, make it symmetric with t8 */
	spec.t10 = 500 * 10;
	/*
	 * This one is special and actually in units of 100ms, but zero
	 * based in the hw (so we need to add 100 ms). But the sw vbt
	 * table multiplies it with 1000 to make it in units of 100usec,
	 * too.
	 */
	spec.t11_t12 = (510 + 100) * 10;

	intel_pps_dump_state("vbt", &vbt);

	/*
	 * Use the max of the register settings and vbt. If both are
	 * unset, fall back to the spec limits.
	 */
#define assign_final(field)	final->field = (max(cur.field, vbt.field) == 0 ? \
				       spec.field : \
				       max(cur.field, vbt.field))
	assign_final(t1_t3);
	assign_final(t8);
	assign_final(t9);
	assign_final(t10);
	assign_final(t11_t12);
#undef assign_final

#define get_delay(field)	(DIV_ROUND_UP(final->field, 10))
	intel_dp->panel_power_up_delay = get_delay(t1_t3);
	intel_dp->backlight_on_delay = get_delay(t8);
	intel_dp->backlight_off_delay = get_delay(t9);
	intel_dp->panel_power_down_delay = get_delay(t10);
	intel_dp->panel_power_cycle_delay = get_delay(t11_t12);
#undef get_delay

	drm_dbg_kms(&dev_priv->drm,
		    "panel power up delay %d, power down delay %d, power cycle delay %d\n",
		    intel_dp->panel_power_up_delay,
		    intel_dp->panel_power_down_delay,
		    intel_dp->panel_power_cycle_delay);

	drm_dbg_kms(&dev_priv->drm, "backlight on delay %d, off delay %d\n",
		    intel_dp->backlight_on_delay,
		    intel_dp->backlight_off_delay);

	/*
	 * We override the HW backlight delays to 1 because we do manual waits
	 * on them. For T8, even BSpec recommends doing it. For T9, if we
	 * don't do this, we'll end up waiting for the backlight off delay
	 * twice: once when we do the manual sleep, and once when we disable
	 * the panel and wait for the PP_STATUS bit to become zero.
	 */
	final->t8 = 1;
	final->t9 = 1;

	/*
	 * HW has only a 100msec granularity for t11_t12 so round it up
	 * accordingly.
	 */
	final->t11_t12 = roundup(final->t11_t12, 100 * 10);
}
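
/*
 * Worked example of the spec fallback above: the eDP spec limit for
 * T11+T12 is 510 ms, plus the 100 ms that the zero-based hw field cannot
 * express, all kept in 100 us units:
 *
 *	spec.t11_t12 = (510 + 100) * 10;	// 6100 * 100 us == 610 ms
 *
 * get_delay() then converts such values back to ms for the
 * intel_dp->panel_power_* fields via DIV_ROUND_UP(field, 10).
 */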
static void
intel_dp_init_panel_power_sequencer_registers(struct intel_dp *intel_dp,
					      bool force_disable_vdd)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	u32 pp_on, pp_off, port_sel = 0;
	int div = RUNTIME_INFO(dev_priv)->rawclk_freq / 1000;
	struct pps_registers regs;
	enum port port = dp_to_dig_port(intel_dp)->base.port;
	const struct edp_power_seq *seq = &intel_dp->pps_delays;

	lockdep_assert_held(&dev_priv->pps_mutex);

	intel_pps_get_registers(intel_dp, &regs);

	/*
	 * On some VLV machines the BIOS can leave the VDD
	 * enabled even on power sequencers which aren't
	 * hooked up to any port. This would mess up the
	 * power domain tracking the first time we pick
	 * one of these power sequencers for use since
	 * edp_panel_vdd_on() would notice that the VDD was
	 * already on and therefore wouldn't grab the power
	 * domain reference. Disable VDD first to avoid this.
	 * This also avoids spuriously turning the VDD on as
	 * soon as the new power sequencer gets initialized.
	 */
	if (force_disable_vdd) {
		u32 pp = ilk_get_pp_control(intel_dp);

		drm_WARN(&dev_priv->drm, pp & PANEL_POWER_ON,
			 "Panel power already on\n");

		if (pp & EDP_FORCE_VDD)
			drm_dbg_kms(&dev_priv->drm,
				    "VDD already on, disabling first\n");

		pp &= ~EDP_FORCE_VDD;

		intel_de_write(dev_priv, regs.pp_ctrl, pp);
	}

	pp_on = REG_FIELD_PREP(PANEL_POWER_UP_DELAY_MASK, seq->t1_t3) |
		REG_FIELD_PREP(PANEL_LIGHT_ON_DELAY_MASK, seq->t8);
	pp_off = REG_FIELD_PREP(PANEL_LIGHT_OFF_DELAY_MASK, seq->t9) |
		REG_FIELD_PREP(PANEL_POWER_DOWN_DELAY_MASK, seq->t10);

	/*
	 * Haswell doesn't have any port selection bits for the panel
	 * power sequencer any more.
	 */
	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		port_sel = PANEL_PORT_SELECT_VLV(port);
	} else if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv)) {
		switch (port) {
		case PORT_A:
			port_sel = PANEL_PORT_SELECT_DPA;
			break;
		case PORT_C:
			port_sel = PANEL_PORT_SELECT_DPC;
			break;
		case PORT_D:
			port_sel = PANEL_PORT_SELECT_DPD;
			break;
		default:
			MISSING_CASE(port);
			break;
		}
	}

	pp_on |= port_sel;

	intel_de_write(dev_priv, regs.pp_on, pp_on);
	intel_de_write(dev_priv, regs.pp_off, pp_off);

	/*
	 * Compute the divisor for the pp clock, simply match the Bspec formula.
	 */
	if (i915_mmio_reg_valid(regs.pp_div)) {
		intel_de_write(dev_priv, regs.pp_div,
			       REG_FIELD_PREP(PP_REFERENCE_DIVIDER_MASK, (100 * div) / 2 - 1) |
			       REG_FIELD_PREP(PANEL_POWER_CYCLE_DELAY_MASK, DIV_ROUND_UP(seq->t11_t12, 1000)));
	} else {
		u32 pp_ctl;

		pp_ctl = intel_de_read(dev_priv, regs.pp_ctrl);
		pp_ctl &= ~BXT_POWER_CYCLE_DELAY_MASK;
		pp_ctl |= REG_FIELD_PREP(BXT_POWER_CYCLE_DELAY_MASK, DIV_ROUND_UP(seq->t11_t12, 1000));
		intel_de_write(dev_priv, regs.pp_ctrl, pp_ctl);
	}

	drm_dbg_kms(&dev_priv->drm,
		    "panel power sequencer register settings: PP_ON %#x, PP_OFF %#x, PP_DIV %#x\n",
		    intel_de_read(dev_priv, regs.pp_on),
		    intel_de_read(dev_priv, regs.pp_off),
		    i915_mmio_reg_valid(regs.pp_div) ?
		    intel_de_read(dev_priv, regs.pp_div) :
		    (intel_de_read(dev_priv, regs.pp_ctrl) & BXT_POWER_CYCLE_DELAY_MASK));
}
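
/*
 * Worked example of the divisor computation above, assuming a 24 MHz raw
 * clock (rawclk_freq == 24000 kHz, so div == 24):
 *
 *	(100 * div) / 2 - 1 == (100 * 24) / 2 - 1 == 1199
 *
 * which is the reference-divider value programmed into PP_DIV per the
 * Bspec formula the comment above refers to.
 */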
static void intel_dp_pps_init(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);

	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		vlv_initial_power_sequencer_setup(intel_dp);
	} else {
		intel_dp_init_panel_power_sequencer(intel_dp);
		intel_dp_init_panel_power_sequencer_registers(intel_dp, false);
	}
}
/**
 * intel_dp_set_drrs_state - program registers for RR switch to take effect
 * @dev_priv: i915 device
 * @crtc_state: a pointer to the active intel_crtc_state
 * @refresh_rate: RR to be programmed
 *
 * This function gets called when refresh rate (RR) has to be changed from
 * one frequency to another. Switches can be between high and low RR
 * supported by the panel or to any other RR based on media playback (in
 * this case, RR value needs to be passed from user space).
 *
 * The caller of this function needs to take a lock on dev_priv->drrs.
 */
static void intel_dp_set_drrs_state(struct drm_i915_private *dev_priv,
				    const struct intel_crtc_state *crtc_state,
				    int refresh_rate)
{
	struct intel_dp *intel_dp = dev_priv->drrs.dp;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->uapi.crtc);
	enum drrs_refresh_rate_type index = DRRS_HIGH_RR;

	if (refresh_rate <= 0) {
		drm_dbg_kms(&dev_priv->drm,
			    "Refresh rate should be positive non-zero.\n");
		return;
	}

	if (intel_dp == NULL) {
		drm_dbg_kms(&dev_priv->drm, "DRRS not supported.\n");
		return;
	}

	if (!intel_crtc) {
		drm_dbg_kms(&dev_priv->drm,
			    "DRRS: intel_crtc not initialized\n");
		return;
	}

	if (dev_priv->drrs.type < SEAMLESS_DRRS_SUPPORT) {
		drm_dbg_kms(&dev_priv->drm, "Only Seamless DRRS supported.\n");
		return;
	}

	if (drm_mode_vrefresh(intel_dp->attached_connector->panel.downclock_mode) ==
			refresh_rate)
		index = DRRS_LOW_RR;

	if (index == dev_priv->drrs.refresh_rate_type) {
		drm_dbg_kms(&dev_priv->drm,
			    "DRRS requested for previously set RR...ignoring\n");
		return;
	}

	if (!crtc_state->hw.active) {
		drm_dbg_kms(&dev_priv->drm,
			    "eDP encoder disabled. CRTC not Active\n");
		return;
	}

	if (INTEL_GEN(dev_priv) >= 8 && !IS_CHERRYVIEW(dev_priv)) {
		switch (index) {
		case DRRS_HIGH_RR:
			intel_dp_set_m_n(crtc_state, M1_N1);
			break;
		case DRRS_LOW_RR:
			intel_dp_set_m_n(crtc_state, M2_N2);
			break;
		case DRRS_MAX_RR:
		default:
			drm_err(&dev_priv->drm,
				"Unsupported refreshrate type\n");
		}
	} else if (INTEL_GEN(dev_priv) > 6) {
		i915_reg_t reg = PIPECONF(crtc_state->cpu_transcoder);
		u32 val;

		val = intel_de_read(dev_priv, reg);
		if (index > DRRS_HIGH_RR) {
			if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
				val |= PIPECONF_EDP_RR_MODE_SWITCH_VLV;
			else
				val |= PIPECONF_EDP_RR_MODE_SWITCH;
		} else {
			if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
				val &= ~PIPECONF_EDP_RR_MODE_SWITCH_VLV;
			else
				val &= ~PIPECONF_EDP_RR_MODE_SWITCH;
		}
		intel_de_write(dev_priv, reg, val);
	}

	dev_priv->drrs.refresh_rate_type = index;

	drm_dbg_kms(&dev_priv->drm, "eDP Refresh Rate set to : %dHz\n",
		    refresh_rate);
}
static void
intel_edp_drrs_enable_locked(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);

	dev_priv->drrs.busy_frontbuffer_bits = 0;
	dev_priv->drrs.dp = intel_dp;
}

/**
 * intel_edp_drrs_enable - init drrs struct if supported
 * @intel_dp: DP struct
 * @crtc_state: A pointer to the active crtc state.
 *
 * Initializes frontbuffer_bits and drrs.dp
 */
void intel_edp_drrs_enable(struct intel_dp *intel_dp,
			   const struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);

	if (!crtc_state->has_drrs)
		return;

	drm_dbg_kms(&dev_priv->drm, "Enabling DRRS\n");

	mutex_lock(&dev_priv->drrs.mutex);

	if (dev_priv->drrs.dp) {
		drm_warn(&dev_priv->drm, "DRRS already enabled\n");
		goto unlock;
	}

	intel_edp_drrs_enable_locked(intel_dp);

unlock:
	mutex_unlock(&dev_priv->drrs.mutex);
}
static void
intel_edp_drrs_disable_locked(struct intel_dp *intel_dp,
			      const struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);

	if (dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR) {
		int refresh;

		refresh = drm_mode_vrefresh(intel_dp->attached_connector->panel.fixed_mode);
		intel_dp_set_drrs_state(dev_priv, crtc_state, refresh);
	}

	dev_priv->drrs.dp = NULL;
}

/**
 * intel_edp_drrs_disable - Disable DRRS
 * @intel_dp: DP struct
 * @old_crtc_state: Pointer to old crtc_state.
 */
void intel_edp_drrs_disable(struct intel_dp *intel_dp,
			    const struct intel_crtc_state *old_crtc_state)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);

	if (!old_crtc_state->has_drrs)
		return;

	mutex_lock(&dev_priv->drrs.mutex);
	if (!dev_priv->drrs.dp) {
		mutex_unlock(&dev_priv->drrs.mutex);
		return;
	}

	intel_edp_drrs_disable_locked(intel_dp, old_crtc_state);
	mutex_unlock(&dev_priv->drrs.mutex);

	cancel_delayed_work_sync(&dev_priv->drrs.work);
}
/**
 * intel_edp_drrs_update - Update DRRS state
 * @intel_dp: Intel DP
 * @crtc_state: new CRTC state
 *
 * This function will update DRRS states, disabling or enabling DRRS when
 * executing fastsets. For full modeset, intel_edp_drrs_disable() and
 * intel_edp_drrs_enable() should be called instead.
 */
void
intel_edp_drrs_update(struct intel_dp *intel_dp,
		      const struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);

	if (dev_priv->drrs.type != SEAMLESS_DRRS_SUPPORT)
		return;

	mutex_lock(&dev_priv->drrs.mutex);

	/* New state matches current one? */
	if (crtc_state->has_drrs == !!dev_priv->drrs.dp)
		goto unlock;

	if (crtc_state->has_drrs)
		intel_edp_drrs_enable_locked(intel_dp);
	else
		intel_edp_drrs_disable_locked(intel_dp, crtc_state);

unlock:
	mutex_unlock(&dev_priv->drrs.mutex);
}
static void intel_edp_drrs_downclock_work(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, typeof(*dev_priv), drrs.work.work);
	struct intel_dp *intel_dp;

	mutex_lock(&dev_priv->drrs.mutex);

	intel_dp = dev_priv->drrs.dp;

	if (!intel_dp)
		goto unlock;

	/*
	 * The delayed work can race with an invalidate hence we need to
	 * recheck.
	 */
	if (dev_priv->drrs.busy_frontbuffer_bits)
		goto unlock;

	if (dev_priv->drrs.refresh_rate_type != DRRS_LOW_RR) {
		struct drm_crtc *crtc = dp_to_dig_port(intel_dp)->base.base.crtc;

		intel_dp_set_drrs_state(dev_priv, to_intel_crtc(crtc)->config,
					drm_mode_vrefresh(intel_dp->attached_connector->panel.downclock_mode));
	}

unlock:
	mutex_unlock(&dev_priv->drrs.mutex);
}
/**
 * intel_edp_drrs_invalidate - Disable Idleness DRRS
 * @dev_priv: i915 device
 * @frontbuffer_bits: frontbuffer plane tracking bits
 *
 * This function gets called every time rendering on the given planes starts.
 * Hence DRRS needs to be upclocked, i.e. (LOW_RR -> HIGH_RR).
 *
 * Dirty frontbuffers relevant to DRRS are tracked in busy_frontbuffer_bits.
 */
void intel_edp_drrs_invalidate(struct drm_i915_private *dev_priv,
			       unsigned int frontbuffer_bits)
{
	struct intel_dp *intel_dp;
	struct drm_crtc *crtc;
	enum pipe pipe;

	if (dev_priv->drrs.type == DRRS_NOT_SUPPORTED)
		return;

	cancel_delayed_work(&dev_priv->drrs.work);

	mutex_lock(&dev_priv->drrs.mutex);

	intel_dp = dev_priv->drrs.dp;
	if (!intel_dp) {
		mutex_unlock(&dev_priv->drrs.mutex);
		return;
	}

	crtc = dp_to_dig_port(intel_dp)->base.base.crtc;
	pipe = to_intel_crtc(crtc)->pipe;

	frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);
	dev_priv->drrs.busy_frontbuffer_bits |= frontbuffer_bits;

	/* invalidate means busy screen hence upclock */
	if (frontbuffer_bits && dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR)
		intel_dp_set_drrs_state(dev_priv, to_intel_crtc(crtc)->config,
					drm_mode_vrefresh(intel_dp->attached_connector->panel.fixed_mode));

	mutex_unlock(&dev_priv->drrs.mutex);
}
/**
 * intel_edp_drrs_flush - Restart Idleness DRRS
 * @dev_priv: i915 device
 * @frontbuffer_bits: frontbuffer plane tracking bits
 *
 * This function gets called every time rendering on the given planes has
 * completed or flip on a crtc is completed. So DRRS should be upclocked
 * (LOW_RR -> HIGH_RR). And also Idleness detection should be started again,
 * if no other planes are dirty.
 *
 * Dirty frontbuffers relevant to DRRS are tracked in busy_frontbuffer_bits.
 */
void intel_edp_drrs_flush(struct drm_i915_private *dev_priv,
			  unsigned int frontbuffer_bits)
{
	struct intel_dp *intel_dp;
	struct drm_crtc *crtc;
	enum pipe pipe;

	if (dev_priv->drrs.type == DRRS_NOT_SUPPORTED)
		return;

	cancel_delayed_work(&dev_priv->drrs.work);

	mutex_lock(&dev_priv->drrs.mutex);

	intel_dp = dev_priv->drrs.dp;
	if (!intel_dp) {
		mutex_unlock(&dev_priv->drrs.mutex);
		return;
	}

	crtc = dp_to_dig_port(intel_dp)->base.base.crtc;
	pipe = to_intel_crtc(crtc)->pipe;

	frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);
	dev_priv->drrs.busy_frontbuffer_bits &= ~frontbuffer_bits;

	/* flush means busy screen hence upclock */
	if (frontbuffer_bits && dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR)
		intel_dp_set_drrs_state(dev_priv, to_intel_crtc(crtc)->config,
					drm_mode_vrefresh(intel_dp->attached_connector->panel.fixed_mode));

	/*
	 * flush also means no more activity hence schedule downclock, if all
	 * other fbs are quiescent too
	 */
	if (!dev_priv->drrs.busy_frontbuffer_bits)
		schedule_delayed_work(&dev_priv->drrs.work,
				      msecs_to_jiffies(1000));
	mutex_unlock(&dev_priv->drrs.mutex);
}
/**
 * DOC: Display Refresh Rate Switching (DRRS)
 *
 * Display Refresh Rate Switching (DRRS) is a power conservation feature
 * which enables switching between low and high refresh rates,
 * dynamically, based on the usage scenario. This feature is applicable
 * for internal panels.
 *
 * Indication that the panel supports DRRS is given by the panel EDID, which
 * would list multiple refresh rates for one resolution.
 *
 * DRRS is of 2 types - static and seamless.
 * Static DRRS involves changing refresh rate (RR) by doing a full modeset
 * (may appear as a blink on screen) and is used in dock-undock scenarios.
 * Seamless DRRS involves changing RR without any visual effect to the user
 * and can be used during normal system usage. This is done by programming
 * certain registers.
 *
 * Support for static/seamless DRRS may be indicated in the VBT based on
 * inputs from the panel spec.
 *
 * DRRS saves power by switching to a low RR based on usage scenarios.
 *
 * The implementation is based on frontbuffer tracking implementation. When
 * there is a disturbance on the screen triggered by user activity or a periodic
 * system activity, DRRS is disabled (RR is changed to high RR). When there is
 * no movement on screen, after a timeout of 1 second, a switch to low RR is
 * made.
 *
 * For integration with frontbuffer tracking code, intel_edp_drrs_invalidate()
 * and intel_edp_drrs_flush() are called.
 *
 * DRRS can be further extended to support other internal panels and also
 * the scenario of video playback wherein RR is set based on the rate
 * requested by userspace.
 */
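
/*
 * Illustrative call flow (a sketch; the real call sites live in the
 * frontbuffer tracking code, not in this file):
 *
 *	intel_edp_drrs_invalidate(dev_priv, frontbuffer_bits);
 *		... CPU/GPU rendering to the frontbuffer ...
 *	intel_edp_drrs_flush(dev_priv, frontbuffer_bits);
 *
 * flush() re-arms the 1 second idleness timer; once it expires with no
 * busy bits left, intel_edp_drrs_downclock_work() switches the panel to
 * the low refresh rate.
 */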
/**
 * intel_dp_drrs_init - Init basic DRRS work and mutex.
 * @connector: eDP connector
 * @fixed_mode: preferred mode of panel
 *
 * This function is called only once at driver load to initialize basic
 * DRRS stuff.
 *
 * Returns:
 * Downclock mode if panel supports it, else return NULL.
 * DRRS support is determined by the presence of downclock mode (apart
 * from VBT setting).
 */
static struct drm_display_mode *
intel_dp_drrs_init(struct intel_connector *connector,
		   struct drm_display_mode *fixed_mode)
{
	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
	struct drm_display_mode *downclock_mode = NULL;

	INIT_DELAYED_WORK(&dev_priv->drrs.work, intel_edp_drrs_downclock_work);
	mutex_init(&dev_priv->drrs.mutex);

	if (INTEL_GEN(dev_priv) <= 6) {
		drm_dbg_kms(&dev_priv->drm,
			    "DRRS is only supported on Gen7 and above\n");
		return NULL;
	}

	if (dev_priv->vbt.drrs_type != SEAMLESS_DRRS_SUPPORT) {
		drm_dbg_kms(&dev_priv->drm, "VBT doesn't support DRRS\n");
		return NULL;
	}

	downclock_mode = intel_panel_edid_downclock_mode(connector, fixed_mode);
	if (!downclock_mode) {
		drm_dbg_kms(&dev_priv->drm,
			    "Downclock mode is not found. DRRS not supported\n");
		return NULL;
	}

	dev_priv->drrs.type = dev_priv->vbt.drrs_type;

	dev_priv->drrs.refresh_rate_type = DRRS_HIGH_RR;
	drm_dbg_kms(&dev_priv->drm,
		    "seamless DRRS supported for eDP panel.\n");
	return downclock_mode;
}
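
/*
 * Example (illustrative values only): a panel whose EDID lists both a
 * 60 Hz and a 40 Hz mode at the native resolution yields a 40 Hz
 * downclock_mode here, allowing seamless DRRS to toggle between the two
 * rates at runtime.
 */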
static bool intel_edp_init_connector(struct intel_dp *intel_dp,
				     struct intel_connector *intel_connector)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct drm_device *dev = &dev_priv->drm;
	struct drm_connector *connector = &intel_connector->base;
	struct drm_display_mode *fixed_mode = NULL;
	struct drm_display_mode *downclock_mode = NULL;
	bool has_dpcd;
	enum pipe pipe = INVALID_PIPE;
	intel_wakeref_t wakeref;
	struct edid *edid;

	if (!intel_dp_is_edp(intel_dp))
		return true;

	INIT_DELAYED_WORK(&intel_dp->panel_vdd_work, edp_panel_vdd_work);

	/*
	 * On IBX/CPT we may get here with LVDS already registered. Since the
	 * driver uses the only internal power sequencer available for both
	 * eDP and LVDS, bail out early in this case to prevent interfering
	 * with an already powered-on LVDS power sequencer.
	 */
	if (intel_get_lvds_encoder(dev_priv)) {
		drm_WARN_ON(dev,
			    !(HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv)));
		drm_info(&dev_priv->drm,
			 "LVDS was detected, not registering eDP\n");

		return false;
	}

	with_pps_lock(intel_dp, wakeref) {
		intel_dp_init_panel_power_timestamps(intel_dp);
		intel_dp_pps_init(intel_dp);
		intel_edp_panel_vdd_sanitize(intel_dp);
	}

	/* Cache DPCD and EDID for eDP. */
	has_dpcd = intel_edp_init_dpcd(intel_dp);

	if (!has_dpcd) {
		/* if this fails, presume the device is a ghost */
		drm_info(&dev_priv->drm,
			 "failed to retrieve link info, disabling eDP\n");
		goto out_vdd_off;
	}

	mutex_lock(&dev->mode_config.mutex);
	edid = drm_get_edid(connector, &intel_dp->aux.ddc);
	if (edid) {
		if (drm_add_edid_modes(connector, edid)) {
			drm_connector_update_edid_property(connector, edid);
			intel_dp->edid_quirks = drm_dp_get_edid_quirks(edid);
		} else {
			kfree(edid);
			edid = ERR_PTR(-EINVAL);
		}
	} else {
		edid = ERR_PTR(-ENOENT);
	}
	intel_connector->edid = edid;

	fixed_mode = intel_panel_edid_fixed_mode(intel_connector);
	if (fixed_mode)
		downclock_mode = intel_dp_drrs_init(intel_connector, fixed_mode);

	/* fall back to VBT if available for eDP */
	if (!fixed_mode)
		fixed_mode = intel_panel_vbt_fixed_mode(intel_connector);
	mutex_unlock(&dev->mode_config.mutex);

	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		/*
		 * Figure out the current pipe for the initial backlight setup.
		 * If the current pipe isn't valid, try the PPS pipe, and if
		 * that fails just assume pipe A.
		 */
		pipe = vlv_active_pipe(intel_dp);

		if (pipe != PIPE_A && pipe != PIPE_B)
			pipe = intel_dp->pps_pipe;

		if (pipe != PIPE_A && pipe != PIPE_B)
			pipe = PIPE_A;

		drm_dbg_kms(&dev_priv->drm,
			    "using pipe %c for initial backlight setup\n",
			    pipe_name(pipe));
	}

	intel_panel_init(&intel_connector->panel, fixed_mode, downclock_mode);
	intel_connector->panel.backlight.power = intel_edp_backlight_power;
	intel_panel_setup_backlight(connector, pipe);

	if (fixed_mode) {
		drm_connector_set_panel_orientation_with_quirk(connector,
				dev_priv->vbt.orientation,
				fixed_mode->hdisplay, fixed_mode->vdisplay);
	}

	return true;

out_vdd_off:
	cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
	/*
	 * vdd might still be enabled due to the delayed vdd off.
	 * Make sure vdd is actually turned off here.
	 */
	with_pps_lock(intel_dp, wakeref)
		edp_panel_vdd_off_sync(intel_dp);

	return false;
}
static void intel_dp_modeset_retry_work_fn(struct work_struct *work)
{
	struct intel_connector *intel_connector;
	struct drm_connector *connector;

	intel_connector = container_of(work, typeof(*intel_connector),
				       modeset_retry_work);
	connector = &intel_connector->base;
	DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n", connector->base.id,
		      connector->name);

	/* Grab the locks before changing connector property */
	mutex_lock(&connector->dev->mode_config.mutex);
	/*
	 * Set connector link status to BAD and send a Uevent to notify
	 * userspace to do a modeset.
	 */
	drm_connector_set_link_status_property(connector,
					       DRM_MODE_LINK_STATUS_BAD);
	mutex_unlock(&connector->dev->mode_config.mutex);
	/* Send Hotplug uevent so userspace can reprobe */
	drm_kms_helper_hotplug_event(connector->dev);
}
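
/*
 * Userspace contract (a sketch of the expected reaction, not code in this
 * driver): on the hotplug uevent, a compositor re-reads the connector's
 * "link-status" property and, if it reads back BAD, re-commits the current
 * mode so the link is retrained with the reduced link parameters.
 */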
bool
intel_dp_init_connector(struct intel_digital_port *dig_port,
			struct intel_connector *intel_connector)
{
	struct drm_connector *connector = &intel_connector->base;
	struct intel_dp *intel_dp = &dig_port->dp;
	struct intel_encoder *intel_encoder = &dig_port->base;
	struct drm_device *dev = intel_encoder->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum port port = intel_encoder->port;
	enum phy phy = intel_port_to_phy(dev_priv, port);
	int type;

	/* Initialize the work for modeset in case of link train failure */
	INIT_WORK(&intel_connector->modeset_retry_work,
		  intel_dp_modeset_retry_work_fn);

	if (drm_WARN(dev, dig_port->max_lanes < 1,
		     "Not enough lanes (%d) for DP on [ENCODER:%d:%s]\n",
		     dig_port->max_lanes, intel_encoder->base.base.id,
		     intel_encoder->base.name))
		return false;

	intel_dp_set_source_rates(intel_dp);

	intel_dp->reset_link_params = true;
	intel_dp->pps_pipe = INVALID_PIPE;
	intel_dp->active_pipe = INVALID_PIPE;

	/* Preserve the current hw state. */
	intel_dp->DP = intel_de_read(dev_priv, intel_dp->output_reg);
	intel_dp->attached_connector = intel_connector;

	if (intel_dp_is_port_edp(dev_priv, port)) {
		/*
		 * Currently we don't support eDP on TypeC ports, although in
		 * theory it could work on TypeC legacy ports.
		 */
		drm_WARN_ON(dev, intel_phy_is_tc(dev_priv, phy));
		type = DRM_MODE_CONNECTOR_eDP;
	} else {
		type = DRM_MODE_CONNECTOR_DisplayPort;
	}

	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		intel_dp->active_pipe = vlv_active_pipe(intel_dp);

	/*
	 * For eDP we always set the encoder type to INTEL_OUTPUT_EDP, but
	 * for DP the encoder type can be set by the caller to
	 * INTEL_OUTPUT_UNKNOWN for DDI, so don't rewrite it.
	 */
	if (type == DRM_MODE_CONNECTOR_eDP)
		intel_encoder->type = INTEL_OUTPUT_EDP;

	/* eDP only on port B and/or C on vlv/chv */
	if (drm_WARN_ON(dev, (IS_VALLEYVIEW(dev_priv) ||
			      IS_CHERRYVIEW(dev_priv)) &&
			intel_dp_is_edp(intel_dp) &&
			port != PORT_B && port != PORT_C))
		return false;

	drm_dbg_kms(&dev_priv->drm,
		    "Adding %s connector on [ENCODER:%d:%s]\n",
		    type == DRM_MODE_CONNECTOR_eDP ? "eDP" : "DP",
		    intel_encoder->base.base.id, intel_encoder->base.name);

	drm_connector_init(dev, connector, &intel_dp_connector_funcs, type);
	drm_connector_helper_add(connector, &intel_dp_connector_helper_funcs);

	if (!HAS_GMCH(dev_priv))
		connector->interlace_allowed = true;
	connector->doublescan_allowed = 0;

	intel_connector->polled = DRM_CONNECTOR_POLL_HPD;

	intel_dp_aux_init(intel_dp);

	intel_connector_attach_encoder(intel_connector, intel_encoder);

	if (HAS_DDI(dev_priv))
		intel_connector->get_hw_state = intel_ddi_connector_get_hw_state;
	else
		intel_connector->get_hw_state = intel_connector_get_hw_state;

	/* init MST on ports that can support it */
	intel_dp_mst_encoder_init(dig_port,
				  intel_connector->base.base.id);

	if (!intel_edp_init_connector(intel_dp, intel_connector)) {
		intel_dp_aux_fini(intel_dp);
		intel_dp_mst_encoder_cleanup(dig_port);
		goto fail;
	}

	intel_dp_add_properties(intel_dp, connector);

	if (is_hdcp_supported(dev_priv, port) && !intel_dp_is_edp(intel_dp)) {
		int ret = intel_dp_init_hdcp(dig_port, intel_connector);

		if (ret)
			drm_dbg_kms(&dev_priv->drm,
				    "HDCP init failed, skipping.\n");
	}

	/*
	 * For G4X desktop chips, PEG_BAND_GAP_DATA 3:0 must first be written
	 * 0xd. Failure to do so will result in spurious interrupts being
	 * generated on the port when a cable is not attached.
	 */
	if (IS_G45(dev_priv)) {
		u32 temp = intel_de_read(dev_priv, PEG_BAND_GAP_DATA);

		intel_de_write(dev_priv, PEG_BAND_GAP_DATA,
			       (temp & ~0xf) | 0xd);
	}

	intel_dp->frl.is_trained = false;
	intel_dp->frl.trained_rate_gbps = 0;

	return true;

fail:
	drm_connector_cleanup(connector);

	return false;
}
bool intel_dp_init(struct drm_i915_private *dev_priv,
		   i915_reg_t output_reg,
		   enum port port)
{
	struct intel_digital_port *dig_port;
	struct intel_encoder *intel_encoder;
	struct drm_encoder *encoder;
	struct intel_connector *intel_connector;

	dig_port = kzalloc(sizeof(*dig_port), GFP_KERNEL);
	if (!dig_port)
		return false;

	intel_connector = intel_connector_alloc();
	if (!intel_connector)
		goto err_connector_alloc;

	intel_encoder = &dig_port->base;
	encoder = &intel_encoder->base;

	mutex_init(&dig_port->hdcp_mutex);

	if (drm_encoder_init(&dev_priv->drm, &intel_encoder->base,
			     &intel_dp_enc_funcs, DRM_MODE_ENCODER_TMDS,
			     "DP %c", port_name(port)))
		goto err_encoder_init;

	intel_encoder->hotplug = intel_dp_hotplug;
	intel_encoder->compute_config = intel_dp_compute_config;
	intel_encoder->get_hw_state = intel_dp_get_hw_state;
	intel_encoder->get_config = intel_dp_get_config;
	intel_encoder->sync_state = intel_dp_sync_state;
	intel_encoder->initial_fastset_check = intel_dp_initial_fastset_check;
	intel_encoder->update_pipe = intel_panel_update_backlight;
	intel_encoder->suspend = intel_dp_encoder_suspend;
	intel_encoder->shutdown = intel_dp_encoder_shutdown;
	if (IS_CHERRYVIEW(dev_priv)) {
		intel_encoder->pre_pll_enable = chv_dp_pre_pll_enable;
		intel_encoder->pre_enable = chv_pre_enable_dp;
		intel_encoder->enable = vlv_enable_dp;
		intel_encoder->disable = vlv_disable_dp;
		intel_encoder->post_disable = chv_post_disable_dp;
		intel_encoder->post_pll_disable = chv_dp_post_pll_disable;
	} else if (IS_VALLEYVIEW(dev_priv)) {
		intel_encoder->pre_pll_enable = vlv_dp_pre_pll_enable;
		intel_encoder->pre_enable = vlv_pre_enable_dp;
		intel_encoder->enable = vlv_enable_dp;
		intel_encoder->disable = vlv_disable_dp;
		intel_encoder->post_disable = vlv_post_disable_dp;
	} else {
		intel_encoder->pre_enable = g4x_pre_enable_dp;
		intel_encoder->enable = g4x_enable_dp;
		intel_encoder->disable = g4x_disable_dp;
		intel_encoder->post_disable = g4x_post_disable_dp;
	}

	if ((IS_IVYBRIDGE(dev_priv) && port == PORT_A) ||
	    (HAS_PCH_CPT(dev_priv) && port != PORT_A))
		dig_port->dp.set_link_train = cpt_set_link_train;
	else
		dig_port->dp.set_link_train = g4x_set_link_train;

	if (IS_CHERRYVIEW(dev_priv))
		dig_port->dp.set_signal_levels = chv_set_signal_levels;
	else if (IS_VALLEYVIEW(dev_priv))
		dig_port->dp.set_signal_levels = vlv_set_signal_levels;
	else if (IS_IVYBRIDGE(dev_priv) && port == PORT_A)
		dig_port->dp.set_signal_levels = ivb_cpu_edp_set_signal_levels;
	else if (IS_GEN(dev_priv, 6) && port == PORT_A)
		dig_port->dp.set_signal_levels = snb_cpu_edp_set_signal_levels;
	else
		dig_port->dp.set_signal_levels = g4x_set_signal_levels;

	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv) ||
	    (HAS_PCH_SPLIT(dev_priv) && port != PORT_A)) {
		dig_port->dp.preemph_max = intel_dp_preemph_max_3;
		dig_port->dp.voltage_max = intel_dp_voltage_max_3;
	} else {
		dig_port->dp.preemph_max = intel_dp_preemph_max_2;
		dig_port->dp.voltage_max = intel_dp_voltage_max_2;
	}

	dig_port->dp.output_reg = output_reg;
	dig_port->max_lanes = 4;

	intel_encoder->type = INTEL_OUTPUT_DP;
	intel_encoder->power_domain = intel_port_to_power_domain(port);
	if (IS_CHERRYVIEW(dev_priv)) {
		if (port == PORT_D)
			intel_encoder->pipe_mask = BIT(PIPE_C);
		else
			intel_encoder->pipe_mask = BIT(PIPE_A) | BIT(PIPE_B);
	} else {
		intel_encoder->pipe_mask = ~0;
	}
	intel_encoder->cloneable = 0;
	intel_encoder->port = port;
	intel_encoder->hpd_pin = intel_hpd_pin_default(dev_priv, port);

	dig_port->hpd_pulse = intel_dp_hpd_pulse;

	if (HAS_GMCH(dev_priv)) {
		if (IS_GM45(dev_priv))
			dig_port->connected = gm45_digital_port_connected;
		else
			dig_port->connected = g4x_digital_port_connected;
	} else {
		if (port == PORT_A)
			dig_port->connected = ilk_digital_port_connected;
		else
			dig_port->connected = ibx_digital_port_connected;
	}

	if (port != PORT_A)
		intel_infoframe_init(dig_port);

	dig_port->aux_ch = intel_bios_port_aux_ch(dev_priv, port);
	if (!intel_dp_init_connector(dig_port, intel_connector))
		goto err_init_connector;

	return true;

err_init_connector:
	drm_encoder_cleanup(encoder);
err_encoder_init:
	kfree(intel_connector);
err_connector_alloc:
	kfree(dig_port);

	return false;
}
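
/*
 * Illustrative caller (a sketch; the real call sites are the per-platform
 * output setup paths, e.g. for g4x-class hardware):
 *
 *	if (intel_de_read(dev_priv, DP_B) & DP_DETECTED)
 *		intel_dp_init(dev_priv, DP_B, PORT_B);
 */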
void intel_dp_mst_suspend(struct drm_i915_private *dev_priv)
{
	struct intel_encoder *encoder;

	for_each_intel_encoder(&dev_priv->drm, encoder) {
		struct intel_dp *intel_dp;

		if (encoder->type != INTEL_OUTPUT_DDI)
			continue;

		intel_dp = enc_to_intel_dp(encoder);

		if (!intel_dp->can_mst)
			continue;

		if (intel_dp->is_mst)
			drm_dp_mst_topology_mgr_suspend(&intel_dp->mst_mgr);
	}
}
void intel_dp_mst_resume(struct drm_i915_private *dev_priv)
{
	struct intel_encoder *encoder;

	for_each_intel_encoder(&dev_priv->drm, encoder) {
		struct intel_dp *intel_dp;
		int ret;

		if (encoder->type != INTEL_OUTPUT_DDI)
			continue;

		intel_dp = enc_to_intel_dp(encoder);

		if (!intel_dp->can_mst)
			continue;

		ret = drm_dp_mst_topology_mgr_resume(&intel_dp->mst_mgr,
						     true);
		if (ret) {
			intel_dp->is_mst = false;
			drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr,
							false);
		}
	}
}