Git Repo - linux.git/blob - drivers/gpu/drm/i915/display/intel_dp.c
drm/i915/panel: mass rename functions to have intel_panel_ prefix
1 /*
2  * Copyright © 2008 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21  * IN THE SOFTWARE.
22  *
23  * Authors:
24  *    Keith Packard <[email protected]>
25  *
26  */
27
28 #include <linux/export.h>
29 #include <linux/i2c.h>
30 #include <linux/notifier.h>
31 #include <linux/slab.h>
32 #include <linux/types.h>
33
34 #include <asm/byteorder.h>
35
36 #include <drm/drm_atomic_helper.h>
37 #include <drm/drm_crtc.h>
38 #include <drm/drm_dp_helper.h>
39 #include <drm/drm_edid.h>
40 #include <drm/drm_probe_helper.h>
41
42 #include "g4x_dp.h"
43 #include "i915_debugfs.h"
44 #include "i915_drv.h"
45 #include "intel_atomic.h"
46 #include "intel_audio.h"
47 #include "intel_backlight.h"
48 #include "intel_connector.h"
49 #include "intel_ddi.h"
50 #include "intel_de.h"
51 #include "intel_display_types.h"
52 #include "intel_dp.h"
53 #include "intel_dp_aux.h"
54 #include "intel_dp_hdcp.h"
55 #include "intel_dp_link_training.h"
56 #include "intel_dp_mst.h"
57 #include "intel_dpio_phy.h"
58 #include "intel_dpll.h"
59 #include "intel_fifo_underrun.h"
60 #include "intel_hdcp.h"
61 #include "intel_hdmi.h"
62 #include "intel_hotplug.h"
63 #include "intel_lspcon.h"
64 #include "intel_lvds.h"
65 #include "intel_panel.h"
66 #include "intel_pps.h"
67 #include "intel_psr.h"
68 #include "intel_sideband.h"
69 #include "intel_tc.h"
70 #include "intel_vdsc.h"
71 #include "intel_vrr.h"
72
73 #define DP_DPRX_ESI_LEN 14
74
75 /* DP DSC throughput values used for slice count calculations KPixels/s */
76 #define DP_DSC_PEAK_PIXEL_RATE                  2720000
77 #define DP_DSC_MAX_ENC_THROUGHPUT_0             340000
78 #define DP_DSC_MAX_ENC_THROUGHPUT_1             400000
79
80 /* DP DSC FEC Overhead factor = 1/(0.972261) */
81 #define DP_DSC_FEC_OVERHEAD_FACTOR              972261
82
83 /* Compliance test status bits  */
84 #define INTEL_DP_RESOLUTION_SHIFT_MASK  0
85 #define INTEL_DP_RESOLUTION_PREFERRED   (1 << INTEL_DP_RESOLUTION_SHIFT_MASK)
86 #define INTEL_DP_RESOLUTION_STANDARD    (2 << INTEL_DP_RESOLUTION_SHIFT_MASK)
87 #define INTEL_DP_RESOLUTION_FAILSAFE    (3 << INTEL_DP_RESOLUTION_SHIFT_MASK)
88
89
90 /* Constants for DP DSC configurations */
91 static const u8 valid_dsc_bpp[] = {6, 8, 10, 12, 15};
92
93 /* With a single pipe configuration, HW is capable of supporting a maximum
94  * of 4 slices per line.
95  */
96 static const u8 valid_dsc_slicecount[] = {1, 2, 4};
97
98 /**
99  * intel_dp_is_edp - is the given port attached to an eDP panel (either CPU or PCH)
100  * @intel_dp: DP struct
101  *
102  * If a CPU or PCH DP output is attached to an eDP panel, this function
103  * will return true, and false otherwise.
104  */
105 bool intel_dp_is_edp(struct intel_dp *intel_dp)
106 {
107         struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
108
109         return dig_port->base.type == INTEL_OUTPUT_EDP;
110 }
111
112 static void intel_dp_unset_edid(struct intel_dp *intel_dp);
113 static int intel_dp_dsc_compute_bpp(struct intel_dp *intel_dp, u8 dsc_max_bpc);
114
115 /* update sink rates from dpcd */
116 static void intel_dp_set_sink_rates(struct intel_dp *intel_dp)
117 {
118         static const int dp_rates[] = {
119                 162000, 270000, 540000, 810000
120         };
121         int i, max_rate;
122         int max_lttpr_rate;
123
124         if (drm_dp_has_quirk(&intel_dp->desc, DP_DPCD_QUIRK_CAN_DO_MAX_LINK_RATE_3_24_GBPS)) {
125                 /* Needed, e.g., for Apple MBP 2017, 15 inch eDP Retina panel */
126                 static const int quirk_rates[] = { 162000, 270000, 324000 };
127
128                 memcpy(intel_dp->sink_rates, quirk_rates, sizeof(quirk_rates));
129                 intel_dp->num_sink_rates = ARRAY_SIZE(quirk_rates);
130
131                 return;
132         }
133
134         max_rate = drm_dp_bw_code_to_link_rate(intel_dp->dpcd[DP_MAX_LINK_RATE]);
135         max_lttpr_rate = drm_dp_lttpr_max_link_rate(intel_dp->lttpr_common_caps);
136         if (max_lttpr_rate)
137                 max_rate = min(max_rate, max_lttpr_rate);
138
139         for (i = 0; i < ARRAY_SIZE(dp_rates); i++) {
140                 if (dp_rates[i] > max_rate)
141                         break;
142                 intel_dp->sink_rates[i] = dp_rates[i];
143         }
144
145         /*
146          * Sink rates for 128b/132b. If set, sink should support all 8b/10b
147          * rates and 10 Gbps.
148          */
149         if (intel_dp->dpcd[DP_MAIN_LINK_CHANNEL_CODING] & DP_CAP_ANSI_128B132B) {
150                 u8 uhbr_rates = 0;
151
152                 BUILD_BUG_ON(ARRAY_SIZE(intel_dp->sink_rates) < ARRAY_SIZE(dp_rates) + 3);
153
154                 drm_dp_dpcd_readb(&intel_dp->aux,
155                                   DP_128B132B_SUPPORTED_LINK_RATES, &uhbr_rates);
156
157                 if (uhbr_rates & DP_UHBR10)
158                         intel_dp->sink_rates[i++] = 1000000;
159                 if (uhbr_rates & DP_UHBR13_5)
160                         intel_dp->sink_rates[i++] = 1350000;
161                 if (uhbr_rates & DP_UHBR20)
162                         intel_dp->sink_rates[i++] = 2000000;
163         }
164
165         intel_dp->num_sink_rates = i;
166 }
167
168 /* Get length of rates array potentially limited by max_rate. */
169 static int intel_dp_rate_limit_len(const int *rates, int len, int max_rate)
170 {
171         int i;
172
173         /* Limit results by potentially reduced max rate */
174         for (i = 0; i < len; i++) {
175                 if (rates[len - i - 1] <= max_rate)
176                         return len - i;
177         }
178
179         return 0;
180 }
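/*
 * Illustrative example: for rates {162000, 270000, 540000, 810000} and
 * max_rate 540000, the scan from the end finds 540000 <= 540000 at the
 * third entry and returns a length of 3.
 */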
181
182 /* Get length of common rates array potentially limited by max_rate. */
183 static int intel_dp_common_len_rate_limit(const struct intel_dp *intel_dp,
184                                           int max_rate)
185 {
186         return intel_dp_rate_limit_len(intel_dp->common_rates,
187                                        intel_dp->num_common_rates, max_rate);
188 }
189
190 /* Theoretical max between source and sink */
191 static int intel_dp_max_common_rate(struct intel_dp *intel_dp)
192 {
193         return intel_dp->common_rates[intel_dp->num_common_rates - 1];
194 }
195
196 /* Theoretical max between source and sink */
197 static int intel_dp_max_common_lane_count(struct intel_dp *intel_dp)
198 {
199         struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
200         int source_max = dig_port->max_lanes;
201         int sink_max = drm_dp_max_lane_count(intel_dp->dpcd);
202         int fia_max = intel_tc_port_fia_max_lane_count(dig_port);
203         int lttpr_max = drm_dp_lttpr_max_lane_count(intel_dp->lttpr_common_caps);
204
205         if (lttpr_max)
206                 sink_max = min(sink_max, lttpr_max);
207
208         return min3(source_max, sink_max, fia_max);
209 }
210
211 int intel_dp_max_lane_count(struct intel_dp *intel_dp)
212 {
213         return intel_dp->max_link_lane_count;
214 }
215
216 /*
217  * The required data bandwidth for a mode with given pixel clock and bpp. This
218  * is the required net bandwidth independent of the data bandwidth efficiency.
219  */
220 int
221 intel_dp_link_required(int pixel_clock, int bpp)
222 {
223         /* pixel_clock is in kHz, divide bpp by 8 for bit to Byte conversion */
224         return DIV_ROUND_UP(pixel_clock * bpp, 8);
225 }
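/*
 * Illustrative example: a 1920x1080@60 mode with a 148500 kHz pixel
 * clock at 24 bpp needs 148500 * 24 / 8 = 445500 kB/s of link data
 * bandwidth.
 */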
226
227 /*
228  * Given a link rate and lanes, get the data bandwidth.
229  *
230  * Data bandwidth is the actual payload rate, which depends on the data
231  * bandwidth efficiency and the link rate.
232  *
233  * For 8b/10b channel encoding, SST and non-FEC, the data bandwidth efficiency
234  * is 80%. For example, for a 1.62 Gbps link, 1.62*10^9 bps * 0.80 * (1/8) =
235  * 162000 kBps. With 8-bit symbols, we have 162000 kHz symbol clock. Just by
236  * coincidence, the port clock in kHz matches the data bandwidth in kBps, and
237  * they equal the link bit rate in Gbps multiplied by 100000. (Note that this no
238  * longer holds for data bandwidth as soon as FEC or MST is taken into account!)
239  *
240  * For 128b/132b channel encoding, the data bandwidth efficiency is 96.71%. For
241  * example, for a 10 Gbps link, 10*10^9 bps * 0.9671 * (1/8) = 1208875
242  * kBps. With 32-bit symbols, we have 312500 kHz symbol clock. The value 1000000
243  * does not match the symbol clock, the port clock (not even if you think in
244  * terms of a byte clock), nor the data bandwidth. It only matches the link bit
245  * rate in units of 10000 bps.
246  */
247 int
248 intel_dp_max_data_rate(int max_link_rate, int max_lanes)
249 {
250         if (max_link_rate >= 1000000) {
251                 /*
252                  * UHBR rates always use 128b/132b channel encoding, and have
253                  * 96.71% data bandwidth efficiency. Consider max_link_rate the
254                  * link bit rate in units of 10000 bps.
255                  */
256                 int max_link_rate_kbps = max_link_rate * 10;
257
258                 max_link_rate_kbps = DIV_ROUND_CLOSEST_ULL(max_link_rate_kbps * 9671, 10000);
259                 max_link_rate = max_link_rate_kbps / 8;
260         }
261
262         /*
263          * Lower than UHBR rates always use 8b/10b channel encoding, and have
264          * 80% data bandwidth efficiency for SST non-FEC. However, this turns
265          * out to be a nop by coincidence, and can be skipped:
266          *
267          *      int max_link_rate_kbps = max_link_rate * 10;
268          *      max_link_rate_kbps = DIV_ROUND_CLOSEST_ULL(max_link_rate_kbps * 8, 10);
269          *      max_link_rate = max_link_rate_kbps / 8;
270          */
271
272         return max_link_rate * max_lanes;
273 }
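/*
 * Illustrative examples: HBR2 x4 (8b/10b) gives 540000 * 4 = 2160000 kB/s,
 * while UHBR10 x4 (128b/132b) gives (10000000 * 9671 / 10000) / 8 * 4 =
 * 4835500 kB/s, matching the 1208875 kB/s per-lane figure above.
 */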
274
275 bool intel_dp_can_bigjoiner(struct intel_dp *intel_dp)
276 {
277         struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
278         struct intel_encoder *encoder = &intel_dig_port->base;
279         struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
280
281         return DISPLAY_VER(dev_priv) >= 12 ||
282                 (DISPLAY_VER(dev_priv) == 11 &&
283                  encoder->port != PORT_A);
284 }
285
286 static int dg2_max_source_rate(struct intel_dp *intel_dp)
287 {
288         return intel_dp_is_edp(intel_dp) ? 810000 : 1350000;
289 }
290
291 static int icl_max_source_rate(struct intel_dp *intel_dp)
292 {
293         struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
294         struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
295         enum phy phy = intel_port_to_phy(dev_priv, dig_port->base.port);
296
297         if (intel_phy_is_combo(dev_priv, phy) &&
298             !intel_dp_is_edp(intel_dp))
299                 return 540000;
300
301         return 810000;
302 }
303
304 static int ehl_max_source_rate(struct intel_dp *intel_dp)
305 {
306         if (intel_dp_is_edp(intel_dp))
307                 return 540000;
308
309         return 810000;
310 }
311
312 static void
313 intel_dp_set_source_rates(struct intel_dp *intel_dp)
314 {
315         /* The values must be in increasing order */
316         static const int icl_rates[] = {
317                 162000, 216000, 270000, 324000, 432000, 540000, 648000, 810000,
318                 1000000, 1350000,
319         };
320         static const int bxt_rates[] = {
321                 162000, 216000, 243000, 270000, 324000, 432000, 540000
322         };
323         static const int skl_rates[] = {
324                 162000, 216000, 270000, 324000, 432000, 540000
325         };
326         static const int hsw_rates[] = {
327                 162000, 270000, 540000
328         };
329         static const int g4x_rates[] = {
330                 162000, 270000
331         };
332         struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
333         struct intel_encoder *encoder = &dig_port->base;
334         struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
335         const int *source_rates;
336         int size, max_rate = 0, vbt_max_rate;
337
338         /* This should only be done once */
339         drm_WARN_ON(&dev_priv->drm,
340                     intel_dp->source_rates || intel_dp->num_source_rates);
341
342         if (DISPLAY_VER(dev_priv) >= 11) {
343                 source_rates = icl_rates;
344                 size = ARRAY_SIZE(icl_rates);
345                 if (IS_DG2(dev_priv))
346                         max_rate = dg2_max_source_rate(intel_dp);
347                 else if (IS_JSL_EHL(dev_priv))
348                         max_rate = ehl_max_source_rate(intel_dp);
349                 else
350                         max_rate = icl_max_source_rate(intel_dp);
351         } else if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) {
352                 source_rates = bxt_rates;
353                 size = ARRAY_SIZE(bxt_rates);
354         } else if (DISPLAY_VER(dev_priv) == 9) {
355                 source_rates = skl_rates;
356                 size = ARRAY_SIZE(skl_rates);
357         } else if ((IS_HASWELL(dev_priv) && !IS_HSW_ULX(dev_priv)) ||
358                    IS_BROADWELL(dev_priv)) {
359                 source_rates = hsw_rates;
360                 size = ARRAY_SIZE(hsw_rates);
361         } else {
362                 source_rates = g4x_rates;
363                 size = ARRAY_SIZE(g4x_rates);
364         }
365
366         vbt_max_rate = intel_bios_dp_max_link_rate(encoder);
367         if (max_rate && vbt_max_rate)
368                 max_rate = min(max_rate, vbt_max_rate);
369         else if (vbt_max_rate)
370                 max_rate = vbt_max_rate;
371
372         if (max_rate)
373                 size = intel_dp_rate_limit_len(source_rates, size, max_rate);
374
375         intel_dp->source_rates = source_rates;
376         intel_dp->num_source_rates = size;
377 }
378
379 static int intersect_rates(const int *source_rates, int source_len,
380                            const int *sink_rates, int sink_len,
381                            int *common_rates)
382 {
383         int i = 0, j = 0, k = 0;
384
385         while (i < source_len && j < sink_len) {
386                 if (source_rates[i] == sink_rates[j]) {
387                         if (WARN_ON(k >= DP_MAX_SUPPORTED_RATES))
388                                 return k;
389                         common_rates[k] = source_rates[i];
390                         ++k;
391                         ++i;
392                         ++j;
393                 } else if (source_rates[i] < sink_rates[j]) {
394                         ++i;
395                 } else {
396                         ++j;
397                 }
398         }
399         return k;
400 }
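/*
 * Illustrative example: intersecting source rates {162000, 216000, 270000,
 * 540000} with sink rates {162000, 270000, 540000, 810000} walks both
 * sorted arrays in lockstep and yields the common rates {162000, 270000,
 * 540000}, returning 3.
 */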
401
402 /* return index of rate in rates array, or -1 if not found */
403 static int intel_dp_rate_index(const int *rates, int len, int rate)
404 {
405         int i;
406
407         for (i = 0; i < len; i++)
408                 if (rate == rates[i])
409                         return i;
410
411         return -1;
412 }
413
414 static void intel_dp_set_common_rates(struct intel_dp *intel_dp)
415 {
416         struct drm_i915_private *i915 = dp_to_i915(intel_dp);
417
418         drm_WARN_ON(&i915->drm,
419                     !intel_dp->num_source_rates || !intel_dp->num_sink_rates);
420
421         intel_dp->num_common_rates = intersect_rates(intel_dp->source_rates,
422                                                      intel_dp->num_source_rates,
423                                                      intel_dp->sink_rates,
424                                                      intel_dp->num_sink_rates,
425                                                      intel_dp->common_rates);
426
427         /* Paranoia, there should always be something in common. */
428         if (drm_WARN_ON(&i915->drm, intel_dp->num_common_rates == 0)) {
429                 intel_dp->common_rates[0] = 162000;
430                 intel_dp->num_common_rates = 1;
431         }
432 }
433
434 static bool intel_dp_link_params_valid(struct intel_dp *intel_dp, int link_rate,
435                                        u8 lane_count)
436 {
437         /*
438          * FIXME: we need to synchronize the current link parameters with
439          * hardware readout. Currently fast link training doesn't work on
440          * boot-up.
441          */
442         if (link_rate == 0 ||
443             link_rate > intel_dp->max_link_rate)
444                 return false;
445
446         if (lane_count == 0 ||
447             lane_count > intel_dp_max_lane_count(intel_dp))
448                 return false;
449
450         return true;
451 }
452
453 static bool intel_dp_can_link_train_fallback_for_edp(struct intel_dp *intel_dp,
454                                                      int link_rate,
455                                                      u8 lane_count)
456 {
457         const struct drm_display_mode *fixed_mode =
458                 intel_dp->attached_connector->panel.fixed_mode;
459         int mode_rate, max_rate;
460
461         mode_rate = intel_dp_link_required(fixed_mode->clock, 18);
462         max_rate = intel_dp_max_data_rate(link_rate, lane_count);
463         if (mode_rate > max_rate)
464                 return false;
465
466         return true;
467 }
468
469 int intel_dp_get_link_train_fallback_values(struct intel_dp *intel_dp,
470                                             int link_rate, u8 lane_count)
471 {
472         struct drm_i915_private *i915 = dp_to_i915(intel_dp);
473         int index;
474
475         /*
476          * TODO: Enable fallback on MST links once MST link compute can handle
477          * the fallback params.
478          */
479         if (intel_dp->is_mst) {
480                 drm_err(&i915->drm, "Link Training Unsuccessful\n");
481                 return -1;
482         }
483
484         if (intel_dp_is_edp(intel_dp) && !intel_dp->use_max_params) {
485                 drm_dbg_kms(&i915->drm,
486                             "Retrying Link training for eDP with max parameters\n");
487                 intel_dp->use_max_params = true;
488                 return 0;
489         }
490
491         index = intel_dp_rate_index(intel_dp->common_rates,
492                                     intel_dp->num_common_rates,
493                                     link_rate);
494         if (index > 0) {
495                 if (intel_dp_is_edp(intel_dp) &&
496                     !intel_dp_can_link_train_fallback_for_edp(intel_dp,
497                                                               intel_dp->common_rates[index - 1],
498                                                               lane_count)) {
499                         drm_dbg_kms(&i915->drm,
500                                     "Retrying Link training for eDP with same parameters\n");
501                         return 0;
502                 }
503                 intel_dp->max_link_rate = intel_dp->common_rates[index - 1];
504                 intel_dp->max_link_lane_count = lane_count;
505         } else if (lane_count > 1) {
506                 if (intel_dp_is_edp(intel_dp) &&
507                     !intel_dp_can_link_train_fallback_for_edp(intel_dp,
508                                                               intel_dp_max_common_rate(intel_dp),
509                                                               lane_count >> 1)) {
510                         drm_dbg_kms(&i915->drm,
511                                     "Retrying Link training for eDP with same parameters\n");
512                         return 0;
513                 }
514                 intel_dp->max_link_rate = intel_dp_max_common_rate(intel_dp);
515                 intel_dp->max_link_lane_count = lane_count >> 1;
516         } else {
517                 drm_err(&i915->drm, "Link Training Unsuccessful\n");
518                 return -1;
519         }
520
521         return 0;
522 }
523
524 u32 intel_dp_mode_to_fec_clock(u32 mode_clock)
525 {
526         return div_u64(mul_u32_u32(mode_clock, 1000000U),
527                        DP_DSC_FEC_OVERHEAD_FACTOR);
528 }
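/*
 * Illustrative example: 1000000 / 972261 is roughly 1.0285, so a 148500 kHz
 * mode clock maps to roughly 152700 kHz, i.e. about 2.85% of extra FEC
 * overhead on top of the mode clock.
 */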
529
530 static int
531 small_joiner_ram_size_bits(struct drm_i915_private *i915)
532 {
533         if (DISPLAY_VER(i915) >= 11)
534                 return 7680 * 8;
535         else
536                 return 6144 * 8;
537 }
538
539 static u16 intel_dp_dsc_get_output_bpp(struct drm_i915_private *i915,
540                                        u32 link_clock, u32 lane_count,
541                                        u32 mode_clock, u32 mode_hdisplay,
542                                        bool bigjoiner,
543                                        u32 pipe_bpp)
544 {
545         u32 bits_per_pixel, max_bpp_small_joiner_ram;
546         int i;
547
548         /*
549          * Available Link Bandwidth (Kbits/sec) = (NumberOfLanes) *
550          * (LinkSymbolClock) * 8 * (TimeSlotsPerMTP)
551          * for SST -> TimeSlotsPerMTP is 1,
552          * for MST -> TimeSlotsPerMTP has to be calculated
553          */
554         bits_per_pixel = (link_clock * lane_count * 8) /
555                          intel_dp_mode_to_fec_clock(mode_clock);
556         drm_dbg_kms(&i915->drm, "Max link bpp: %u\n", bits_per_pixel);
557
558         /* Small Joiner Check: output bpp <= joiner RAM (bits) / Horiz. width */
559         max_bpp_small_joiner_ram = small_joiner_ram_size_bits(i915) /
560                 mode_hdisplay;
561
562         if (bigjoiner)
563                 max_bpp_small_joiner_ram *= 2;
564
565         drm_dbg_kms(&i915->drm, "Max small joiner bpp: %u\n",
566                     max_bpp_small_joiner_ram);
567
568         /*
569          * Greatest allowed DSC BPP = MIN (output BPP from available Link BW
570          * check, output bpp from small joiner RAM check)
571          */
572         bits_per_pixel = min(bits_per_pixel, max_bpp_small_joiner_ram);
573
574         if (bigjoiner) {
575                 u32 max_bpp_bigjoiner =
576                         i915->max_cdclk_freq * 48 /
577                         intel_dp_mode_to_fec_clock(mode_clock);
578
579                 DRM_DEBUG_KMS("Max big joiner bpp: %u\n", max_bpp_bigjoiner);
580                 bits_per_pixel = min(bits_per_pixel, max_bpp_bigjoiner);
581         }
582
583         /* Error out if the max bpp is less than smallest allowed valid bpp */
584         if (bits_per_pixel < valid_dsc_bpp[0]) {
585                 drm_dbg_kms(&i915->drm, "Unsupported BPP %u, min %u\n",
586                             bits_per_pixel, valid_dsc_bpp[0]);
587                 return 0;
588         }
589
590         /* From XE_LPD onwards we support compressed bpp from bpc up to uncompressed bpp-1 */
591         if (DISPLAY_VER(i915) >= 13) {
592                 bits_per_pixel = min(bits_per_pixel, pipe_bpp - 1);
593         } else {
594                 /* Find the nearest match in the array of known BPPs from VESA */
595                 for (i = 0; i < ARRAY_SIZE(valid_dsc_bpp) - 1; i++) {
596                         if (bits_per_pixel < valid_dsc_bpp[i + 1])
597                                 break;
598                 }
599                 bits_per_pixel = valid_dsc_bpp[i];
600         }
601
602         /*
603          * Compressed BPP is in U6.4 format, so multiply by 16; for Gen 11
604          * the fractional part is 0.
605          */
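        /*
         * Illustrative example: a computed value of 12 bpp is returned as
         * 12 << 4 = 192 in U6.4 fixed point.
         */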
606         return bits_per_pixel << 4;
607 }
608
609 static u8 intel_dp_dsc_get_slice_count(struct intel_dp *intel_dp,
610                                        int mode_clock, int mode_hdisplay,
611                                        bool bigjoiner)
612 {
613         struct drm_i915_private *i915 = dp_to_i915(intel_dp);
614         u8 min_slice_count, i;
615         int max_slice_width;
616
617         if (mode_clock <= DP_DSC_PEAK_PIXEL_RATE)
618                 min_slice_count = DIV_ROUND_UP(mode_clock,
619                                                DP_DSC_MAX_ENC_THROUGHPUT_0);
620         else
621                 min_slice_count = DIV_ROUND_UP(mode_clock,
622                                                DP_DSC_MAX_ENC_THROUGHPUT_1);
623
624         max_slice_width = drm_dp_dsc_sink_max_slice_width(intel_dp->dsc_dpcd);
625         if (max_slice_width < DP_DSC_MIN_SLICE_WIDTH_VALUE) {
626                 drm_dbg_kms(&i915->drm,
627                             "Unsupported slice width %d by DP DSC Sink device\n",
628                             max_slice_width);
629                 return 0;
630         }
631         /* Also take into account max slice width */
632         min_slice_count = max_t(u8, min_slice_count,
633                                 DIV_ROUND_UP(mode_hdisplay,
634                                              max_slice_width));
635
636         /* Find the closest match to the valid slice count values */
637         for (i = 0; i < ARRAY_SIZE(valid_dsc_slicecount); i++) {
638                 u8 test_slice_count = valid_dsc_slicecount[i] << bigjoiner;
639
640                 if (test_slice_count >
641                     drm_dp_dsc_sink_max_slice_count(intel_dp->dsc_dpcd, false))
642                         break;
643
644                 /* big joiner needs small joiner to be enabled */
645                 if (bigjoiner && test_slice_count < 4)
646                         continue;
647
648                 if (min_slice_count <= test_slice_count)
649                         return test_slice_count;
650         }
651
652         drm_dbg_kms(&i915->drm, "Unsupported Slice Count %d\n",
653                     min_slice_count);
654         return 0;
655 }
656
657 static enum intel_output_format
658 intel_dp_output_format(struct drm_connector *connector,
659                        const struct drm_display_mode *mode)
660 {
661         struct intel_dp *intel_dp = intel_attached_dp(to_intel_connector(connector));
662         const struct drm_display_info *info = &connector->display_info;
663
664         if (!connector->ycbcr_420_allowed ||
665             !drm_mode_is_420_only(info, mode))
666                 return INTEL_OUTPUT_FORMAT_RGB;
667
668         if (intel_dp->dfp.rgb_to_ycbcr &&
669             intel_dp->dfp.ycbcr_444_to_420)
670                 return INTEL_OUTPUT_FORMAT_RGB;
671
672         if (intel_dp->dfp.ycbcr_444_to_420)
673                 return INTEL_OUTPUT_FORMAT_YCBCR444;
674         else
675                 return INTEL_OUTPUT_FORMAT_YCBCR420;
676 }
677
678 int intel_dp_min_bpp(enum intel_output_format output_format)
679 {
680         if (output_format == INTEL_OUTPUT_FORMAT_RGB)
681                 return 6 * 3;
682         else
683                 return 8 * 3;
684 }
685
686 static int intel_dp_output_bpp(enum intel_output_format output_format, int bpp)
687 {
688         /*
689          * The bpp value was assumed to be for RGB format. For YCbCr 4:2:0
690          * output, the number of bits per pixel will be half the number
691          * used for an RGB pixel.
692          */
693         if (output_format == INTEL_OUTPUT_FORMAT_YCBCR420)
694                 bpp /= 2;
695
696         return bpp;
697 }
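/*
 * Illustrative example: a 30 bpp (10 bpc) RGB value becomes 15 bpp for
 * YCbCr 4:2:0 output, since only half the bits per pixel cross the link.
 */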
698
699 static int
700 intel_dp_mode_min_output_bpp(struct drm_connector *connector,
701                              const struct drm_display_mode *mode)
702 {
703         enum intel_output_format output_format =
704                 intel_dp_output_format(connector, mode);
705
706         return intel_dp_output_bpp(output_format, intel_dp_min_bpp(output_format));
707 }
708
709 static bool intel_dp_hdisplay_bad(struct drm_i915_private *dev_priv,
710                                   int hdisplay)
711 {
712         /*
713          * Older platforms don't like hdisplay==4096 with DP.
714          *
715          * On ILK/SNB/IVB the pipe seems to be somewhat running (scanline
716          * and frame counter increment), but we don't get vblank interrupts,
717          * and the pipe underruns immediately. The link also doesn't seem
718          * to get trained properly.
719          *
720          * On CHV the vblank interrupts don't seem to disappear but
721          * otherwise the symptoms are similar.
722          *
723          * TODO: confirm the behaviour on HSW+
724          */
725         return hdisplay == 4096 && !HAS_DDI(dev_priv);
726 }
727
728 static enum drm_mode_status
729 intel_dp_mode_valid_downstream(struct intel_connector *connector,
730                                const struct drm_display_mode *mode,
731                                int target_clock)
732 {
733         struct intel_dp *intel_dp = intel_attached_dp(connector);
734         const struct drm_display_info *info = &connector->base.display_info;
735         int tmds_clock;
736
737         /* If PCON supports FRL MODE, check FRL bandwidth constraints */
738         if (intel_dp->dfp.pcon_max_frl_bw) {
739                 int target_bw;
740                 int max_frl_bw;
741                 int bpp = intel_dp_mode_min_output_bpp(&connector->base, mode);
742
743                 target_bw = bpp * target_clock;
744
745                 max_frl_bw = intel_dp->dfp.pcon_max_frl_bw;
746
747                 /* converting bw from Gbps to Kbps */
748                 max_frl_bw = max_frl_bw * 1000000;
749
750                 if (target_bw > max_frl_bw)
751                         return MODE_CLOCK_HIGH;
752
753                 return MODE_OK;
754         }
755
756         if (intel_dp->dfp.max_dotclock &&
757             target_clock > intel_dp->dfp.max_dotclock)
758                 return MODE_CLOCK_HIGH;
759
760         /* Assume 8bpc for the DP++/HDMI/DVI TMDS clock check */
761         tmds_clock = target_clock;
762         if (drm_mode_is_420_only(info, mode))
763                 tmds_clock /= 2;
764
765         if (intel_dp->dfp.min_tmds_clock &&
766             tmds_clock < intel_dp->dfp.min_tmds_clock)
767                 return MODE_CLOCK_LOW;
768         if (intel_dp->dfp.max_tmds_clock &&
769             tmds_clock > intel_dp->dfp.max_tmds_clock)
770                 return MODE_CLOCK_HIGH;
771
772         return MODE_OK;
773 }
774
775 static enum drm_mode_status
776 intel_dp_mode_valid(struct drm_connector *connector,
777                     struct drm_display_mode *mode)
778 {
779         struct intel_dp *intel_dp = intel_attached_dp(to_intel_connector(connector));
780         struct intel_connector *intel_connector = to_intel_connector(connector);
781         struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode;
782         struct drm_i915_private *dev_priv = to_i915(connector->dev);
783         int target_clock = mode->clock;
784         int max_rate, mode_rate, max_lanes, max_link_clock;
785         int max_dotclk = dev_priv->max_dotclk_freq;
786         u16 dsc_max_output_bpp = 0;
787         u8 dsc_slice_count = 0;
788         enum drm_mode_status status;
789         bool dsc = false, bigjoiner = false;
790
791         if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
792                 return MODE_NO_DBLESCAN;
793
794         if (mode->flags & DRM_MODE_FLAG_DBLCLK)
795                 return MODE_H_ILLEGAL;
796
797         if (intel_dp_is_edp(intel_dp) && fixed_mode) {
798                 if (mode->hdisplay != fixed_mode->hdisplay)
799                         return MODE_PANEL;
800
801                 if (mode->vdisplay != fixed_mode->vdisplay)
802                         return MODE_PANEL;
803
804                 target_clock = fixed_mode->clock;
805         }
806
807         if (mode->clock < 10000)
808                 return MODE_CLOCK_LOW;
809
810         if ((target_clock > max_dotclk || mode->hdisplay > 5120) &&
811             intel_dp_can_bigjoiner(intel_dp)) {
812                 bigjoiner = true;
813                 max_dotclk *= 2;
814         }
815         if (target_clock > max_dotclk)
816                 return MODE_CLOCK_HIGH;
817
818         max_link_clock = intel_dp_max_link_rate(intel_dp);
819         max_lanes = intel_dp_max_lane_count(intel_dp);
820
821         max_rate = intel_dp_max_data_rate(max_link_clock, max_lanes);
822         mode_rate = intel_dp_link_required(target_clock,
823                                            intel_dp_mode_min_output_bpp(connector, mode));
824
825         if (intel_dp_hdisplay_bad(dev_priv, mode->hdisplay))
826                 return MODE_H_ILLEGAL;
827
828         /*
829          * Output bpp is stored in 6.4 format so right shift by 4 to get the
830          * integer value since we support only integer values of bpp.
831          */
832         if (DISPLAY_VER(dev_priv) >= 10 &&
833             drm_dp_sink_supports_dsc(intel_dp->dsc_dpcd)) {
834                 /*
835                  * TBD pass the connector BPC,
836                  * for now U8_MAX so that max BPC on that platform would be picked
837                  */
838                 int pipe_bpp = intel_dp_dsc_compute_bpp(intel_dp, U8_MAX);
839
840                 if (intel_dp_is_edp(intel_dp)) {
841                         dsc_max_output_bpp =
842                                 drm_edp_dsc_sink_output_bpp(intel_dp->dsc_dpcd) >> 4;
843                         dsc_slice_count =
844                                 drm_dp_dsc_sink_max_slice_count(intel_dp->dsc_dpcd,
845                                                                 true);
846                 } else if (drm_dp_sink_supports_fec(intel_dp->fec_capable)) {
847                         dsc_max_output_bpp =
848                                 intel_dp_dsc_get_output_bpp(dev_priv,
849                                                             max_link_clock,
850                                                             max_lanes,
851                                                             target_clock,
852                                                             mode->hdisplay,
853                                                             bigjoiner,
854                                                             pipe_bpp) >> 4;
855                         dsc_slice_count =
856                                 intel_dp_dsc_get_slice_count(intel_dp,
857                                                              target_clock,
858                                                              mode->hdisplay,
859                                                              bigjoiner);
860                 }
861
862                 dsc = dsc_max_output_bpp && dsc_slice_count;
863         }
864
865         /*
866          * Big joiner configuration needs DSC on TGL; this is not true for
867          * XE_LPD, where an uncompressed joiner is supported.
868          */
869         if (DISPLAY_VER(dev_priv) < 13 && bigjoiner && !dsc)
870                 return MODE_CLOCK_HIGH;
871
872         if (mode_rate > max_rate && !dsc)
873                 return MODE_CLOCK_HIGH;
874
875         status = intel_dp_mode_valid_downstream(intel_connector,
876                                                 mode, target_clock);
877         if (status != MODE_OK)
878                 return status;
879
880         return intel_mode_valid_max_plane_size(dev_priv, mode, bigjoiner);
881 }
882
883 bool intel_dp_source_supports_hbr2(struct intel_dp *intel_dp)
884 {
885         int max_rate = intel_dp->source_rates[intel_dp->num_source_rates - 1];
886
887         return max_rate >= 540000;
888 }
889
890 bool intel_dp_source_supports_hbr3(struct intel_dp *intel_dp)
891 {
892         int max_rate = intel_dp->source_rates[intel_dp->num_source_rates - 1];
893
894         return max_rate >= 810000;
895 }
896
897 static void snprintf_int_array(char *str, size_t len,
898                                const int *array, int nelem)
899 {
900         int i;
901
902         str[0] = '\0';
903
904         for (i = 0; i < nelem; i++) {
905                 int r = snprintf(str, len, "%s%d", i ? ", " : "", array[i]);
906                 if (r >= len)
907                         return;
908                 str += r;
909                 len -= r;
910         }
911 }
912
913 static void intel_dp_print_rates(struct intel_dp *intel_dp)
914 {
915         struct drm_i915_private *i915 = dp_to_i915(intel_dp);
916         char str[128]; /* FIXME: too big for stack? */
917
918         if (!drm_debug_enabled(DRM_UT_KMS))
919                 return;
920
921         snprintf_int_array(str, sizeof(str),
922                            intel_dp->source_rates, intel_dp->num_source_rates);
923         drm_dbg_kms(&i915->drm, "source rates: %s\n", str);
924
925         snprintf_int_array(str, sizeof(str),
926                            intel_dp->sink_rates, intel_dp->num_sink_rates);
927         drm_dbg_kms(&i915->drm, "sink rates: %s\n", str);
928
929         snprintf_int_array(str, sizeof(str),
930                            intel_dp->common_rates, intel_dp->num_common_rates);
931         drm_dbg_kms(&i915->drm, "common rates: %s\n", str);
932 }
933
934 int
935 intel_dp_max_link_rate(struct intel_dp *intel_dp)
936 {
937         struct drm_i915_private *i915 = dp_to_i915(intel_dp);
938         int len;
939
940         len = intel_dp_common_len_rate_limit(intel_dp, intel_dp->max_link_rate);
941         if (drm_WARN_ON(&i915->drm, len <= 0))
942                 return 162000;
943
944         return intel_dp->common_rates[len - 1];
945 }
946
947 int intel_dp_rate_select(struct intel_dp *intel_dp, int rate)
948 {
949         struct drm_i915_private *i915 = dp_to_i915(intel_dp);
950         int i = intel_dp_rate_index(intel_dp->sink_rates,
951                                     intel_dp->num_sink_rates, rate);
952
953         if (drm_WARN_ON(&i915->drm, i < 0))
954                 i = 0;
955
956         return i;
957 }
958
959 void intel_dp_compute_rate(struct intel_dp *intel_dp, int port_clock,
960                            u8 *link_bw, u8 *rate_select)
961 {
962         /* eDP 1.4 rate select method. */
963         if (intel_dp->use_rate_select) {
964                 *link_bw = 0;
965                 *rate_select =
966                         intel_dp_rate_select(intel_dp, port_clock);
967         } else {
968                 *link_bw = drm_dp_link_rate_to_bw_code(port_clock);
969                 *rate_select = 0;
970         }
971 }
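/*
 * Illustrative example: for a 270000 kHz port clock the non-rate-select
 * path reports link_bw 0x0a (DP_LINK_BW_2_7) with rate_select 0, while the
 * eDP 1.4 rate select path reports link_bw 0 and rate_select set to the
 * index of 270000 in the sink_rates table.
 */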
972
973 static bool intel_dp_source_supports_fec(struct intel_dp *intel_dp,
974                                          const struct intel_crtc_state *pipe_config)
975 {
976         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
977
978         /* On TGL, FEC is supported on all Pipes */
979         if (DISPLAY_VER(dev_priv) >= 12)
980                 return true;
981
982         if (DISPLAY_VER(dev_priv) == 11 && pipe_config->cpu_transcoder != TRANSCODER_A)
983                 return true;
984
985         return false;
986 }
987
988 static bool intel_dp_supports_fec(struct intel_dp *intel_dp,
989                                   const struct intel_crtc_state *pipe_config)
990 {
991         return intel_dp_source_supports_fec(intel_dp, pipe_config) &&
992                 drm_dp_sink_supports_fec(intel_dp->fec_capable);
993 }
994
995 static bool intel_dp_supports_dsc(struct intel_dp *intel_dp,
996                                   const struct intel_crtc_state *crtc_state)
997 {
998         if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP) && !crtc_state->fec_enable)
999                 return false;
1000
1001         return intel_dsc_source_support(crtc_state) &&
1002                 drm_dp_sink_supports_dsc(intel_dp->dsc_dpcd);
1003 }
1004
1005 static bool intel_dp_hdmi_ycbcr420(struct intel_dp *intel_dp,
1006                                    const struct intel_crtc_state *crtc_state)
1007 {
1008         return crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420 ||
1009                 (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR444 &&
1010                  intel_dp->dfp.ycbcr_444_to_420);
1011 }
1012
1013 static int intel_dp_hdmi_tmds_clock(struct intel_dp *intel_dp,
1014                                     const struct intel_crtc_state *crtc_state, int bpc)
1015 {
1016         int clock = crtc_state->hw.adjusted_mode.crtc_clock * bpc / 8;
1017
1018         if (intel_dp_hdmi_ycbcr420(intel_dp, crtc_state))
1019                 clock /= 2;
1020
1021         return clock;
1022 }
1023
1024 static bool intel_dp_hdmi_tmds_clock_valid(struct intel_dp *intel_dp,
1025                                            const struct intel_crtc_state *crtc_state, int bpc)
1026 {
1027         int tmds_clock = intel_dp_hdmi_tmds_clock(intel_dp, crtc_state, bpc);
1028
1029         if (intel_dp->dfp.min_tmds_clock &&
1030             tmds_clock < intel_dp->dfp.min_tmds_clock)
1031                 return false;
1032
1033         if (intel_dp->dfp.max_tmds_clock &&
1034             tmds_clock > intel_dp->dfp.max_tmds_clock)
1035                 return false;
1036
1037         return true;
1038 }
1039
1040 static bool intel_dp_hdmi_deep_color_possible(struct intel_dp *intel_dp,
1041                                               const struct intel_crtc_state *crtc_state,
1042                                               int bpc)
1043 {
1044
1045         return intel_hdmi_deep_color_possible(crtc_state, bpc,
1046                                               intel_dp->has_hdmi_sink,
1047                                               intel_dp_hdmi_ycbcr420(intel_dp, crtc_state)) &&
1048                 intel_dp_hdmi_tmds_clock_valid(intel_dp, crtc_state, bpc);
1049 }
1050
1051 static int intel_dp_max_bpp(struct intel_dp *intel_dp,
1052                             const struct intel_crtc_state *crtc_state)
1053 {
1054         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1055         struct intel_connector *intel_connector = intel_dp->attached_connector;
1056         int bpp, bpc;
1057
1058         bpc = crtc_state->pipe_bpp / 3;
1059
1060         if (intel_dp->dfp.max_bpc)
1061                 bpc = min_t(int, bpc, intel_dp->dfp.max_bpc);
1062
1063         if (intel_dp->dfp.min_tmds_clock) {
1064                 for (; bpc >= 10; bpc -= 2) {
1065                         if (intel_dp_hdmi_deep_color_possible(intel_dp, crtc_state, bpc))
1066                                 break;
1067                 }
1068         }
1069
1070         bpp = bpc * 3;
1071         if (intel_dp_is_edp(intel_dp)) {
1072                 /* Get bpp from VBT only for panels that don't have bpp in EDID */
1073                 if (intel_connector->base.display_info.bpc == 0 &&
1074                     dev_priv->vbt.edp.bpp && dev_priv->vbt.edp.bpp < bpp) {
1075                         drm_dbg_kms(&dev_priv->drm,
1076                                     "clamping bpp for eDP panel to BIOS-provided %i\n",
1077                                     dev_priv->vbt.edp.bpp);
1078                         bpp = dev_priv->vbt.edp.bpp;
1079                 }
1080         }
1081
1082         return bpp;
1083 }
1084
1085 /* Adjust link config limits based on compliance test requests. */
1086 void
1087 intel_dp_adjust_compliance_config(struct intel_dp *intel_dp,
1088                                   struct intel_crtc_state *pipe_config,
1089                                   struct link_config_limits *limits)
1090 {
1091         struct drm_i915_private *i915 = dp_to_i915(intel_dp);
1092
1093         /* For DP Compliance we override the computed bpp for the pipe */
1094         if (intel_dp->compliance.test_data.bpc != 0) {
1095                 int bpp = 3 * intel_dp->compliance.test_data.bpc;
1096
1097                 limits->min_bpp = limits->max_bpp = bpp;
1098                 pipe_config->dither_force_disable = bpp == 6 * 3;
1099
1100                 drm_dbg_kms(&i915->drm, "Setting pipe_bpp to %d\n", bpp);
1101         }
1102
1103         /* Use values requested by Compliance Test Request */
1104         if (intel_dp->compliance.test_type == DP_TEST_LINK_TRAINING) {
1105                 int index;
1106
1107                 /* Validate the compliance test data since max values
1108                  * might have changed due to link train fallback.
1109                  */
1110                 if (intel_dp_link_params_valid(intel_dp, intel_dp->compliance.test_link_rate,
1111                                                intel_dp->compliance.test_lane_count)) {
1112                         index = intel_dp_rate_index(intel_dp->common_rates,
1113                                                     intel_dp->num_common_rates,
1114                                                     intel_dp->compliance.test_link_rate);
1115                         if (index >= 0)
1116                                 limits->min_rate = limits->max_rate =
1117                                         intel_dp->compliance.test_link_rate;
1118                         limits->min_lane_count = limits->max_lane_count =
1119                                 intel_dp->compliance.test_lane_count;
1120                 }
1121         }
1122 }
1123
1124 /* Optimize link config in order: max bpp, min clock, min lanes */
1125 static int
1126 intel_dp_compute_link_config_wide(struct intel_dp *intel_dp,
1127                                   struct intel_crtc_state *pipe_config,
1128                                   const struct link_config_limits *limits)
1129 {
1130         struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode;
1131         int bpp, i, lane_count;
1132         int mode_rate, link_rate, link_avail;
1133
1134         for (bpp = limits->max_bpp; bpp >= limits->min_bpp; bpp -= 2 * 3) {
1135                 int output_bpp = intel_dp_output_bpp(pipe_config->output_format, bpp);
1136
1137                 mode_rate = intel_dp_link_required(adjusted_mode->crtc_clock,
1138                                                    output_bpp);
1139
1140                 for (i = 0; i < intel_dp->num_common_rates; i++) {
1141                         link_rate = intel_dp->common_rates[i];
1142                         if (link_rate < limits->min_rate ||
1143                             link_rate > limits->max_rate)
1144                                 continue;
1145
1146                         for (lane_count = limits->min_lane_count;
1147                              lane_count <= limits->max_lane_count;
1148                              lane_count <<= 1) {
1149                                 link_avail = intel_dp_max_data_rate(link_rate,
1150                                                                     lane_count);
1151
1152                                 if (mode_rate <= link_avail) {
1153                                         pipe_config->lane_count = lane_count;
1154                                         pipe_config->pipe_bpp = bpp;
1155                                         pipe_config->port_clock = link_rate;
1156
1157                                         return 0;
1158                                 }
1159                         }
1160                 }
1161         }
1162
1163         return -EINVAL;
1164 }
1165
1166 static int intel_dp_dsc_compute_bpp(struct intel_dp *intel_dp, u8 max_req_bpc)
1167 {
1168         struct drm_i915_private *i915 = dp_to_i915(intel_dp);
1169         int i, num_bpc;
1170         u8 dsc_bpc[3] = {0};
1171         u8 dsc_max_bpc;
1172
1173         /* Max DSC Input BPC for ICL is 10 and for TGL+ is 12 */
1174         if (DISPLAY_VER(i915) >= 12)
1175                 dsc_max_bpc = min_t(u8, 12, max_req_bpc);
1176         else
1177                 dsc_max_bpc = min_t(u8, 10, max_req_bpc);
1178
1179         num_bpc = drm_dp_dsc_sink_supported_input_bpcs(intel_dp->dsc_dpcd,
1180                                                        dsc_bpc);
1181         for (i = 0; i < num_bpc; i++) {
1182                 if (dsc_max_bpc >= dsc_bpc[i])
1183                         return dsc_bpc[i] * 3;
1184         }
1185
1186         return 0;
1187 }
1188
1189 #define DSC_SUPPORTED_VERSION_MIN               1
1190
1191 static int intel_dp_dsc_compute_params(struct intel_encoder *encoder,
1192                                        struct intel_crtc_state *crtc_state)
1193 {
1194         struct drm_i915_private *i915 = to_i915(encoder->base.dev);
1195         struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
1196         struct drm_dsc_config *vdsc_cfg = &crtc_state->dsc.config;
1197         u8 line_buf_depth;
1198         int ret;
1199
1200         /*
1201          * RC_MODEL_SIZE is currently a constant across all configurations.
1202          *
1203          * FIXME: Look into using sink defined DPCD DP_DSC_RC_BUF_BLK_SIZE and
1204          * DP_DSC_RC_BUF_SIZE for this.
1205          */
1206         vdsc_cfg->rc_model_size = DSC_RC_MODEL_SIZE_CONST;
1207
1208         /*
1209          * Slice Height of 8 works for all currently available panels. So start
1210          * with that if pic_height is an integral multiple of 8. Eventually add
1211          * logic to try multiple slice heights.
1212          */
1213         if (vdsc_cfg->pic_height % 8 == 0)
1214                 vdsc_cfg->slice_height = 8;
1215         else if (vdsc_cfg->pic_height % 4 == 0)
1216                 vdsc_cfg->slice_height = 4;
1217         else
1218                 vdsc_cfg->slice_height = 2;
1219
1220         ret = intel_dsc_compute_params(encoder, crtc_state);
1221         if (ret)
1222                 return ret;
1223
1224         vdsc_cfg->dsc_version_major =
1225                 (intel_dp->dsc_dpcd[DP_DSC_REV - DP_DSC_SUPPORT] &
1226                  DP_DSC_MAJOR_MASK) >> DP_DSC_MAJOR_SHIFT;
1227         vdsc_cfg->dsc_version_minor =
1228                 min(DSC_SUPPORTED_VERSION_MIN,
1229                     (intel_dp->dsc_dpcd[DP_DSC_REV - DP_DSC_SUPPORT] &
1230                      DP_DSC_MINOR_MASK) >> DP_DSC_MINOR_SHIFT);
1231
1232         vdsc_cfg->convert_rgb = intel_dp->dsc_dpcd[DP_DSC_DEC_COLOR_FORMAT_CAP - DP_DSC_SUPPORT] &
1233                 DP_DSC_RGB;
1234
1235         line_buf_depth = drm_dp_dsc_sink_line_buf_depth(intel_dp->dsc_dpcd);
1236         if (!line_buf_depth) {
1237                 drm_dbg_kms(&i915->drm,
1238                             "DSC Sink Line Buffer Depth invalid\n");
1239                 return -EINVAL;
1240         }
1241
1242         if (vdsc_cfg->dsc_version_minor == 2)
1243                 vdsc_cfg->line_buf_depth = (line_buf_depth == DSC_1_2_MAX_LINEBUF_DEPTH_BITS) ?
1244                         DSC_1_2_MAX_LINEBUF_DEPTH_VAL : line_buf_depth;
1245         else
1246                 vdsc_cfg->line_buf_depth = (line_buf_depth > DSC_1_1_MAX_LINEBUF_DEPTH_BITS) ?
1247                         DSC_1_1_MAX_LINEBUF_DEPTH_BITS : line_buf_depth;
1248
1249         vdsc_cfg->block_pred_enable =
1250                 intel_dp->dsc_dpcd[DP_DSC_BLK_PREDICTION_SUPPORT - DP_DSC_SUPPORT] &
1251                 DP_DSC_BLK_PREDICTION_IS_SUPPORTED;
1252
1253         return drm_dsc_compute_rc_parameters(vdsc_cfg);
1254 }
1255
1256 static int intel_dp_dsc_compute_config(struct intel_dp *intel_dp,
1257                                        struct intel_crtc_state *pipe_config,
1258                                        struct drm_connector_state *conn_state,
1259                                        struct link_config_limits *limits)
1260 {
1261         struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
1262         struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
1263         const struct drm_display_mode *adjusted_mode =
1264                 &pipe_config->hw.adjusted_mode;
1265         int pipe_bpp;
1266         int ret;
1267
1268         pipe_config->fec_enable = !intel_dp_is_edp(intel_dp) &&
1269                 intel_dp_supports_fec(intel_dp, pipe_config);
1270
1271         if (!intel_dp_supports_dsc(intel_dp, pipe_config))
1272                 return -EINVAL;
1273
1274         pipe_bpp = intel_dp_dsc_compute_bpp(intel_dp, conn_state->max_requested_bpc);
1275
1276         /* Min Input BPC for ICL+ is 8 */
1277         if (pipe_bpp < 8 * 3) {
1278                 drm_dbg_kms(&dev_priv->drm,
1279                             "No DSC support for less than 8bpc\n");
1280                 return -EINVAL;
1281         }
1282
1283         /*
1284          * For now enable DSC for max bpp, max link rate, max lane count.
1285          * Optimize this later for the minimum possible link rate/lane count
1286          * with DSC enabled for the requested mode.
1287          */
1288         pipe_config->pipe_bpp = pipe_bpp;
1289         pipe_config->port_clock = limits->max_rate;
1290         pipe_config->lane_count = limits->max_lane_count;
1291
1292         if (intel_dp_is_edp(intel_dp)) {
1293                 pipe_config->dsc.compressed_bpp =
1294                         min_t(u16, drm_edp_dsc_sink_output_bpp(intel_dp->dsc_dpcd) >> 4,
1295                               pipe_config->pipe_bpp);
1296                 pipe_config->dsc.slice_count =
1297                         drm_dp_dsc_sink_max_slice_count(intel_dp->dsc_dpcd,
1298                                                         true);
1299         } else {
1300                 u16 dsc_max_output_bpp;
1301                 u8 dsc_dp_slice_count;
1302
1303                 dsc_max_output_bpp =
1304                         intel_dp_dsc_get_output_bpp(dev_priv,
1305                                                     pipe_config->port_clock,
1306                                                     pipe_config->lane_count,
1307                                                     adjusted_mode->crtc_clock,
1308                                                     adjusted_mode->crtc_hdisplay,
1309                                                     pipe_config->bigjoiner,
1310                                                     pipe_bpp);
1311                 dsc_dp_slice_count =
1312                         intel_dp_dsc_get_slice_count(intel_dp,
1313                                                      adjusted_mode->crtc_clock,
1314                                                      adjusted_mode->crtc_hdisplay,
1315                                                      pipe_config->bigjoiner);
1316                 if (!dsc_max_output_bpp || !dsc_dp_slice_count) {
1317                         drm_dbg_kms(&dev_priv->drm,
1318                                     "Compressed BPP/Slice Count not supported\n");
1319                         return -EINVAL;
1320                 }
1321                 pipe_config->dsc.compressed_bpp = min_t(u16,
1322                                                                dsc_max_output_bpp >> 4,
1323                                                                pipe_config->pipe_bpp);
1324                 pipe_config->dsc.slice_count = dsc_dp_slice_count;
1325         }
1326
1327         /* As of today we support DSC for only RGB */
1328         if (intel_dp->force_dsc_bpp) {
1329                 if (intel_dp->force_dsc_bpp >= 8 &&
1330                     intel_dp->force_dsc_bpp < pipe_bpp) {
1331                         drm_dbg_kms(&dev_priv->drm,
1332                                     "DSC BPP forced to %d",
1333                                     intel_dp->force_dsc_bpp);
1334                         pipe_config->dsc.compressed_bpp =
1335                                                 intel_dp->force_dsc_bpp;
1336                 } else {
1337                         drm_dbg_kms(&dev_priv->drm,
1338                                     "Invalid DSC BPP %d",
1339                                     intel_dp->force_dsc_bpp);
1340                 }
1341         }
1342
1343         /*
1344          * The VDSC engine operates at 1 pixel per clock, so if the peak pixel
1345          * rate exceeds the maximum CDCLK frequency, or the big joiner is used,
1346          * the stream must be split across 2 VDSC instances, needing >= 2 slices.
1347          */
1348         if (adjusted_mode->crtc_clock > dev_priv->max_cdclk_freq ||
1349             pipe_config->bigjoiner) {
1350                 if (pipe_config->dsc.slice_count < 2) {
1351                         drm_dbg_kms(&dev_priv->drm,
1352                                     "Cannot split stream to use 2 VDSC instances\n");
1353                         return -EINVAL;
1354                 }
1355
1356                 pipe_config->dsc.dsc_split = true;
1357         }
1358
1359         ret = intel_dp_dsc_compute_params(&dig_port->base, pipe_config);
1360         if (ret < 0) {
1361                 drm_dbg_kms(&dev_priv->drm,
1362                             "Cannot compute valid DSC parameters for Input Bpp = %d "
1363                             "Compressed BPP = %d\n",
1364                             pipe_config->pipe_bpp,
1365                             pipe_config->dsc.compressed_bpp);
1366                 return ret;
1367         }
1368
1369         pipe_config->dsc.compression_enable = true;
1370         drm_dbg_kms(&dev_priv->drm, "DP DSC computed with Input Bpp = %d "
1371                     "Compressed Bpp = %d Slice Count = %d\n",
1372                     pipe_config->pipe_bpp,
1373                     pipe_config->dsc.compressed_bpp,
1374                     pipe_config->dsc.slice_count);
1375
1376         return 0;
1377 }
1378
1379 static int
1380 intel_dp_compute_link_config(struct intel_encoder *encoder,
1381                              struct intel_crtc_state *pipe_config,
1382                              struct drm_connector_state *conn_state)
1383 {
1384         struct drm_i915_private *i915 = to_i915(encoder->base.dev);
1385         const struct drm_display_mode *adjusted_mode =
1386                 &pipe_config->hw.adjusted_mode;
1387         struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
1388         struct link_config_limits limits;
1389         int common_len;
1390         int ret;
1391
1392         common_len = intel_dp_common_len_rate_limit(intel_dp,
1393                                                     intel_dp->max_link_rate);
1394
1395         /* No common link rates between source and sink */
1396         drm_WARN_ON(encoder->base.dev, common_len <= 0);
1397
1398         limits.min_rate = intel_dp->common_rates[0];
1399         limits.max_rate = intel_dp->common_rates[common_len - 1];
1400
1401         limits.min_lane_count = 1;
1402         limits.max_lane_count = intel_dp_max_lane_count(intel_dp);
1403
1404         limits.min_bpp = intel_dp_min_bpp(pipe_config->output_format);
1405         limits.max_bpp = intel_dp_max_bpp(intel_dp, pipe_config);
1406
1407         if (intel_dp->use_max_params) {
1408                 /*
1409                  * Use the maximum clock and number of lanes the eDP panel
1410                  * advertises being capable of, in case the initial fast
1411                  * optimal params failed us. The panels are generally
1412                  * designed to support only a single clock and lane
1413                  * configuration, and typically on older panels these
1414                  * values correspond to the native resolution of the panel.
1415                  */
1416                 limits.min_lane_count = limits.max_lane_count;
1417                 limits.min_rate = limits.max_rate;
1418         }
1419
1420         intel_dp_adjust_compliance_config(intel_dp, pipe_config, &limits);
1421
1422         drm_dbg_kms(&i915->drm, "DP link computation with max lane count %i "
1423                     "max rate %d max bpp %d pixel clock %iKHz\n",
1424                     limits.max_lane_count, limits.max_rate,
1425                     limits.max_bpp, adjusted_mode->crtc_clock);
1426
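              /*
               * Big joiner: use two pipes joined together to drive the port when the
               * mode exceeds a single pipe's dotclock limit or is wider than 5120 pixels.
               */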
1427         if ((adjusted_mode->crtc_clock > i915->max_dotclk_freq ||
1428              adjusted_mode->crtc_hdisplay > 5120) &&
1429             intel_dp_can_bigjoiner(intel_dp))
1430                 pipe_config->bigjoiner = true;
1431
1432         /*
1433          * Optimize for slow and wide for everything, because there are some
1434          * eDP 1.3 and 1.4 panels that don't work well with fast and narrow.
1435          */
1436         ret = intel_dp_compute_link_config_wide(intel_dp, pipe_config, &limits);
1437
1438         /*
1439          * The pipe joiner needs compression up to display version 12 due to BW
1440          * limitations. From DG2 onwards the pipe joiner can be enabled without compression.
1441          */
1442         drm_dbg_kms(&i915->drm, "Force DSC en = %d\n", intel_dp->force_dsc_en);
1443         if (ret || intel_dp->force_dsc_en || (DISPLAY_VER(i915) < 13 &&
1444                                               pipe_config->bigjoiner)) {
1445                 ret = intel_dp_dsc_compute_config(intel_dp, pipe_config,
1446                                                   conn_state, &limits);
1447                 if (ret < 0)
1448                         return ret;
1449         }
1450
1451         if (pipe_config->dsc.compression_enable) {
1452                 drm_dbg_kms(&i915->drm,
1453                             "DP lane count %d clock %d Input bpp %d Compressed bpp %d\n",
1454                             pipe_config->lane_count, pipe_config->port_clock,
1455                             pipe_config->pipe_bpp,
1456                             pipe_config->dsc.compressed_bpp);
1457
1458                 drm_dbg_kms(&i915->drm,
1459                             "DP link rate required %i available %i\n",
1460                             intel_dp_link_required(adjusted_mode->crtc_clock,
1461                                                    pipe_config->dsc.compressed_bpp),
1462                             intel_dp_max_data_rate(pipe_config->port_clock,
1463                                                    pipe_config->lane_count));
1464         } else {
1465                 drm_dbg_kms(&i915->drm, "DP lane count %d clock %d bpp %d\n",
1466                             pipe_config->lane_count, pipe_config->port_clock,
1467                             pipe_config->pipe_bpp);
1468
1469                 drm_dbg_kms(&i915->drm,
1470                             "DP link rate required %i available %i\n",
1471                             intel_dp_link_required(adjusted_mode->crtc_clock,
1472                                                    pipe_config->pipe_bpp),
1473                             intel_dp_max_data_rate(pipe_config->port_clock,
1474                                                    pipe_config->lane_count));
1475         }
1476         return 0;
1477 }
1478
1479 bool intel_dp_limited_color_range(const struct intel_crtc_state *crtc_state,
1480                                   const struct drm_connector_state *conn_state)
1481 {
1482         const struct intel_digital_connector_state *intel_conn_state =
1483                 to_intel_digital_connector_state(conn_state);
1484         const struct drm_display_mode *adjusted_mode =
1485                 &crtc_state->hw.adjusted_mode;
1486
1487         /*
1488          * Our YCbCr output is always limited range.
1489          * crtc_state->limited_color_range only applies to RGB,
1490          * and it must never be set for YCbCr or we risk setting
1491          * some conflicting bits in PIPECONF which will mess up
1492          * the colors on the monitor.
1493          */
1494         if (crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB)
1495                 return false;
1496
1497         if (intel_conn_state->broadcast_rgb == INTEL_BROADCAST_RGB_AUTO) {
1498                 /*
1499                  * See:
1500                  * CEA-861-E - 5.1 Default Encoding Parameters
1501                  * VESA DisplayPort Ver.1.2a - 5.1.1.1 Video Colorimetry
1502                  */
1503                 return crtc_state->pipe_bpp != 18 &&
1504                         drm_default_rgb_quant_range(adjusted_mode) ==
1505                         HDMI_QUANTIZATION_RANGE_LIMITED;
1506         } else {
1507                 return intel_conn_state->broadcast_rgb ==
1508                         INTEL_BROADCAST_RGB_LIMITED;
1509         }
1510 }
1511
1512 static bool intel_dp_port_has_audio(struct drm_i915_private *dev_priv,
1513                                     enum port port)
1514 {
1515         if (IS_G4X(dev_priv))
1516                 return false;
1517         if (DISPLAY_VER(dev_priv) < 12 && port == PORT_A)
1518                 return false;
1519
1520         return true;
1521 }
1522
1523 static void intel_dp_compute_vsc_colorimetry(const struct intel_crtc_state *crtc_state,
1524                                              const struct drm_connector_state *conn_state,
1525                                              struct drm_dp_vsc_sdp *vsc)
1526 {
1527         struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
1528         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1529
1530         /*
1531          * Prepare VSC Header for SU as per DP 1.4 spec, Table 2-118
1532          * VSC SDP supporting 3D stereo, PSR2, and Pixel Encoding/
1533          * Colorimetry Format indication.
1534          */
1535         vsc->revision = 0x5;
1536         vsc->length = 0x13;
1537
1538         /* DP 1.4a spec, Table 2-120 */
1539         switch (crtc_state->output_format) {
1540         case INTEL_OUTPUT_FORMAT_YCBCR444:
1541                 vsc->pixelformat = DP_PIXELFORMAT_YUV444;
1542                 break;
1543         case INTEL_OUTPUT_FORMAT_YCBCR420:
1544                 vsc->pixelformat = DP_PIXELFORMAT_YUV420;
1545                 break;
1546         case INTEL_OUTPUT_FORMAT_RGB:
1547         default:
1548                 vsc->pixelformat = DP_PIXELFORMAT_RGB;
1549         }
1550
1551         switch (conn_state->colorspace) {
1552         case DRM_MODE_COLORIMETRY_BT709_YCC:
1553                 vsc->colorimetry = DP_COLORIMETRY_BT709_YCC;
1554                 break;
1555         case DRM_MODE_COLORIMETRY_XVYCC_601:
1556                 vsc->colorimetry = DP_COLORIMETRY_XVYCC_601;
1557                 break;
1558         case DRM_MODE_COLORIMETRY_XVYCC_709:
1559                 vsc->colorimetry = DP_COLORIMETRY_XVYCC_709;
1560                 break;
1561         case DRM_MODE_COLORIMETRY_SYCC_601:
1562                 vsc->colorimetry = DP_COLORIMETRY_SYCC_601;
1563                 break;
1564         case DRM_MODE_COLORIMETRY_OPYCC_601:
1565                 vsc->colorimetry = DP_COLORIMETRY_OPYCC_601;
1566                 break;
1567         case DRM_MODE_COLORIMETRY_BT2020_CYCC:
1568                 vsc->colorimetry = DP_COLORIMETRY_BT2020_CYCC;
1569                 break;
1570         case DRM_MODE_COLORIMETRY_BT2020_RGB:
1571                 vsc->colorimetry = DP_COLORIMETRY_BT2020_RGB;
1572                 break;
1573         case DRM_MODE_COLORIMETRY_BT2020_YCC:
1574                 vsc->colorimetry = DP_COLORIMETRY_BT2020_YCC;
1575                 break;
1576         case DRM_MODE_COLORIMETRY_DCI_P3_RGB_D65:
1577         case DRM_MODE_COLORIMETRY_DCI_P3_RGB_THEATER:
1578                 vsc->colorimetry = DP_COLORIMETRY_DCI_P3_RGB;
1579                 break;
1580         default:
1581                 /*
1582                  * RGB->YCBCR color conversion uses the BT.709
1583                  * color space.
1584                  */
1585                 if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420)
1586                         vsc->colorimetry = DP_COLORIMETRY_BT709_YCC;
1587                 else
1588                         vsc->colorimetry = DP_COLORIMETRY_DEFAULT;
1589                 break;
1590         }
1591
1592         vsc->bpc = crtc_state->pipe_bpp / 3;
1593
1594         /* only RGB pixelformat supports 6 bpc */
1595         drm_WARN_ON(&dev_priv->drm,
1596                     vsc->bpc == 6 && vsc->pixelformat != DP_PIXELFORMAT_RGB);
1597
1598         /* all YCbCr are always limited range */
1599         vsc->dynamic_range = DP_DYNAMIC_RANGE_CTA;
1600         vsc->content_type = DP_CONTENT_TYPE_NOT_DEFINED;
1601 }
1602
1603 static void intel_dp_compute_vsc_sdp(struct intel_dp *intel_dp,
1604                                      struct intel_crtc_state *crtc_state,
1605                                      const struct drm_connector_state *conn_state)
1606 {
1607         struct drm_dp_vsc_sdp *vsc = &crtc_state->infoframes.vsc;
1608
1609         /* When a crtc state has PSR, VSC SDP will be handled by PSR routine */
1610         if (crtc_state->has_psr)
1611                 return;
1612
1613         if (!intel_dp_needs_vsc_sdp(crtc_state, conn_state))
1614                 return;
1615
1616         crtc_state->infoframes.enable |= intel_hdmi_infoframe_enable(DP_SDP_VSC);
1617         vsc->sdp_type = DP_SDP_VSC;
1618         intel_dp_compute_vsc_colorimetry(crtc_state, conn_state,
1619                                          &crtc_state->infoframes.vsc);
1620 }
1621
1622 void intel_dp_compute_psr_vsc_sdp(struct intel_dp *intel_dp,
1623                                   const struct intel_crtc_state *crtc_state,
1624                                   const struct drm_connector_state *conn_state,
1625                                   struct drm_dp_vsc_sdp *vsc)
1626 {
1627         vsc->sdp_type = DP_SDP_VSC;
1628
1629         if (intel_dp->psr.psr2_enabled) {
1630                 if (intel_dp->psr.colorimetry_support &&
1631                     intel_dp_needs_vsc_sdp(crtc_state, conn_state)) {
1632                         /* [PSR2, +Colorimetry] */
1633                         intel_dp_compute_vsc_colorimetry(crtc_state, conn_state,
1634                                                          vsc);
1635                 } else {
1636                         /*
1637                          * [PSR2, -Colorimetry]
1638                          * Prepare VSC Header for SU as per eDP 1.4 spec, Table 6-11
1639                          * 3D stereo + PSR/PSR2 + Y-coordinate.
1640                          */
1641                         vsc->revision = 0x4;
1642                         vsc->length = 0xe;
1643                 }
1644         } else {
1645                 /*
1646                  * [PSR1]
1647                  * Prepare VSC Header for SU as per DP 1.4 spec, Table 2-118
1648                  * VSC SDP supporting 3D stereo + PSR (applies to eDP v1.3 or
1649                  * higher).
1650                  */
1651                 vsc->revision = 0x2;
1652                 vsc->length = 0x8;
1653         }
1654 }
1655
1656 static void
1657 intel_dp_compute_hdr_metadata_infoframe_sdp(struct intel_dp *intel_dp,
1658                                             struct intel_crtc_state *crtc_state,
1659                                             const struct drm_connector_state *conn_state)
1660 {
1661         int ret;
1662         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1663         struct hdmi_drm_infoframe *drm_infoframe = &crtc_state->infoframes.drm.drm;
1664
1665         if (!conn_state->hdr_output_metadata)
1666                 return;
1667
1668         ret = drm_hdmi_infoframe_set_hdr_metadata(drm_infoframe, conn_state);
1669
1670         if (ret) {
1671                 drm_dbg_kms(&dev_priv->drm, "couldn't set HDR metadata in infoframe\n");
1672                 return;
1673         }
1674
1675         crtc_state->infoframes.enable |=
1676                 intel_hdmi_infoframe_enable(HDMI_PACKET_TYPE_GAMUT_METADATA);
1677 }
1678
1679 static void
1680 intel_dp_drrs_compute_config(struct intel_dp *intel_dp,
1681                              struct intel_crtc_state *pipe_config,
1682                              int output_bpp, bool constant_n)
1683 {
1684         struct intel_connector *intel_connector = intel_dp->attached_connector;
1685         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1686         int pixel_clock;
1687
1688         if (pipe_config->vrr.enable)
1689                 return;
1690
1691         /*
1692          * DRRS and PSR can't be enabled together, so PSR is given preference
1693          * as it allows more power savings by completely shutting down the display.
1694          * To guarantee this, intel_dp_drrs_compute_config() must be called
1695          * after intel_psr_compute_config().
1696          */
1697         if (pipe_config->has_psr)
1698                 return;
1699
1700         if (!intel_connector->panel.downclock_mode ||
1701             dev_priv->drrs.type != SEAMLESS_DRRS_SUPPORT)
1702                 return;
1703
1704         pipe_config->has_drrs = true;
1705
1706         pixel_clock = intel_connector->panel.downclock_mode->clock;
1707         if (pipe_config->splitter.enable)
1708                 pixel_clock /= pipe_config->splitter.link_count;
1709
1710         intel_link_compute_m_n(output_bpp, pipe_config->lane_count, pixel_clock,
1711                                pipe_config->port_clock, &pipe_config->dp_m2_n2,
1712                                constant_n, pipe_config->fec_enable);
1713
1714         /* FIXME: abstract this better */
1715         if (pipe_config->splitter.enable)
1716                 pipe_config->dp_m2_n2.gmch_m *= pipe_config->splitter.link_count;
1717 }
1718
1719 int
1720 intel_dp_compute_config(struct intel_encoder *encoder,
1721                         struct intel_crtc_state *pipe_config,
1722                         struct drm_connector_state *conn_state)
1723 {
1724         struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
1725         struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode;
1726         struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
1727         enum port port = encoder->port;
1728         struct intel_connector *intel_connector = intel_dp->attached_connector;
1729         struct intel_digital_connector_state *intel_conn_state =
1730                 to_intel_digital_connector_state(conn_state);
1731         bool constant_n = drm_dp_has_quirk(&intel_dp->desc, DP_DPCD_QUIRK_CONSTANT_N);
1732         int ret = 0, output_bpp;
1733
1734         if (HAS_PCH_SPLIT(dev_priv) && !HAS_DDI(dev_priv) && port != PORT_A)
1735                 pipe_config->has_pch_encoder = true;
1736
1737         pipe_config->output_format = intel_dp_output_format(&intel_connector->base,
1738                                                             adjusted_mode);
1739
1740         if (pipe_config->output_format == INTEL_OUTPUT_FORMAT_YCBCR420) {
1741                 ret = intel_panel_fitting(pipe_config, conn_state);
1742                 if (ret)
1743                         return ret;
1744         }
1745
1746         if (!intel_dp_port_has_audio(dev_priv, port))
1747                 pipe_config->has_audio = false;
1748         else if (intel_conn_state->force_audio == HDMI_AUDIO_AUTO)
1749                 pipe_config->has_audio = intel_dp->has_audio;
1750         else
1751                 pipe_config->has_audio = intel_conn_state->force_audio == HDMI_AUDIO_ON;
1752
1753         if (intel_dp_is_edp(intel_dp) && intel_connector->panel.fixed_mode) {
1754                 intel_panel_fixed_mode(intel_connector->panel.fixed_mode,
1755                                        adjusted_mode);
1756
1757                 ret = intel_panel_fitting(pipe_config, conn_state);
1758                 if (ret)
1759                         return ret;
1760         }
1761
1762         if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)
1763                 return -EINVAL;
1764
1765         if (HAS_GMCH(dev_priv) &&
1766             adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE)
1767                 return -EINVAL;
1768
1769         if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK)
1770                 return -EINVAL;
1771
1772         if (intel_dp_hdisplay_bad(dev_priv, adjusted_mode->crtc_hdisplay))
1773                 return -EINVAL;
1774
1775         ret = intel_dp_compute_link_config(encoder, pipe_config, conn_state);
1776         if (ret < 0)
1777                 return ret;
1778
1779         pipe_config->limited_color_range =
1780                 intel_dp_limited_color_range(pipe_config, conn_state);
1781
1782         if (pipe_config->dsc.compression_enable)
1783                 output_bpp = pipe_config->dsc.compressed_bpp;
1784         else
1785                 output_bpp = intel_dp_output_bpp(pipe_config->output_format,
1786                                                  pipe_config->pipe_bpp);
1787
1788         if (intel_dp->mso_link_count) {
1789                 int n = intel_dp->mso_link_count;
1790                 int overlap = intel_dp->mso_pixel_overlap;
1791
1792                 pipe_config->splitter.enable = true;
1793                 pipe_config->splitter.link_count = n;
1794                 pipe_config->splitter.pixel_overlap = overlap;
1795
1796                 drm_dbg_kms(&dev_priv->drm, "MSO link count %d, pixel overlap %d\n",
1797                             n, overlap);
1798
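                      /*
                       * With eDP MSO each of the n segment links carries 1/n of the
                       * horizontal timings plus the pixel overlap, so shrink the adjusted
                       * mode here; intel_edp_mso_mode_fixup() applies the inverse when
                       * generating the full-panel mode.
                       */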
1799                 adjusted_mode->crtc_hdisplay = adjusted_mode->crtc_hdisplay / n + overlap;
1800                 adjusted_mode->crtc_hblank_start = adjusted_mode->crtc_hblank_start / n + overlap;
1801                 adjusted_mode->crtc_hblank_end = adjusted_mode->crtc_hblank_end / n + overlap;
1802                 adjusted_mode->crtc_hsync_start = adjusted_mode->crtc_hsync_start / n + overlap;
1803                 adjusted_mode->crtc_hsync_end = adjusted_mode->crtc_hsync_end / n + overlap;
1804                 adjusted_mode->crtc_htotal = adjusted_mode->crtc_htotal / n + overlap;
1805                 adjusted_mode->crtc_clock /= n;
1806         }
1807
1808         intel_link_compute_m_n(output_bpp,
1809                                pipe_config->lane_count,
1810                                adjusted_mode->crtc_clock,
1811                                pipe_config->port_clock,
1812                                &pipe_config->dp_m_n,
1813                                constant_n, pipe_config->fec_enable);
1814
1815         /* FIXME: abstract this better */
1816         if (pipe_config->splitter.enable)
1817                 pipe_config->dp_m_n.gmch_m *= pipe_config->splitter.link_count;
1818
1819         if (!HAS_DDI(dev_priv))
1820                 g4x_dp_set_clock(encoder, pipe_config);
1821
1822         intel_vrr_compute_config(pipe_config, conn_state);
1823         intel_psr_compute_config(intel_dp, pipe_config);
1824         intel_dp_drrs_compute_config(intel_dp, pipe_config, output_bpp,
1825                                      constant_n);
1826         intel_dp_compute_vsc_sdp(intel_dp, pipe_config, conn_state);
1827         intel_dp_compute_hdr_metadata_infoframe_sdp(intel_dp, pipe_config, conn_state);
1828
1829         return 0;
1830 }
1831
1832 void intel_dp_set_link_params(struct intel_dp *intel_dp,
1833                               int link_rate, int lane_count)
1834 {
1835         intel_dp->link_trained = false;
1836         intel_dp->link_rate = link_rate;
1837         intel_dp->lane_count = lane_count;
1838 }
1839
1840 /* Enable backlight PWM and backlight PP control. */
1841 void intel_edp_backlight_on(const struct intel_crtc_state *crtc_state,
1842                             const struct drm_connector_state *conn_state)
1843 {
1844         struct intel_dp *intel_dp = enc_to_intel_dp(to_intel_encoder(conn_state->best_encoder));
1845         struct drm_i915_private *i915 = dp_to_i915(intel_dp);
1846
1847         if (!intel_dp_is_edp(intel_dp))
1848                 return;
1849
1850         drm_dbg_kms(&i915->drm, "\n");
1851
1852         intel_backlight_enable(crtc_state, conn_state);
1853         intel_pps_backlight_on(intel_dp);
1854 }
1855
1856 /* Disable backlight PP control and backlight PWM. */
1857 void intel_edp_backlight_off(const struct drm_connector_state *old_conn_state)
1858 {
1859         struct intel_dp *intel_dp = enc_to_intel_dp(to_intel_encoder(old_conn_state->best_encoder));
1860         struct drm_i915_private *i915 = dp_to_i915(intel_dp);
1861
1862         if (!intel_dp_is_edp(intel_dp))
1863                 return;
1864
1865         drm_dbg_kms(&i915->drm, "\n");
1866
1867         intel_pps_backlight_off(intel_dp);
1868         intel_backlight_disable(old_conn_state);
1869 }
1870
1871 static bool downstream_hpd_needs_d0(struct intel_dp *intel_dp)
1872 {
1873         /*
1874          * DPCD 1.2+ should support BRANCH_DEVICE_CTRL, and thus
1875          * be capable of signalling downstream hpd with a long pulse.
1876          * Whether or not that means D3 is safe to use is not clear,
1877          * but let's assume so until proven otherwise.
1878          *
1879          * FIXME should really check all downstream ports...
1880          */
1881         return intel_dp->dpcd[DP_DPCD_REV] == 0x11 &&
1882                 drm_dp_is_branch(intel_dp->dpcd) &&
1883                 intel_dp->downstream_ports[0] & DP_DS_PORT_HPD;
1884 }
1885
1886 void intel_dp_sink_set_decompression_state(struct intel_dp *intel_dp,
1887                                            const struct intel_crtc_state *crtc_state,
1888                                            bool enable)
1889 {
1890         struct drm_i915_private *i915 = dp_to_i915(intel_dp);
1891         int ret;
1892
1893         if (!crtc_state->dsc.compression_enable)
1894                 return;
1895
1896         ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_DSC_ENABLE,
1897                                  enable ? DP_DECOMPRESSION_EN : 0);
1898         if (ret < 0)
1899                 drm_dbg_kms(&i915->drm,
1900                             "Failed to %s sink decompression state\n",
1901                             enabledisable(enable));
1902 }
1903
1904 static void
1905 intel_edp_init_source_oui(struct intel_dp *intel_dp, bool careful)
1906 {
1907         struct drm_i915_private *i915 = dp_to_i915(intel_dp);
1908         u8 oui[] = { 0x00, 0xaa, 0x01 };
1909         u8 buf[3] = { 0 };
1910
1911         /*
1912          * During driver init, we want to be careful and avoid changing the source OUI if it's
1913          * already set to what we want, so as to avoid clearing any state by accident
1914          */
1915         if (careful) {
1916                 if (drm_dp_dpcd_read(&intel_dp->aux, DP_SOURCE_OUI, buf, sizeof(buf)) < 0)
1917                         drm_err(&i915->drm, "Failed to read source OUI\n");
1918
1919                 if (memcmp(oui, buf, sizeof(oui)) == 0)
1920                         return;
1921         }
1922
1923         if (drm_dp_dpcd_write(&intel_dp->aux, DP_SOURCE_OUI, oui, sizeof(oui)) < 0)
1924                 drm_err(&i915->drm, "Failed to write source OUI\n");
1925 }
1926
1927 /* If the device supports it, try to set the power state appropriately */
1928 void intel_dp_set_power(struct intel_dp *intel_dp, u8 mode)
1929 {
1930         struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
1931         struct drm_i915_private *i915 = to_i915(encoder->base.dev);
1932         int ret, i;
1933
1934         /* Should have a valid DPCD by this point */
1935         if (intel_dp->dpcd[DP_DPCD_REV] < 0x11)
1936                 return;
1937
1938         if (mode != DP_SET_POWER_D0) {
1939                 if (downstream_hpd_needs_d0(intel_dp))
1940                         return;
1941
1942                 ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER, mode);
1943         } else {
1944                 struct intel_lspcon *lspcon = dp_to_lspcon(intel_dp);
1945
1946                 lspcon_resume(dp_to_dig_port(intel_dp));
1947
1948                 /* Write the source OUI as early as possible */
1949                 if (intel_dp_is_edp(intel_dp))
1950                         intel_edp_init_source_oui(intel_dp, false);
1951
1952                 /*
1953                  * When turning on, retry the write a few times, sleeping 1 ms
1954                  * between attempts, to give the sink time to wake up.
1955                  */
1956                 for (i = 0; i < 3; i++) {
1957                         ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER, mode);
1958                         if (ret == 1)
1959                                 break;
1960                         msleep(1);
1961                 }
1962
1963                 if (ret == 1 && lspcon->active)
1964                         lspcon_wait_pcon_mode(lspcon);
1965         }
1966
1967         if (ret != 1)
1968                 drm_dbg_kms(&i915->drm, "[ENCODER:%d:%s] Set power to %s failed\n",
1969                             encoder->base.base.id, encoder->base.name,
1970                             mode == DP_SET_POWER_D0 ? "D0" : "D3");
1971 }
1972
1973 static bool
1974 intel_dp_get_dpcd(struct intel_dp *intel_dp);
1975
1976 /**
1977  * intel_dp_sync_state - sync the encoder state during init/resume
1978  * @encoder: intel encoder to sync
1979  * @crtc_state: state for the CRTC connected to the encoder
1980  *
1981  * Sync any state stored in the encoder wrt. HW state during driver init
1982  * and system resume.
1983  */
1984 void intel_dp_sync_state(struct intel_encoder *encoder,
1985                          const struct intel_crtc_state *crtc_state)
1986 {
1987         struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
1988
1989         /*
1990          * Don't clobber DPCD if it's been already read out during output
1991          * setup (eDP) or detect.
1992          */
1993         if (intel_dp->dpcd[DP_DPCD_REV] == 0)
1994                 intel_dp_get_dpcd(intel_dp);
1995
1996         intel_dp->max_link_lane_count = intel_dp_max_common_lane_count(intel_dp);
1997         intel_dp->max_link_rate = intel_dp_max_common_rate(intel_dp);
1998 }
1999
2000 bool intel_dp_initial_fastset_check(struct intel_encoder *encoder,
2001                                     struct intel_crtc_state *crtc_state)
2002 {
2003         struct drm_i915_private *i915 = to_i915(encoder->base.dev);
2004         struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
2005
2006         /*
2007          * If BIOS has set an unsupported or non-standard link rate for some
2008          * reason, force an encoder recompute and a full modeset.
2009          */
2010         if (intel_dp_rate_index(intel_dp->source_rates, intel_dp->num_source_rates,
2011                                 crtc_state->port_clock) < 0) {
2012                 drm_dbg_kms(&i915->drm, "Forcing full modeset due to unsupported link rate\n");
2013                 crtc_state->uapi.connectors_changed = true;
2014                 return false;
2015         }
2016
2017         /*
2018          * FIXME hack to force full modeset when DSC is being used.
2019          *
2020          * As long as we do not have full state readout and config comparison
2021          * of crtc_state->dsc, we have no way to ensure reliable fastset.
2022          * Remove once we have readout for DSC.
2023          */
2024         if (crtc_state->dsc.compression_enable) {
2025                 drm_dbg_kms(&i915->drm, "Forcing full modeset due to DSC being enabled\n");
2026                 crtc_state->uapi.mode_changed = true;
2027                 return false;
2028         }
2029
2030         if (CAN_PSR(intel_dp)) {
2031                 drm_dbg_kms(&i915->drm, "Forcing full modeset to compute PSR state\n");
2032                 crtc_state->uapi.mode_changed = true;
2033                 return false;
2034         }
2035
2036         return true;
2037 }
2038
2039 static void intel_dp_get_pcon_dsc_cap(struct intel_dp *intel_dp)
2040 {
2041         struct drm_i915_private *i915 = dp_to_i915(intel_dp);
2042
2043         /* Clear the cached register set to avoid using stale values */
2044
2045         memset(intel_dp->pcon_dsc_dpcd, 0, sizeof(intel_dp->pcon_dsc_dpcd));
2046
2047         if (drm_dp_dpcd_read(&intel_dp->aux, DP_PCON_DSC_ENCODER,
2048                              intel_dp->pcon_dsc_dpcd,
2049                              sizeof(intel_dp->pcon_dsc_dpcd)) < 0)
2050                 drm_err(&i915->drm, "Failed to read DPCD register 0x%x\n",
2051                         DP_PCON_DSC_ENCODER);
2052
2053         drm_dbg_kms(&i915->drm, "PCON ENCODER DSC DPCD: %*ph\n",
2054                     (int)sizeof(intel_dp->pcon_dsc_dpcd), intel_dp->pcon_dsc_dpcd);
2055 }
2056
2057 static int intel_dp_pcon_get_frl_mask(u8 frl_bw_mask)
2058 {
2059         int bw_gbps[] = {9, 18, 24, 32, 40, 48};
2060         int i;
2061
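              /* Return the highest FRL bandwidth (in Gbps) set in the mask. */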
2062         for (i = ARRAY_SIZE(bw_gbps) - 1; i >= 0; i--) {
2063                 if (frl_bw_mask & (1 << i))
2064                         return bw_gbps[i];
2065         }
2066         return 0;
2067 }
2068
2069 static int intel_dp_pcon_set_frl_mask(int max_frl)
2070 {
2071         switch (max_frl) {
2072         case 48:
2073                 return DP_PCON_FRL_BW_MASK_48GBPS;
2074         case 40:
2075                 return DP_PCON_FRL_BW_MASK_40GBPS;
2076         case 32:
2077                 return DP_PCON_FRL_BW_MASK_32GBPS;
2078         case 24:
2079                 return DP_PCON_FRL_BW_MASK_24GBPS;
2080         case 18:
2081                 return DP_PCON_FRL_BW_MASK_18GBPS;
2082         case 9:
2083                 return DP_PCON_FRL_BW_MASK_9GBPS;
2084         }
2085
2086         return 0;
2087 }
2088
2089 static int intel_dp_hdmi_sink_max_frl(struct intel_dp *intel_dp)
2090 {
2091         struct intel_connector *intel_connector = intel_dp->attached_connector;
2092         struct drm_connector *connector = &intel_connector->base;
2093         int max_frl_rate;
2094         int max_lanes, rate_per_lane;
2095         int max_dsc_lanes, dsc_rate_per_lane;
2096
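              /* Max FRL bandwidth in Gbps is the lane count times the per-lane rate. */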
2097         max_lanes = connector->display_info.hdmi.max_lanes;
2098         rate_per_lane = connector->display_info.hdmi.max_frl_rate_per_lane;
2099         max_frl_rate = max_lanes * rate_per_lane;
2100
2101         if (connector->display_info.hdmi.dsc_cap.v_1p2) {
2102                 max_dsc_lanes = connector->display_info.hdmi.dsc_cap.max_lanes;
2103                 dsc_rate_per_lane = connector->display_info.hdmi.dsc_cap.max_frl_rate_per_lane;
2104                 if (max_dsc_lanes && dsc_rate_per_lane)
2105                         max_frl_rate = min(max_frl_rate, max_dsc_lanes * dsc_rate_per_lane);
2106         }
2107
2108         return max_frl_rate;
2109 }
2110
2111 static int intel_dp_pcon_start_frl_training(struct intel_dp *intel_dp)
2112 {
2113 #define TIMEOUT_FRL_READY_MS 500
2114 #define TIMEOUT_HDMI_LINK_ACTIVE_MS 1000
2115
2116         struct drm_i915_private *i915 = dp_to_i915(intel_dp);
2117         int max_frl_bw, max_pcon_frl_bw, max_edid_frl_bw, ret;
2118         u8 max_frl_bw_mask = 0, frl_trained_mask;
2119         bool is_active;
2120
2121         ret = drm_dp_pcon_reset_frl_config(&intel_dp->aux);
2122         if (ret < 0)
2123                 return ret;
2124
2125         max_pcon_frl_bw = intel_dp->dfp.pcon_max_frl_bw;
2126         drm_dbg(&i915->drm, "PCON max rate = %d Gbps\n", max_pcon_frl_bw);
2127
2128         max_edid_frl_bw = intel_dp_hdmi_sink_max_frl(intel_dp);
2129         drm_dbg(&i915->drm, "Sink max rate from EDID = %d Gbps\n", max_edid_frl_bw);
2130
2131         max_frl_bw = min(max_edid_frl_bw, max_pcon_frl_bw);
2132
2133         if (max_frl_bw <= 0)
2134                 return -EINVAL;
2135
2136         ret = drm_dp_pcon_frl_prepare(&intel_dp->aux, false);
2137         if (ret < 0)
2138                 return ret;
2139         /* Wait for PCON to be FRL Ready */
2140         wait_for(is_active = drm_dp_pcon_is_frl_ready(&intel_dp->aux) == true, TIMEOUT_FRL_READY_MS);
2141
2142         if (!is_active)
2143                 return -ETIMEDOUT;
2144
2145         max_frl_bw_mask = intel_dp_pcon_set_frl_mask(max_frl_bw);
2146         ret = drm_dp_pcon_frl_configure_1(&intel_dp->aux, max_frl_bw,
2147                                           DP_PCON_ENABLE_SEQUENTIAL_LINK);
2148         if (ret < 0)
2149                 return ret;
2150         ret = drm_dp_pcon_frl_configure_2(&intel_dp->aux, max_frl_bw_mask,
2151                                           DP_PCON_FRL_LINK_TRAIN_NORMAL);
2152         if (ret < 0)
2153                 return ret;
2154         ret = drm_dp_pcon_frl_enable(&intel_dp->aux);
2155         if (ret < 0)
2156                 return ret;
2157         /*
2158          * Wait for FRL training to complete by checking that the
2159          * HDMI link is up and active.
2160          */
2161         wait_for(is_active = drm_dp_pcon_hdmi_link_active(&intel_dp->aux) == true, TIMEOUT_HDMI_LINK_ACTIVE_MS);
2162
2163         if (!is_active)
2164                 return -ETIMEDOUT;
2165
2166         /* Verify HDMI Link configuration shows FRL Mode */
2167         if (drm_dp_pcon_hdmi_link_mode(&intel_dp->aux, &frl_trained_mask) !=
2168             DP_PCON_HDMI_MODE_FRL) {
2169                 drm_dbg(&i915->drm, "HDMI couldn't be trained in FRL Mode\n");
2170                 return -EINVAL;
2171         }
2172         drm_dbg(&i915->drm, "MAX_FRL_MASK = %u, FRL_TRAINED_MASK = %u\n", max_frl_bw_mask, frl_trained_mask);
2173
2174         intel_dp->frl.trained_rate_gbps = intel_dp_pcon_get_frl_mask(frl_trained_mask);
2175         intel_dp->frl.is_trained = true;
2176         drm_dbg(&i915->drm, "FRL trained with : %d Gbps\n", intel_dp->frl.trained_rate_gbps);
2177
2178         return 0;
2179 }
2180
2181 static bool intel_dp_is_hdmi_2_1_sink(struct intel_dp *intel_dp)
2182 {
2183         if (drm_dp_is_branch(intel_dp->dpcd) &&
2184             intel_dp->has_hdmi_sink &&
2185             intel_dp_hdmi_sink_max_frl(intel_dp) > 0)
2186                 return true;
2187
2188         return false;
2189 }
2190
2191 void intel_dp_check_frl_training(struct intel_dp *intel_dp)
2192 {
2193         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
2194
2195         /*
2196          * Always go for FRL training if:
2197          * -PCON supports SRC_CTL_MODE (VESA DP2.0-HDMI2.1 PCON Spec Draft-1 Sec-7)
2198          * -sink is HDMI2.1
2199          */
2200         if (!(intel_dp->downstream_ports[2] & DP_PCON_SOURCE_CTL_MODE) ||
2201             !intel_dp_is_hdmi_2_1_sink(intel_dp) ||
2202             intel_dp->frl.is_trained)
2203                 return;
2204
2205         if (intel_dp_pcon_start_frl_training(intel_dp) < 0) {
2206                 int ret, mode;
2207
2208                 drm_dbg(&dev_priv->drm, "Couldn't set FRL mode, continuing with TMDS mode\n");
2209                 ret = drm_dp_pcon_reset_frl_config(&intel_dp->aux);
2210                 mode = drm_dp_pcon_hdmi_link_mode(&intel_dp->aux, NULL);
2211
2212                 if (ret < 0 || mode != DP_PCON_HDMI_MODE_TMDS)
2213                         drm_dbg(&dev_priv->drm, "Issue with PCON, cannot set TMDS mode\n");
2214         } else {
2215                 drm_dbg(&dev_priv->drm, "FRL training Completed\n");
2216         }
2217 }
2218
2219 static int
2220 intel_dp_pcon_dsc_enc_slice_height(const struct intel_crtc_state *crtc_state)
2221 {
2222         int vactive = crtc_state->hw.adjusted_mode.vdisplay;
2223
2224         return intel_hdmi_dsc_get_slice_height(vactive);
2225 }
2226
2227 static int
2228 intel_dp_pcon_dsc_enc_slices(struct intel_dp *intel_dp,
2229                              const struct intel_crtc_state *crtc_state)
2230 {
2231         struct intel_connector *intel_connector = intel_dp->attached_connector;
2232         struct drm_connector *connector = &intel_connector->base;
2233         int hdmi_throughput = connector->display_info.hdmi.dsc_cap.clk_per_slice;
2234         int hdmi_max_slices = connector->display_info.hdmi.dsc_cap.max_slices;
2235         int pcon_max_slices = drm_dp_pcon_dsc_max_slices(intel_dp->pcon_dsc_dpcd);
2236         int pcon_max_slice_width = drm_dp_pcon_dsc_max_slice_width(intel_dp->pcon_dsc_dpcd);
2237
2238         return intel_hdmi_dsc_get_num_slices(crtc_state, pcon_max_slices,
2239                                              pcon_max_slice_width,
2240                                              hdmi_max_slices, hdmi_throughput);
2241 }
2242
2243 static int
2244 intel_dp_pcon_dsc_enc_bpp(struct intel_dp *intel_dp,
2245                           const struct intel_crtc_state *crtc_state,
2246                           int num_slices, int slice_width)
2247 {
2248         struct intel_connector *intel_connector = intel_dp->attached_connector;
2249         struct drm_connector *connector = &intel_connector->base;
2250         int output_format = crtc_state->output_format;
2251         bool hdmi_all_bpp = connector->display_info.hdmi.dsc_cap.all_bpp;
2252         int pcon_fractional_bpp = drm_dp_pcon_dsc_bpp_incr(intel_dp->pcon_dsc_dpcd);
2253         int hdmi_max_chunk_bytes =
2254                 connector->display_info.hdmi.dsc_cap.total_chunk_kbytes * 1024;
2255
2256         return intel_hdmi_dsc_get_bpp(pcon_fractional_bpp, slice_width,
2257                                       num_slices, output_format, hdmi_all_bpp,
2258                                       hdmi_max_chunk_bytes);
2259 }
2260
2261 void
2262 intel_dp_pcon_dsc_configure(struct intel_dp *intel_dp,
2263                             const struct intel_crtc_state *crtc_state)
2264 {
2265         u8 pps_param[6];
2266         int slice_height;
2267         int slice_width;
2268         int num_slices;
2269         int bits_per_pixel;
2270         int ret;
2271         struct intel_connector *intel_connector = intel_dp->attached_connector;
2272         struct drm_i915_private *i915 = dp_to_i915(intel_dp);
2273         struct drm_connector *connector;
2274         bool hdmi_is_dsc_1_2;
2275
2276         if (!intel_dp_is_hdmi_2_1_sink(intel_dp))
2277                 return;
2278
2279         if (!intel_connector)
2280                 return;
2281         connector = &intel_connector->base;
2282         hdmi_is_dsc_1_2 = connector->display_info.hdmi.dsc_cap.v_1p2;
2283
2284         if (!drm_dp_pcon_enc_is_dsc_1_2(intel_dp->pcon_dsc_dpcd) ||
2285             !hdmi_is_dsc_1_2)
2286                 return;
2287
2288         slice_height = intel_dp_pcon_dsc_enc_slice_height(crtc_state);
2289         if (!slice_height)
2290                 return;
2291
2292         num_slices = intel_dp_pcon_dsc_enc_slices(intel_dp, crtc_state);
2293         if (!num_slices)
2294                 return;
2295
2296         slice_width = DIV_ROUND_UP(crtc_state->hw.adjusted_mode.hdisplay,
2297                                    num_slices);
2298
2299         bits_per_pixel = intel_dp_pcon_dsc_enc_bpp(intel_dp, crtc_state,
2300                                                    num_slices, slice_width);
2301         if (!bits_per_pixel)
2302                 return;
2303
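              /*
               * Pack the PPS override parameters for the PCON: slice height, slice
               * width and bits_per_pixel, each little-endian (bpp uses only 10 bits).
               */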
2304         pps_param[0] = slice_height & 0xFF;
2305         pps_param[1] = slice_height >> 8;
2306         pps_param[2] = slice_width & 0xFF;
2307         pps_param[3] = slice_width >> 8;
2308         pps_param[4] = bits_per_pixel & 0xFF;
2309         pps_param[5] = (bits_per_pixel >> 8) & 0x3;
2310
2311         ret = drm_dp_pcon_pps_override_param(&intel_dp->aux, pps_param);
2312         if (ret < 0)
2313                 drm_dbg_kms(&i915->drm, "Failed to set pcon DSC\n");
2314 }
2315
2316 void intel_dp_configure_protocol_converter(struct intel_dp *intel_dp,
2317                                            const struct intel_crtc_state *crtc_state)
2318 {
2319         struct drm_i915_private *i915 = dp_to_i915(intel_dp);
2320         u8 tmp;
2321
2322         if (intel_dp->dpcd[DP_DPCD_REV] < 0x13)
2323                 return;
2324
2325         if (!drm_dp_is_branch(intel_dp->dpcd))
2326                 return;
2327
2328         tmp = intel_dp->has_hdmi_sink ?
2329                 DP_HDMI_DVI_OUTPUT_CONFIG : 0;
2330
2331         if (drm_dp_dpcd_writeb(&intel_dp->aux,
2332                                DP_PROTOCOL_CONVERTER_CONTROL_0, tmp) != 1)
2333                 drm_dbg_kms(&i915->drm, "Failed to %s protocol converter HDMI mode\n",
2334                             enabledisable(intel_dp->has_hdmi_sink));
2335
2336         tmp = crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR444 &&
2337                 intel_dp->dfp.ycbcr_444_to_420 ? DP_CONVERSION_TO_YCBCR420_ENABLE : 0;
2338
2339         if (drm_dp_dpcd_writeb(&intel_dp->aux,
2340                                DP_PROTOCOL_CONVERTER_CONTROL_1, tmp) != 1)
2341                 drm_dbg_kms(&i915->drm,
2342                             "Failed to %s protocol converter YCbCr 4:2:0 conversion mode\n",
2343                             enabledisable(intel_dp->dfp.ycbcr_444_to_420));
2344
2345         tmp = 0;
2346         if (intel_dp->dfp.rgb_to_ycbcr) {
2347                 bool bt2020, bt709;
2348
2349                 /*
2350                  * FIXME: Currently if userspace selects BT2020 or BT709, but the PCON
2351                  * supports RGB->YCbCr conversion only for the BT601 colorspace, we
2352                  * fall back to BT601 as the default.
2353                  */
2354                 tmp = DP_CONVERSION_BT601_RGB_YCBCR_ENABLE;
2355
2356                 bt2020 = drm_dp_downstream_rgb_to_ycbcr_conversion(intel_dp->dpcd,
2357                                                                    intel_dp->downstream_ports,
2358                                                                    DP_DS_HDMI_BT2020_RGB_YCBCR_CONV);
2359                 bt709 = drm_dp_downstream_rgb_to_ycbcr_conversion(intel_dp->dpcd,
2360                                                                   intel_dp->downstream_ports,
2361                                                                   DP_DS_HDMI_BT709_RGB_YCBCR_CONV);
2362                 switch (crtc_state->infoframes.vsc.colorimetry) {
2363                 case DP_COLORIMETRY_BT2020_RGB:
2364                 case DP_COLORIMETRY_BT2020_YCC:
2365                         if (bt2020)
2366                                 tmp = DP_CONVERSION_BT2020_RGB_YCBCR_ENABLE;
2367                         break;
2368                 case DP_COLORIMETRY_BT709_YCC:
2369                 case DP_COLORIMETRY_XVYCC_709:
2370                         if (bt709)
2371                                 tmp = DP_CONVERSION_BT709_RGB_YCBCR_ENABLE;
2372                         break;
2373                 default:
2374                         break;
2375                 }
2376         }
2377
2378         if (drm_dp_pcon_convert_rgb_to_ycbcr(&intel_dp->aux, tmp) < 0)
2379                 drm_dbg_kms(&i915->drm,
2380                            "Failed to %s protocol converter RGB->YCbCr conversion mode\n",
2381                            enabledisable(tmp));
2382 }
2383
2384
2385 bool intel_dp_get_colorimetry_status(struct intel_dp *intel_dp)
2386 {
2387         u8 dprx = 0;
2388
2389         if (drm_dp_dpcd_readb(&intel_dp->aux, DP_DPRX_FEATURE_ENUMERATION_LIST,
2390                               &dprx) != 1)
2391                 return false;
2392         return dprx & DP_VSC_SDP_EXT_FOR_COLORIMETRY_SUPPORTED;
2393 }
2394
2395 static void intel_dp_get_dsc_sink_cap(struct intel_dp *intel_dp)
2396 {
2397         struct drm_i915_private *i915 = dp_to_i915(intel_dp);
2398
2399         /*
2400          * Clear the cached register set to avoid using stale values
2401          * for the sinks that do not support DSC.
2402          */
2403         memset(intel_dp->dsc_dpcd, 0, sizeof(intel_dp->dsc_dpcd));
2404
2405         /* Clear fec_capable to avoid using stale values */
2406         intel_dp->fec_capable = 0;
2407
2408         /* Cache the DSC DPCD if eDP or DP rev >= 1.4 */
2409         if (intel_dp->dpcd[DP_DPCD_REV] >= 0x14 ||
2410             intel_dp->edp_dpcd[0] >= DP_EDP_14) {
2411                 if (drm_dp_dpcd_read(&intel_dp->aux, DP_DSC_SUPPORT,
2412                                      intel_dp->dsc_dpcd,
2413                                      sizeof(intel_dp->dsc_dpcd)) < 0)
2414                         drm_err(&i915->drm,
2415                                 "Failed to read DPCD register 0x%x\n",
2416                                 DP_DSC_SUPPORT);
2417
2418                 drm_dbg_kms(&i915->drm, "DSC DPCD: %*ph\n",
2419                             (int)sizeof(intel_dp->dsc_dpcd),
2420                             intel_dp->dsc_dpcd);
2421
2422                 /* FEC is supported only on DP 1.4 */
2423                 if (!intel_dp_is_edp(intel_dp) &&
2424                     drm_dp_dpcd_readb(&intel_dp->aux, DP_FEC_CAPABILITY,
2425                                       &intel_dp->fec_capable) < 0)
2426                         drm_err(&i915->drm,
2427                                 "Failed to read FEC DPCD register\n");
2428
2429                 drm_dbg_kms(&i915->drm, "FEC CAPABILITY: %x\n",
2430                             intel_dp->fec_capable);
2431         }
2432 }
2433
2434 static void intel_edp_mso_mode_fixup(struct intel_connector *connector,
2435                                      struct drm_display_mode *mode)
2436 {
2437         struct intel_dp *intel_dp = intel_attached_dp(connector);
2438         struct drm_i915_private *i915 = to_i915(connector->base.dev);
2439         int n = intel_dp->mso_link_count;
2440         int overlap = intel_dp->mso_pixel_overlap;
2441
2442         if (!mode || !n)
2443                 return;
2444
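              /*
               * The reported mode describes a single MSO segment; scale the horizontal
               * timings and pixel clock back up to the full panel, i.e. the inverse of
               * the per-segment split done in intel_dp_compute_config().
               */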
2445         mode->hdisplay = (mode->hdisplay - overlap) * n;
2446         mode->hsync_start = (mode->hsync_start - overlap) * n;
2447         mode->hsync_end = (mode->hsync_end - overlap) * n;
2448         mode->htotal = (mode->htotal - overlap) * n;
2449         mode->clock *= n;
2450
2451         drm_mode_set_name(mode);
2452
2453         drm_dbg_kms(&i915->drm,
2454                     "[CONNECTOR:%d:%s] using generated MSO mode: ",
2455                     connector->base.base.id, connector->base.name);
2456         drm_mode_debug_printmodeline(mode);
2457 }
2458
2459 static void intel_edp_mso_init(struct intel_dp *intel_dp)
2460 {
2461         struct drm_i915_private *i915 = dp_to_i915(intel_dp);
2462         u8 mso;
2463
2464         if (intel_dp->edp_dpcd[0] < DP_EDP_14)
2465                 return;
2466
2467         if (drm_dp_dpcd_readb(&intel_dp->aux, DP_EDP_MSO_LINK_CAPABILITIES, &mso) != 1) {
2468                 drm_err(&i915->drm, "Failed to read MSO cap\n");
2469                 return;
2470         }
2471
2472         /* Valid configurations are SST or MSO 2x1, 2x2, 4x1 */
2473         mso &= DP_EDP_MSO_NUMBER_OF_LINKS_MASK;
2474         if (mso % 2 || mso > drm_dp_max_lane_count(intel_dp->dpcd)) {
2475                 drm_err(&i915->drm, "Invalid MSO link count cap %u\n", mso);
2476                 mso = 0;
2477         }
2478
2479         if (mso) {
2480                 drm_dbg_kms(&i915->drm, "Sink MSO %ux%u configuration\n",
2481                             mso, drm_dp_max_lane_count(intel_dp->dpcd) / mso);
2482                 if (!HAS_MSO(i915)) {
2483                         drm_err(&i915->drm, "No source MSO support, disabling\n");
2484                         mso = 0;
2485                 }
2486         }
2487
2488         intel_dp->mso_link_count = mso;
2489         intel_dp->mso_pixel_overlap = 0; /* FIXME: read from DisplayID v2.0 */
2490 }
2491
2492 static bool
2493 intel_edp_init_dpcd(struct intel_dp *intel_dp)
2494 {
2495         struct drm_i915_private *dev_priv =
2496                 to_i915(dp_to_dig_port(intel_dp)->base.base.dev);
2497
2498         /* this function is meant to be called only once */
2499         drm_WARN_ON(&dev_priv->drm, intel_dp->dpcd[DP_DPCD_REV] != 0);
2500
2501         if (drm_dp_read_dpcd_caps(&intel_dp->aux, intel_dp->dpcd) != 0)
2502                 return false;
2503
2504         drm_dp_read_desc(&intel_dp->aux, &intel_dp->desc,
2505                          drm_dp_is_branch(intel_dp->dpcd));
2506
2507         /*
2508          * Read the eDP display control registers.
2509          *
2510          * Do this independent of DP_DPCD_DISPLAY_CONTROL_CAPABLE bit in
2511          * DP_EDP_CONFIGURATION_CAP, because some buggy displays do not have it
2512          * set, but require eDP 1.4+ detection (e.g. for supported link rates
2513          * method). The display control registers should read zero if they're
2514          * not supported anyway.
2515          */
2516         if (drm_dp_dpcd_read(&intel_dp->aux, DP_EDP_DPCD_REV,
2517                              intel_dp->edp_dpcd, sizeof(intel_dp->edp_dpcd)) ==
2518                              sizeof(intel_dp->edp_dpcd)) {
2519                 drm_dbg_kms(&dev_priv->drm, "eDP DPCD: %*ph\n",
2520                             (int)sizeof(intel_dp->edp_dpcd),
2521                             intel_dp->edp_dpcd);
2522
2523                 intel_dp->use_max_params = intel_dp->edp_dpcd[0] < DP_EDP_14;
2524         }
2525
2526         /*
2527          * This has to be called after intel_dp->edp_dpcd is filled, since PSR
2528          * checks for the SET_POWER_CAPABLE bit in intel_dp->edp_dpcd[1].
2529          */
2530         intel_psr_init_dpcd(intel_dp);
2531
2532         /* Read the eDP 1.4+ supported link rates. */
2533         if (intel_dp->edp_dpcd[0] >= DP_EDP_14) {
2534                 __le16 sink_rates[DP_MAX_SUPPORTED_RATES];
2535                 int i;
2536
2537                 drm_dp_dpcd_read(&intel_dp->aux, DP_SUPPORTED_LINK_RATES,
2538                                 sink_rates, sizeof(sink_rates));
2539
2540                 for (i = 0; i < ARRAY_SIZE(sink_rates); i++) {
2541                         int val = le16_to_cpu(sink_rates[i]);
2542
2543                         if (val == 0)
2544                                 break;
2545
2546                         /* Value read multiplied by 200kHz gives the per-lane
2547                          * link rate in kHz. The source rates are, however,
2548                          * stored in terms of LS_Clk kHz. The full conversion
2549                          * back to symbols is
2550                          * (val * 200kHz)*(8/10 ch. encoding)*(1/8 bit to Byte)
2551                          */
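                              /* e.g. HBR2: val = 27000 -> (27000 * 200) / 10 = 540000 */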
2552                         intel_dp->sink_rates[i] = (val * 200) / 10;
2553                 }
2554                 intel_dp->num_sink_rates = i;
2555         }
2556
2557         /*
2558          * Use DP_LINK_RATE_SET if DP_SUPPORTED_LINK_RATES are available,
2559          * default to DP_MAX_LINK_RATE and DP_LINK_BW_SET otherwise.
2560          */
2561         if (intel_dp->num_sink_rates)
2562                 intel_dp->use_rate_select = true;
2563         else
2564                 intel_dp_set_sink_rates(intel_dp);
2565
2566         intel_dp_set_common_rates(intel_dp);
2567
2568         /* Read the eDP DSC DPCD registers */
2569         if (DISPLAY_VER(dev_priv) >= 10)
2570                 intel_dp_get_dsc_sink_cap(intel_dp);
2571
2572         /*
2573          * If needed, program our source OUI so we can make various Intel-specific AUX services
2574          * available (such as HDR backlight controls)
2575          */
2576         intel_edp_init_source_oui(intel_dp, true);
2577
2578         intel_edp_mso_init(intel_dp);
2579
2580         return true;
2581 }
2582
2583 static bool
2584 intel_dp_has_sink_count(struct intel_dp *intel_dp)
2585 {
2586         if (!intel_dp->attached_connector)
2587                 return false;
2588
2589         return drm_dp_read_sink_count_cap(&intel_dp->attached_connector->base,
2590                                           intel_dp->dpcd,
2591                                           &intel_dp->desc);
2592 }
2593
2594 static bool
2595 intel_dp_get_dpcd(struct intel_dp *intel_dp)
2596 {
2597         int ret;
2598
2599         if (intel_dp_init_lttpr_and_dprx_caps(intel_dp) < 0)
2600                 return false;
2601
2602         /*
2603          * Don't clobber cached eDP rates. Also skip re-reading
2604          * the OUI/ID since we know it won't change.
2605          */
2606         if (!intel_dp_is_edp(intel_dp)) {
2607                 drm_dp_read_desc(&intel_dp->aux, &intel_dp->desc,
2608                                  drm_dp_is_branch(intel_dp->dpcd));
2609
2610                 intel_dp_set_sink_rates(intel_dp);
2611                 intel_dp_set_common_rates(intel_dp);
2612         }
2613
2614         if (intel_dp_has_sink_count(intel_dp)) {
2615                 ret = drm_dp_read_sink_count(&intel_dp->aux);
2616                 if (ret < 0)
2617                         return false;
2618
2619                 /*
2620                  * The sink count can change between short pulse hpd events, hence
2621                  * a member variable in intel_dp will track any changes
2622                  * between short pulse interrupts.
2623                  */
2624                 intel_dp->sink_count = ret;
2625
2626                 /*
2627                  * SINK_COUNT == 0 and DOWNSTREAM_PORT_PRESENT == 1 implies that
2628                  * a dongle is present but no display. Unless we need to know
2629                  * whether a dongle is present or not, we don't need to update
2630                  * downstream port information. So, an early return here saves
2631                  * time by skipping other operations which are not required.
2632                  */
2633                 if (!intel_dp->sink_count)
2634                         return false;
2635         }
2636
2637         return drm_dp_read_downstream_info(&intel_dp->aux, intel_dp->dpcd,
2638                                            intel_dp->downstream_ports) == 0;
2639 }
2640
2641 static bool
2642 intel_dp_can_mst(struct intel_dp *intel_dp)
2643 {
2644         struct drm_i915_private *i915 = dp_to_i915(intel_dp);
2645
2646         return i915->params.enable_dp_mst &&
2647                 intel_dp->can_mst &&
2648                 drm_dp_read_mst_cap(&intel_dp->aux, intel_dp->dpcd);
2649 }
2650
2651 static void
2652 intel_dp_configure_mst(struct intel_dp *intel_dp)
2653 {
2654         struct drm_i915_private *i915 = dp_to_i915(intel_dp);
2655         struct intel_encoder *encoder =
2656                 &dp_to_dig_port(intel_dp)->base;
2657         bool sink_can_mst = drm_dp_read_mst_cap(&intel_dp->aux, intel_dp->dpcd);
2658
2659         drm_dbg_kms(&i915->drm,
2660                     "[ENCODER:%d:%s] MST support: port: %s, sink: %s, modparam: %s\n",
2661                     encoder->base.base.id, encoder->base.name,
2662                     yesno(intel_dp->can_mst), yesno(sink_can_mst),
2663                     yesno(i915->params.enable_dp_mst));
2664
2665         if (!intel_dp->can_mst)
2666                 return;
2667
2668         intel_dp->is_mst = sink_can_mst &&
2669                 i915->params.enable_dp_mst;
2670
2671         drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr,
2672                                         intel_dp->is_mst);
2673 }
2674
2675 static bool
2676 intel_dp_get_sink_irq_esi(struct intel_dp *intel_dp, u8 *sink_irq_vector)
2677 {
2678         return drm_dp_dpcd_read(&intel_dp->aux, DP_SINK_COUNT_ESI,
2679                                 sink_irq_vector, DP_DPRX_ESI_LEN) ==
2680                 DP_DPRX_ESI_LEN;
2681 }
2682
2683 bool
2684 intel_dp_needs_vsc_sdp(const struct intel_crtc_state *crtc_state,
2685                        const struct drm_connector_state *conn_state)
2686 {
2687         /*
2688          * As per DP 1.4a spec section 2.2.4.3 [MSA Field for Indication
2689          * of Color Encoding Format and Content Color Gamut], in order to
2690          * send YCBCR 420 or HDR BT.2020 signals we should use DP VSC SDP.
2691          */
2692         if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420)
2693                 return true;
2694
2695         switch (conn_state->colorspace) {
2696         case DRM_MODE_COLORIMETRY_SYCC_601:
2697         case DRM_MODE_COLORIMETRY_OPYCC_601:
2698         case DRM_MODE_COLORIMETRY_BT2020_YCC:
2699         case DRM_MODE_COLORIMETRY_BT2020_RGB:
2700         case DRM_MODE_COLORIMETRY_BT2020_CYCC:
2701                 return true;
2702         default:
2703                 break;
2704         }
2705
2706         return false;
2707 }
2708
2709 static ssize_t intel_dp_vsc_sdp_pack(const struct drm_dp_vsc_sdp *vsc,
2710                                      struct dp_sdp *sdp, size_t size)
2711 {
2712         size_t length = sizeof(struct dp_sdp);
2713
2714         if (size < length)
2715                 return -ENOSPC;
2716
2717         memset(sdp, 0, size);
2718
2719         /*
2720          * Prepare VSC Header for SU as per DP 1.4a spec, Table 2-119
2721          * VSC SDP Header Bytes
2722          */
2723         sdp->sdp_header.HB0 = 0; /* Secondary-Data Packet ID = 0 */
2724         sdp->sdp_header.HB1 = vsc->sdp_type; /* Secondary-data Packet Type */
2725         sdp->sdp_header.HB2 = vsc->revision; /* Revision Number */
2726         sdp->sdp_header.HB3 = vsc->length; /* Number of Valid Data Bytes */
2727
2728         /*
2729          * Only revision 0x5 supports Pixel Encoding/Colorimetry Format as
2730          * per DP 1.4a spec.
2731          */
2732         if (vsc->revision != 0x5)
2733                 goto out;
2734
2735         /* VSC SDP Payload for DB16 through DB18 */
2736         /* Pixel Encoding and Colorimetry Formats  */
2737         sdp->db[16] = (vsc->pixelformat & 0xf) << 4; /* DB16[7:4] */
2738         sdp->db[16] |= vsc->colorimetry & 0xf; /* DB16[3:0] */
2739
2740         switch (vsc->bpc) {
2741         case 6:
2742                 /* 6bpc: 0x0 */
2743                 break;
2744         case 8:
2745                 sdp->db[17] = 0x1; /* DB17[3:0] */
2746                 break;
2747         case 10:
2748                 sdp->db[17] = 0x2;
2749                 break;
2750         case 12:
2751                 sdp->db[17] = 0x3;
2752                 break;
2753         case 16:
2754                 sdp->db[17] = 0x4;
2755                 break;
2756         default:
2757                 MISSING_CASE(vsc->bpc);
2758                 break;
2759         }
2760         /* Dynamic Range and Component Bit Depth */
2761         if (vsc->dynamic_range == DP_DYNAMIC_RANGE_CTA)
2762                 sdp->db[17] |= 0x80;  /* DB17[7] */
2763
2764         /* Content Type */
2765         sdp->db[18] = vsc->content_type & 0x7;
2766
2767 out:
2768         return length;
2769 }
2770
2771 static ssize_t
2772 intel_dp_hdr_metadata_infoframe_sdp_pack(const struct hdmi_drm_infoframe *drm_infoframe,
2773                                          struct dp_sdp *sdp,
2774                                          size_t size)
2775 {
2776         size_t length = sizeof(struct dp_sdp);
2777         const int infoframe_size = HDMI_INFOFRAME_HEADER_SIZE + HDMI_DRM_INFOFRAME_SIZE;
2778         unsigned char buf[HDMI_INFOFRAME_HEADER_SIZE + HDMI_DRM_INFOFRAME_SIZE];
2779         ssize_t len;
2780
2781         if (size < length)
2782                 return -ENOSPC;
2783
2784         memset(sdp, 0, size);
2785
2786         len = hdmi_drm_infoframe_pack_only(drm_infoframe, buf, sizeof(buf));
2787         if (len < 0) {
2788                 DRM_DEBUG_KMS("buffer size is smaller than hdr metadata infoframe\n");
2789                 return -ENOSPC;
2790         }
2791
2792         if (len != infoframe_size) {
2793                 DRM_DEBUG_KMS("wrong static hdr metadata size\n");
2794                 return -ENOSPC;
2795         }
2796
2797         /*
2798          * Set up the infoframe sdp packet for HDR static metadata.
2799          * Prepare the infoframe SDP header as per DP 1.4a spec,
2800          * Table 2-100 and Table 2-101
2801          */
2802
2803         /* Secondary-Data Packet ID, 00h for non-Audio INFOFRAME */
2804         sdp->sdp_header.HB0 = 0;
2805         /*
2806          * Packet Type 80h + Non-audio INFOFRAME Type value
2807          * HDMI_INFOFRAME_TYPE_DRM: 0x87
2808          * - 80h + Non-audio INFOFRAME Type value
2809          * - InfoFrame Type: 0x07
2810          *    [CTA-861-G Table-42 Dynamic Range and Mastering InfoFrame]
2811          */
2812         sdp->sdp_header.HB1 = drm_infoframe->type;
2813         /*
2814          * Least Significant Eight Bits of (Data Byte Count – 1)
2815          * infoframe_size - 1
2816          */
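             /* infoframe_size = 4 + 26 = 30 bytes, so HB2 = 30 - 1 = 29 = 0x1D */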
2817         sdp->sdp_header.HB2 = 0x1D;
2818         /* INFOFRAME SDP Version Number */
2819         sdp->sdp_header.HB3 = (0x13 << 2);
2820         /* CTA Header Byte 2 (INFOFRAME Version Number) */
2821         sdp->db[0] = drm_infoframe->version;
2822         /* CTA Header Byte 3 (Length of INFOFRAME): HDMI_DRM_INFOFRAME_SIZE */
2823         sdp->db[1] = drm_infoframe->length;
2824         /*
2825          * Copy HDMI_DRM_INFOFRAME_SIZE size from a buffer after
2826          * HDMI_INFOFRAME_HEADER_SIZE
2827          */
2828         BUILD_BUG_ON(sizeof(sdp->db) < HDMI_DRM_INFOFRAME_SIZE + 2);
2829         memcpy(&sdp->db[2], &buf[HDMI_INFOFRAME_HEADER_SIZE],
2830                HDMI_DRM_INFOFRAME_SIZE);
2831
2832         /*
2833          * The DP infoframe SDP packet for HDR static metadata consists of:
2834          * - DP SDP Header(struct dp_sdp_header): 4 bytes
2835          * - Two Data Blocks: 2 bytes
2836          *    CTA Header Byte2 (INFOFRAME Version Number)
2837          *    CTA Header Byte3 (Length of INFOFRAME)
2838          * - HDMI_DRM_INFOFRAME_SIZE: 26 bytes
2839          *
2840          * Prior to GEN11, the GMP register size is identical to the DP HDR static
2841          * metadata infoframe size. On GEN11+ the register is larger, and
2842          * write_infoframe will pad the rest of it.
2843          */
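             /* i.e. 4 + 2 + 26 = 32 bytes in total */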
2844         return sizeof(struct dp_sdp_header) + 2 + HDMI_DRM_INFOFRAME_SIZE;
2845 }
2846
2847 static void intel_write_dp_sdp(struct intel_encoder *encoder,
2848                                const struct intel_crtc_state *crtc_state,
2849                                unsigned int type)
2850 {
2851         struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
2852         struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
2853         struct dp_sdp sdp = {};
2854         ssize_t len;
2855
2856         if ((crtc_state->infoframes.enable &
2857              intel_hdmi_infoframe_enable(type)) == 0)
2858                 return;
2859
2860         switch (type) {
2861         case DP_SDP_VSC:
2862                 len = intel_dp_vsc_sdp_pack(&crtc_state->infoframes.vsc, &sdp,
2863                                             sizeof(sdp));
2864                 break;
2865         case HDMI_PACKET_TYPE_GAMUT_METADATA:
2866                 len = intel_dp_hdr_metadata_infoframe_sdp_pack(&crtc_state->infoframes.drm.drm,
2867                                                                &sdp, sizeof(sdp));
2868                 break;
2869         default:
2870                 MISSING_CASE(type);
2871                 return;
2872         }
2873
2874         if (drm_WARN_ON(&dev_priv->drm, len < 0))
2875                 return;
2876
2877         dig_port->write_infoframe(encoder, crtc_state, type, &sdp, len);
2878 }
2879
2880 void intel_write_dp_vsc_sdp(struct intel_encoder *encoder,
2881                             const struct intel_crtc_state *crtc_state,
2882                             struct drm_dp_vsc_sdp *vsc)
2883 {
2884         struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
2885         struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
2886         struct dp_sdp sdp = {};
2887         ssize_t len;
2888
2889         len = intel_dp_vsc_sdp_pack(vsc, &sdp, sizeof(sdp));
2890
2891         if (drm_WARN_ON(&dev_priv->drm, len < 0))
2892                 return;
2893
2894         dig_port->write_infoframe(encoder, crtc_state, DP_SDP_VSC,
2895                                         &sdp, len);
2896 }
2897
2898 void intel_dp_set_infoframes(struct intel_encoder *encoder,
2899                              bool enable,
2900                              const struct intel_crtc_state *crtc_state,
2901                              const struct drm_connector_state *conn_state)
2902 {
2903         struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
2904         i915_reg_t reg = HSW_TVIDEO_DIP_CTL(crtc_state->cpu_transcoder);
2905         u32 dip_enable = VIDEO_DIP_ENABLE_AVI_HSW | VIDEO_DIP_ENABLE_GCP_HSW |
2906                          VIDEO_DIP_ENABLE_VS_HSW | VIDEO_DIP_ENABLE_GMP_HSW |
2907                          VIDEO_DIP_ENABLE_SPD_HSW | VIDEO_DIP_ENABLE_DRM_GLK;
2908         u32 val = intel_de_read(dev_priv, reg) & ~dip_enable;
2909
2910         /* TODO: Add DSC case (DIP_ENABLE_PPS) */
2911         /* When PSR is enabled, this routine doesn't disable VSC DIP */
2912         if (!crtc_state->has_psr)
2913                 val &= ~VIDEO_DIP_ENABLE_VSC_HSW;
2914
2915         intel_de_write(dev_priv, reg, val);
2916         intel_de_posting_read(dev_priv, reg);
2917
2918         if (!enable)
2919                 return;
2920
2921         /* When PSR is enabled, VSC SDP is handled by PSR routine */
2922         if (!crtc_state->has_psr)
2923                 intel_write_dp_sdp(encoder, crtc_state, DP_SDP_VSC);
2924
2925         intel_write_dp_sdp(encoder, crtc_state, HDMI_PACKET_TYPE_GAMUT_METADATA);
2926 }
2927
2928 static int intel_dp_vsc_sdp_unpack(struct drm_dp_vsc_sdp *vsc,
2929                                    const void *buffer, size_t size)
2930 {
2931         const struct dp_sdp *sdp = buffer;
2932
2933         if (size < sizeof(struct dp_sdp))
2934                 return -EINVAL;
2935
2936         memset(vsc, 0, sizeof(*vsc));
2937
2938         if (sdp->sdp_header.HB0 != 0)
2939                 return -EINVAL;
2940
2941         if (sdp->sdp_header.HB1 != DP_SDP_VSC)
2942                 return -EINVAL;
2943
2944         vsc->sdp_type = sdp->sdp_header.HB1;
2945         vsc->revision = sdp->sdp_header.HB2;
2946         vsc->length = sdp->sdp_header.HB3;
2947
2948         if ((sdp->sdp_header.HB2 == 0x2 && sdp->sdp_header.HB3 == 0x8) ||
2949             (sdp->sdp_header.HB2 == 0x4 && sdp->sdp_header.HB3 == 0xe)) {
2950                 /*
2951                  * - HB2 = 0x2, HB3 = 0x8
2952                  *   VSC SDP supporting 3D stereo + PSR
2953                  * - HB2 = 0x4, HB3 = 0xe
2954                  *   VSC SDP supporting 3D stereo + PSR2 with Y-coordinate of
2955                  *   first scan line of the SU region (applies to eDP v1.4b
2956                  *   and higher).
2957                  */
2958                 return 0;
2959         } else if (sdp->sdp_header.HB2 == 0x5 && sdp->sdp_header.HB3 == 0x13) {
2960                 /*
2961                  * - HB2 = 0x5, HB3 = 0x13
2962                  *   VSC SDP supporting 3D stereo + PSR2 + Pixel Encoding/Colorimetry
2963                  *   Format.
2964                  */
2965                 vsc->pixelformat = (sdp->db[16] >> 4) & 0xf;
2966                 vsc->colorimetry = sdp->db[16] & 0xf;
2967                 vsc->dynamic_range = (sdp->db[17] >> 7) & 0x1;
2968
2969                 switch (sdp->db[17] & 0x7) {
2970                 case 0x0:
2971                         vsc->bpc = 6;
2972                         break;
2973                 case 0x1:
2974                         vsc->bpc = 8;
2975                         break;
2976                 case 0x2:
2977                         vsc->bpc = 10;
2978                         break;
2979                 case 0x3:
2980                         vsc->bpc = 12;
2981                         break;
2982                 case 0x4:
2983                         vsc->bpc = 16;
2984                         break;
2985                 default:
2986                         MISSING_CASE(sdp->db[17] & 0x7);
2987                         return -EINVAL;
2988                 }
2989
2990                 vsc->content_type = sdp->db[18] & 0x7;
2991         } else {
2992                 return -EINVAL;
2993         }
2994
2995         return 0;
2996 }
2997
2998 static int
2999 intel_dp_hdr_metadata_infoframe_sdp_unpack(struct hdmi_drm_infoframe *drm_infoframe,
3000                                            const void *buffer, size_t size)
3001 {
3002         int ret;
3003
3004         const struct dp_sdp *sdp = buffer;
3005
3006         if (size < sizeof(struct dp_sdp))
3007                 return -EINVAL;
3008
3009         if (sdp->sdp_header.HB0 != 0)
3010                 return -EINVAL;
3011
3012         if (sdp->sdp_header.HB1 != HDMI_INFOFRAME_TYPE_DRM)
3013                 return -EINVAL;
3014
3015         /*
3016          * Least Significant Eight Bits of (Data Byte Count – 1)
3017          * 1Dh (i.e., Data Byte Count = 30 bytes).
3018          */
3019         if (sdp->sdp_header.HB2 != 0x1D)
3020                 return -EINVAL;
3021
3022         /* Most Significant Two Bits of (Data Byte Count – 1), Clear to 00b. */
3023         if ((sdp->sdp_header.HB3 & 0x3) != 0)
3024                 return -EINVAL;
3025
3026         /* INFOFRAME SDP Version Number */
3027         if (((sdp->sdp_header.HB3 >> 2) & 0x3f) != 0x13)
3028                 return -EINVAL;
3029
3030         /* CTA Header Byte 2 (INFOFRAME Version Number) */
3031         if (sdp->db[0] != 1)
3032                 return -EINVAL;
3033
3034         /* CTA Header Byte 3 (Length of INFOFRAME): HDMI_DRM_INFOFRAME_SIZE */
3035         if (sdp->db[1] != HDMI_DRM_INFOFRAME_SIZE)
3036                 return -EINVAL;
3037
3038         ret = hdmi_drm_infoframe_unpack_only(drm_infoframe, &sdp->db[2],
3039                                              HDMI_DRM_INFOFRAME_SIZE);
3040
3041         return ret;
3042 }
3043
3044 static void intel_read_dp_vsc_sdp(struct intel_encoder *encoder,
3045                                   struct intel_crtc_state *crtc_state,
3046                                   struct drm_dp_vsc_sdp *vsc)
3047 {
3048         struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
3049         struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
3050         unsigned int type = DP_SDP_VSC;
3051         struct dp_sdp sdp = {};
3052         int ret;
3053
3054         /* When PSR is enabled, VSC SDP is handled by PSR routine */
3055         if (crtc_state->has_psr)
3056                 return;
3057
3058         if ((crtc_state->infoframes.enable &
3059              intel_hdmi_infoframe_enable(type)) == 0)
3060                 return;
3061
3062         dig_port->read_infoframe(encoder, crtc_state, type, &sdp, sizeof(sdp));
3063
3064         ret = intel_dp_vsc_sdp_unpack(vsc, &sdp, sizeof(sdp));
3065
3066         if (ret)
3067                 drm_dbg_kms(&dev_priv->drm, "Failed to unpack DP VSC SDP\n");
3068 }
3069
3070 static void intel_read_dp_hdr_metadata_infoframe_sdp(struct intel_encoder *encoder,
3071                                                      struct intel_crtc_state *crtc_state,
3072                                                      struct hdmi_drm_infoframe *drm_infoframe)
3073 {
3074         struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
3075         struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
3076         unsigned int type = HDMI_PACKET_TYPE_GAMUT_METADATA;
3077         struct dp_sdp sdp = {};
3078         int ret;
3079
3080         if ((crtc_state->infoframes.enable &
3081             intel_hdmi_infoframe_enable(type)) == 0)
3082                 return;
3083
3084         dig_port->read_infoframe(encoder, crtc_state, type, &sdp,
3085                                  sizeof(sdp));
3086
3087         ret = intel_dp_hdr_metadata_infoframe_sdp_unpack(drm_infoframe, &sdp,
3088                                                          sizeof(sdp));
3089
3090         if (ret)
3091                 drm_dbg_kms(&dev_priv->drm,
3092                             "Failed to unpack DP HDR Metadata Infoframe SDP\n");
3093 }
3094
3095 void intel_read_dp_sdp(struct intel_encoder *encoder,
3096                        struct intel_crtc_state *crtc_state,
3097                        unsigned int type)
3098 {
3099         switch (type) {
3100         case DP_SDP_VSC:
3101                 intel_read_dp_vsc_sdp(encoder, crtc_state,
3102                                       &crtc_state->infoframes.vsc);
3103                 break;
3104         case HDMI_PACKET_TYPE_GAMUT_METADATA:
3105                 intel_read_dp_hdr_metadata_infoframe_sdp(encoder, crtc_state,
3106                                                          &crtc_state->infoframes.drm.drm);
3107                 break;
3108         default:
3109                 MISSING_CASE(type);
3110                 break;
3111         }
3112 }
3113
3114 static u8 intel_dp_autotest_link_training(struct intel_dp *intel_dp)
3115 {
3116         struct drm_i915_private *i915 = dp_to_i915(intel_dp);
3117         int status = 0;
3118         int test_link_rate;
3119         u8 test_lane_count, test_link_bw;
3120         /* (DP CTS 1.2)
3121          * 4.3.1.11
3122          */
3123         /* Read the TEST_LANE_COUNT and TEST_LINK_RATE fields (DP CTS 3.1.4) */
3124         status = drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_LANE_COUNT,
3125                                    &test_lane_count);
3126
3127         if (status <= 0) {
3128                 drm_dbg_kms(&i915->drm, "Lane count read failed\n");
3129                 return DP_TEST_NAK;
3130         }
3131         test_lane_count &= DP_MAX_LANE_COUNT_MASK;
3132
3133         status = drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_LINK_RATE,
3134                                    &test_link_bw);
3135         if (status <= 0) {
3136                 drm_dbg_kms(&i915->drm, "Link Rate read failed\n");
3137                 return DP_TEST_NAK;
3138         }
3139         test_link_rate = drm_dp_bw_code_to_link_rate(test_link_bw);
3140
3141         /* Validate the requested link rate and lane count */
3142         if (!intel_dp_link_params_valid(intel_dp, test_link_rate,
3143                                         test_lane_count))
3144                 return DP_TEST_NAK;
3145
3146         intel_dp->compliance.test_lane_count = test_lane_count;
3147         intel_dp->compliance.test_link_rate = test_link_rate;
3148
3149         return DP_TEST_ACK;
3150 }
3151
3152 static u8 intel_dp_autotest_video_pattern(struct intel_dp *intel_dp)
3153 {
3154         struct drm_i915_private *i915 = dp_to_i915(intel_dp);
3155         u8 test_pattern;
3156         u8 test_misc;
3157         __be16 h_width, v_height;
3158         int status = 0;
3159
3160         /* Read the TEST_PATTERN (DP CTS 3.1.5) */
3161         status = drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_PATTERN,
3162                                    &test_pattern);
3163         if (status <= 0) {
3164                 drm_dbg_kms(&i915->drm, "Test pattern read failed\n");
3165                 return DP_TEST_NAK;
3166         }
3167         if (test_pattern != DP_COLOR_RAMP)
3168                 return DP_TEST_NAK;
3169
3170         status = drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_H_WIDTH_HI,
3171                                   &h_width, 2);
3172         if (status <= 0) {
3173                 drm_dbg_kms(&i915->drm, "H Width read failed\n");
3174                 return DP_TEST_NAK;
3175         }
3176
3177         status = drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_V_HEIGHT_HI,
3178                                   &v_height, 2);
3179         if (status <= 0) {
3180                 drm_dbg_kms(&i915->drm, "V Height read failed\n");
3181                 return DP_TEST_NAK;
3182         }
3183
3184         status = drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_MISC0,
3185                                    &test_misc);
3186         if (status <= 0) {
3187                 drm_dbg_kms(&i915->drm, "TEST MISC read failed\n");
3188                 return DP_TEST_NAK;
3189         }
3190         if ((test_misc & DP_TEST_COLOR_FORMAT_MASK) != DP_COLOR_FORMAT_RGB)
3191                 return DP_TEST_NAK;
3192         if (test_misc & DP_TEST_DYNAMIC_RANGE_CEA)
3193                 return DP_TEST_NAK;
3194         switch (test_misc & DP_TEST_BIT_DEPTH_MASK) {
3195         case DP_TEST_BIT_DEPTH_6:
3196                 intel_dp->compliance.test_data.bpc = 6;
3197                 break;
3198         case DP_TEST_BIT_DEPTH_8:
3199                 intel_dp->compliance.test_data.bpc = 8;
3200                 break;
3201         default:
3202                 return DP_TEST_NAK;
3203         }
3204
3205         intel_dp->compliance.test_data.video_pattern = test_pattern;
3206         intel_dp->compliance.test_data.hdisplay = be16_to_cpu(h_width);
3207         intel_dp->compliance.test_data.vdisplay = be16_to_cpu(v_height);
3208         /* Set test active flag here so userspace doesn't interrupt things */
3209         intel_dp->compliance.test_active = true;
3210
3211         return DP_TEST_ACK;
3212 }
3213
3214 static u8 intel_dp_autotest_edid(struct intel_dp *intel_dp)
3215 {
3216         struct drm_i915_private *i915 = dp_to_i915(intel_dp);
3217         u8 test_result = DP_TEST_ACK;
3218         struct intel_connector *intel_connector = intel_dp->attached_connector;
3219         struct drm_connector *connector = &intel_connector->base;
3220
3221         if (intel_connector->detect_edid == NULL ||
3222             connector->edid_corrupt ||
3223             intel_dp->aux.i2c_defer_count > 6) {
3224                 /* Check EDID read for NACKs, DEFERs and corruption
3225                  * (DP CTS 1.2 Core r1.1)
3226                  *    4.2.2.4 : Failed EDID read, I2C_NAK
3227                  *    4.2.2.5 : Failed EDID read, I2C_DEFER
3228                  *    4.2.2.6 : EDID corruption detected
3229                  * Use failsafe mode for all cases
3230                  */
3231                 if (intel_dp->aux.i2c_nack_count > 0 ||
3232                         intel_dp->aux.i2c_defer_count > 0)
3233                         drm_dbg_kms(&i915->drm,
3234                                     "EDID read had %d NACKs, %d DEFERs\n",
3235                                     intel_dp->aux.i2c_nack_count,
3236                                     intel_dp->aux.i2c_defer_count);
3237                 intel_dp->compliance.test_data.edid = INTEL_DP_RESOLUTION_FAILSAFE;
3238         } else {
3239                 struct edid *block = intel_connector->detect_edid;
3240
3241                 /* We have to write the checksum
3242                  * of the last block read
3243                  */
3244                 block += intel_connector->detect_edid->extensions;
3245
3246                 if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_EDID_CHECKSUM,
3247                                        block->checksum) <= 0)
3248                         drm_dbg_kms(&i915->drm,
3249                                     "Failed to write EDID checksum\n");
3250
3251                 test_result = DP_TEST_ACK | DP_TEST_EDID_CHECKSUM_WRITE;
3252                 intel_dp->compliance.test_data.edid = INTEL_DP_RESOLUTION_PREFERRED;
3253         }
3254
3255         /* Set test active flag here so userspace doesn't interrupt things */
3256         intel_dp->compliance.test_active = true;
3257
3258         return test_result;
3259 }
3260
3261 static void intel_dp_phy_pattern_update(struct intel_dp *intel_dp,
3262                                         const struct intel_crtc_state *crtc_state)
3263 {
3264         struct drm_i915_private *dev_priv =
3265                         to_i915(dp_to_dig_port(intel_dp)->base.base.dev);
3266         struct drm_dp_phy_test_params *data =
3267                         &intel_dp->compliance.test_data.phytest;
3268         struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
3269         enum pipe pipe = crtc->pipe;
3270         u32 pattern_val;
3271
3272         switch (data->phy_pattern) {
3273         case DP_PHY_TEST_PATTERN_NONE:
3274                 DRM_DEBUG_KMS("Disable Phy Test Pattern\n");
3275                 intel_de_write(dev_priv, DDI_DP_COMP_CTL(pipe), 0x0);
3276                 break;
3277         case DP_PHY_TEST_PATTERN_D10_2:
3278                 DRM_DEBUG_KMS("Set D10.2 Phy Test Pattern\n");
3279                 intel_de_write(dev_priv, DDI_DP_COMP_CTL(pipe),
3280                                DDI_DP_COMP_CTL_ENABLE | DDI_DP_COMP_CTL_D10_2);
3281                 break;
3282         case DP_PHY_TEST_PATTERN_ERROR_COUNT:
3283                 DRM_DEBUG_KMS("Set Error Count Phy Test Pattern\n");
3284                 intel_de_write(dev_priv, DDI_DP_COMP_CTL(pipe),
3285                                DDI_DP_COMP_CTL_ENABLE |
3286                                DDI_DP_COMP_CTL_SCRAMBLED_0);
3287                 break;
3288         case DP_PHY_TEST_PATTERN_PRBS7:
3289                 DRM_DEBUG_KMS("Set PRBS7 Phy Test Pattern\n");
3290                 intel_de_write(dev_priv, DDI_DP_COMP_CTL(pipe),
3291                                DDI_DP_COMP_CTL_ENABLE | DDI_DP_COMP_CTL_PRBS7);
3292                 break;
3293         case DP_PHY_TEST_PATTERN_80BIT_CUSTOM:
3294                 /*
3295                  * FIXME: Ideally the pattern should come from DPCD 0x250. As the
3296                  * current DPR-100 firmware cannot set it, hardcode it here for
3297                  * the compliance test for now.
3298                  */
3299                 DRM_DEBUG_KMS("Set 80Bit Custom Phy Test Pattern 0x3e0f83e0 0x0f83e0f8 0x0000f83e\n");
3300                 pattern_val = 0x3e0f83e0;
3301                 intel_de_write(dev_priv, DDI_DP_COMP_PAT(pipe, 0), pattern_val);
3302                 pattern_val = 0x0f83e0f8;
3303                 intel_de_write(dev_priv, DDI_DP_COMP_PAT(pipe, 1), pattern_val);
3304                 pattern_val = 0x0000f83e;
3305                 intel_de_write(dev_priv, DDI_DP_COMP_PAT(pipe, 2), pattern_val);
3306                 intel_de_write(dev_priv, DDI_DP_COMP_CTL(pipe),
3307                                DDI_DP_COMP_CTL_ENABLE |
3308                                DDI_DP_COMP_CTL_CUSTOM80);
3309                 break;
3310         case DP_PHY_TEST_PATTERN_CP2520:
3311                 /*
3312                  * FIXME: Ideally the pattern should come from DPCD 0x24A. As the
3313                  * current DPR-100 firmware cannot set it, hardcode it here for
3314                  * the compliance test for now.
3315                  */
3316                 DRM_DEBUG_KMS("Set HBR2 compliance Phy Test Pattern\n");
3317                 pattern_val = 0xFB;
3318                 intel_de_write(dev_priv, DDI_DP_COMP_CTL(pipe),
3319                                DDI_DP_COMP_CTL_ENABLE | DDI_DP_COMP_CTL_HBR2 |
3320                                pattern_val);
3321                 break;
3322         default:
3323                 WARN(1, "Invalid Phy Test Pattern\n");
3324         }
3325 }
3326
3327 static void
3328 intel_dp_autotest_phy_ddi_disable(struct intel_dp *intel_dp,
3329                                   const struct intel_crtc_state *crtc_state)
3330 {
3331         struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
3332         struct drm_device *dev = dig_port->base.base.dev;
3333         struct drm_i915_private *dev_priv = to_i915(dev);
3334         struct intel_crtc *crtc = to_intel_crtc(dig_port->base.base.crtc);
3335         enum pipe pipe = crtc->pipe;
3336         u32 trans_ddi_func_ctl_value, trans_conf_value, dp_tp_ctl_value;
3337
3338         trans_ddi_func_ctl_value = intel_de_read(dev_priv,
3339                                                  TRANS_DDI_FUNC_CTL(pipe));
3340         trans_conf_value = intel_de_read(dev_priv, PIPECONF(pipe));
3341         dp_tp_ctl_value = intel_de_read(dev_priv, TGL_DP_TP_CTL(pipe));
3342
3343         trans_ddi_func_ctl_value &= ~(TRANS_DDI_FUNC_ENABLE |
3344                                       TGL_TRANS_DDI_PORT_MASK);
3345         trans_conf_value &= ~PIPECONF_ENABLE;
3346         dp_tp_ctl_value &= ~DP_TP_CTL_ENABLE;
3347
3348         intel_de_write(dev_priv, PIPECONF(pipe), trans_conf_value);
3349         intel_de_write(dev_priv, TRANS_DDI_FUNC_CTL(pipe),
3350                        trans_ddi_func_ctl_value);
3351         intel_de_write(dev_priv, TGL_DP_TP_CTL(pipe), dp_tp_ctl_value);
3352 }
3353
3354 static void
3355 intel_dp_autotest_phy_ddi_enable(struct intel_dp *intel_dp,
3356                                  const struct intel_crtc_state *crtc_state)
3357 {
3358         struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
3359         struct drm_device *dev = dig_port->base.base.dev;
3360         struct drm_i915_private *dev_priv = to_i915(dev);
3361         enum port port = dig_port->base.port;
3362         struct intel_crtc *crtc = to_intel_crtc(dig_port->base.base.crtc);
3363         enum pipe pipe = crtc->pipe;
3364         u32 trans_ddi_func_ctl_value, trans_conf_value, dp_tp_ctl_value;
3365
3366         trans_ddi_func_ctl_value = intel_de_read(dev_priv,
3367                                                  TRANS_DDI_FUNC_CTL(pipe));
3368         trans_conf_value = intel_de_read(dev_priv, PIPECONF(pipe));
3369         dp_tp_ctl_value = intel_de_read(dev_priv, TGL_DP_TP_CTL(pipe));
3370
3371         trans_ddi_func_ctl_value |= TRANS_DDI_FUNC_ENABLE |
3372                                     TGL_TRANS_DDI_SELECT_PORT(port);
3373         trans_conf_value |= PIPECONF_ENABLE;
3374         dp_tp_ctl_value |= DP_TP_CTL_ENABLE;
3375
3376         intel_de_write(dev_priv, PIPECONF(pipe), trans_conf_value);
3377         intel_de_write(dev_priv, TGL_DP_TP_CTL(pipe), dp_tp_ctl_value);
3378         intel_de_write(dev_priv, TRANS_DDI_FUNC_CTL(pipe),
3379                        trans_ddi_func_ctl_value);
3380 }
3381
3382 static void intel_dp_process_phy_request(struct intel_dp *intel_dp,
3383                                          const struct intel_crtc_state *crtc_state)
3384 {
3385         struct drm_dp_phy_test_params *data =
3386                 &intel_dp->compliance.test_data.phytest;
3387         u8 link_status[DP_LINK_STATUS_SIZE];
3388
3389         if (drm_dp_dpcd_read_phy_link_status(&intel_dp->aux, DP_PHY_DPRX,
3390                                              link_status) < 0) {
3391                 DRM_DEBUG_KMS("failed to get link status\n");
3392                 return;
3393         }
3394
3395         /* retrieve vswing & pre-emphasis setting */
3396         intel_dp_get_adjust_train(intel_dp, crtc_state, DP_PHY_DPRX,
3397                                   link_status);
3398
3399         intel_dp_autotest_phy_ddi_disable(intel_dp, crtc_state);
3400
3401         intel_dp_set_signal_levels(intel_dp, crtc_state, DP_PHY_DPRX);
3402
3403         intel_dp_phy_pattern_update(intel_dp, crtc_state);
3404
3405         intel_dp_autotest_phy_ddi_enable(intel_dp, crtc_state);
3406
3407         drm_dp_dpcd_write(&intel_dp->aux, DP_TRAINING_LANE0_SET,
3408                           intel_dp->train_set, crtc_state->lane_count);
3409
3410         drm_dp_set_phy_test_pattern(&intel_dp->aux, data,
3411                                     link_status[DP_DPCD_REV]);
3412 }
3413
3414 static u8 intel_dp_autotest_phy_pattern(struct intel_dp *intel_dp)
3415 {
3416         struct drm_dp_phy_test_params *data =
3417                 &intel_dp->compliance.test_data.phytest;
3418
3419         if (drm_dp_get_phy_test_pattern(&intel_dp->aux, data)) {
3420                 DRM_DEBUG_KMS("DP Phy Test pattern AUX read failure\n");
3421                 return DP_TEST_NAK;
3422         }
3423
3424         /* Set test active flag here so userspace doesn't interrupt things */
3425         intel_dp->compliance.test_active = true;
3426
3427         return DP_TEST_ACK;
3428 }
3429
3430 static void intel_dp_handle_test_request(struct intel_dp *intel_dp)
3431 {
3432         struct drm_i915_private *i915 = dp_to_i915(intel_dp);
3433         u8 response = DP_TEST_NAK;
3434         u8 request = 0;
3435         int status;
3436
3437         status = drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_REQUEST, &request);
3438         if (status <= 0) {
3439                 drm_dbg_kms(&i915->drm,
3440                             "Could not read test request from sink\n");
3441                 goto update_status;
3442         }
3443
3444         switch (request) {
3445         case DP_TEST_LINK_TRAINING:
3446                 drm_dbg_kms(&i915->drm, "LINK_TRAINING test requested\n");
3447                 response = intel_dp_autotest_link_training(intel_dp);
3448                 break;
3449         case DP_TEST_LINK_VIDEO_PATTERN:
3450                 drm_dbg_kms(&i915->drm, "TEST_PATTERN test requested\n");
3451                 response = intel_dp_autotest_video_pattern(intel_dp);
3452                 break;
3453         case DP_TEST_LINK_EDID_READ:
3454                 drm_dbg_kms(&i915->drm, "EDID test requested\n");
3455                 response = intel_dp_autotest_edid(intel_dp);
3456                 break;
3457         case DP_TEST_LINK_PHY_TEST_PATTERN:
3458                 drm_dbg_kms(&i915->drm, "PHY_PATTERN test requested\n");
3459                 response = intel_dp_autotest_phy_pattern(intel_dp);
3460                 break;
3461         default:
3462                 drm_dbg_kms(&i915->drm, "Invalid test request '%02x'\n",
3463                             request);
3464                 break;
3465         }
3466
3467         if (response & DP_TEST_ACK)
3468                 intel_dp->compliance.test_type = request;
3469
3470 update_status:
3471         status = drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_RESPONSE, response);
3472         if (status <= 0)
3473                 drm_dbg_kms(&i915->drm,
3474                             "Could not write test response to sink\n");
3475 }
3476
3477 static void
3478 intel_dp_mst_hpd_irq(struct intel_dp *intel_dp, u8 *esi, bool *handled)
3479 {
3480         drm_dp_mst_hpd_irq(&intel_dp->mst_mgr, esi, handled);
3481
3482         if (esi[1] & DP_CP_IRQ) {
3483                 intel_hdcp_handle_cp_irq(intel_dp->attached_connector);
3484                 *handled = true;
3485         }
3486 }
3487
3488 /**
3489  * intel_dp_check_mst_status - service any pending MST interrupts, check link status
3490  * @intel_dp: Intel DP struct
3491  *
3492  * Read any pending MST interrupts, call MST core to handle these and ack the
3493  * interrupts. Check if the main and AUX link state is ok.
3494  *
3495  * Returns:
3496  * - %true if pending interrupts were serviced (or no interrupts were
3497  *   pending) w/o detecting an error condition.
3498  * - %false if an error condition - like AUX failure or a loss of link - is
3499  *   detected, which needs servicing from the hotplug work.
3500  */
3501 static bool
3502 intel_dp_check_mst_status(struct intel_dp *intel_dp)
3503 {
3504         struct drm_i915_private *i915 = dp_to_i915(intel_dp);
3505         bool link_ok = true;
3506
3507         drm_WARN_ON_ONCE(&i915->drm, intel_dp->active_mst_links < 0);
3508
3509         for (;;) {
3510                 /*
3511                  * The +2 is because DP_DPRX_ESI_LEN is 14, but we then
3512                  * pass in "esi+10" to drm_dp_channel_eq_ok(), which
3513                  * takes a 6-byte array. So we actually need 16 bytes
3514                  * here.
3515                  *
3516                  * Somebody who knows what the limits actually are
3517                  * should check this, but for now this is at least
3518                  * harmless and avoids a valid compiler warning about
3519                  * using more of the array than we have allocated.
3520                  */
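                     /* 10 + 6 = 16 bytes, i.e. DP_DPRX_ESI_LEN (14) + 2 */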
3521                 u8 esi[DP_DPRX_ESI_LEN+2] = {};
3522                 bool handled;
3523                 int retry;
3524
3525                 if (!intel_dp_get_sink_irq_esi(intel_dp, esi)) {
3526                         drm_dbg_kms(&i915->drm,
3527                                     "failed to get ESI - device may have failed\n");
3528                         link_ok = false;
3529
3530                         break;
3531                 }
3532
3533                 /* check link status - esi[10] = 0x200c */
3534                 if (intel_dp->active_mst_links > 0 && link_ok &&
3535                     !drm_dp_channel_eq_ok(&esi[10], intel_dp->lane_count)) {
3536                         drm_dbg_kms(&i915->drm,
3537                                     "channel EQ not ok, retraining\n");
3538                         link_ok = false;
3539                 }
3540
3541                 drm_dbg_kms(&i915->drm, "got esi %3ph\n", esi);
3542
3543                 intel_dp_mst_hpd_irq(intel_dp, esi, &handled);
3544
3545                 if (!handled)
3546                         break;
3547
3548                 for (retry = 0; retry < 3; retry++) {
3549                         int wret;
3550
3551                         wret = drm_dp_dpcd_write(&intel_dp->aux,
3552                                                  DP_SINK_COUNT_ESI+1,
3553                                                  &esi[1], 3);
3554                         if (wret == 3)
3555                                 break;
3556                 }
3557         }
3558
3559         return link_ok;
3560 }
3561
3562 static void
3563 intel_dp_handle_hdmi_link_status_change(struct intel_dp *intel_dp)
3564 {
3565         bool is_active;
3566         u8 buf = 0;
3567
3568         is_active = drm_dp_pcon_hdmi_link_active(&intel_dp->aux);
3569         if (intel_dp->frl.is_trained && !is_active) {
3570                 if (drm_dp_dpcd_readb(&intel_dp->aux, DP_PCON_HDMI_LINK_CONFIG_1, &buf) < 0)
3571                         return;
3572
3573                 buf &= ~DP_PCON_ENABLE_HDMI_LINK;
3574                 if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_PCON_HDMI_LINK_CONFIG_1, buf) < 0)
3575                         return;
3576
3577                 drm_dp_pcon_hdmi_frl_link_error_count(&intel_dp->aux, &intel_dp->attached_connector->base);
3578
3579                 /* Restart FRL training or fall back to TMDS mode */
3580                 intel_dp_check_frl_training(intel_dp);
3581         }
3582 }
3583
3584 static bool
3585 intel_dp_needs_link_retrain(struct intel_dp *intel_dp)
3586 {
3587         u8 link_status[DP_LINK_STATUS_SIZE];
3588
3589         if (!intel_dp->link_trained)
3590                 return false;
3591
3592         /*
3593          * While the PSR source HW is enabled it controls the main link,
3594          * enabling and disabling frame transmission, so trying to retrain
3595          * will fail: the link may not be on, or training patterns could
3596          * get mixed with frame data, either way causing the retrain to fail.
3597          * Also, when exiting PSR the HW retrains the link anyway, fixing
3598          * any link status error.
3599          */
3600         if (intel_psr_enabled(intel_dp))
3601                 return false;
3602
3603         if (drm_dp_dpcd_read_phy_link_status(&intel_dp->aux, DP_PHY_DPRX,
3604                                              link_status) < 0)
3605                 return false;
3606
3607         /*
3608          * Validate the cached values of intel_dp->link_rate and
3609          * intel_dp->lane_count before attempting to retrain.
3610          *
3611          * FIXME would be nice to use the crtc state here, but since
3612          * we need to call this from the short HPD handler that seems
3613          * a bit hard.
3614          */
3615         if (!intel_dp_link_params_valid(intel_dp, intel_dp->link_rate,
3616                                         intel_dp->lane_count))
3617                 return false;
3618
3619         /* Retrain if Channel EQ or CR not ok */
3620         return !drm_dp_channel_eq_ok(link_status, intel_dp->lane_count);
3621 }
3622
3623 static bool intel_dp_has_connector(struct intel_dp *intel_dp,
3624                                    const struct drm_connector_state *conn_state)
3625 {
3626         struct drm_i915_private *i915 = dp_to_i915(intel_dp);
3627         struct intel_encoder *encoder;
3628         enum pipe pipe;
3629
3630         if (!conn_state->best_encoder)
3631                 return false;
3632
3633         /* SST */
3634         encoder = &dp_to_dig_port(intel_dp)->base;
3635         if (conn_state->best_encoder == &encoder->base)
3636                 return true;
3637
3638         /* MST */
3639         for_each_pipe(i915, pipe) {
3640                 encoder = &intel_dp->mst_encoders[pipe]->base;
3641                 if (conn_state->best_encoder == &encoder->base)
3642                         return true;
3643         }
3644
3645         return false;
3646 }
3647
3648 static int intel_dp_prep_link_retrain(struct intel_dp *intel_dp,
3649                                       struct drm_modeset_acquire_ctx *ctx,
3650                                       u32 *crtc_mask)
3651 {
3652         struct drm_i915_private *i915 = dp_to_i915(intel_dp);
3653         struct drm_connector_list_iter conn_iter;
3654         struct intel_connector *connector;
3655         int ret = 0;
3656
3657         *crtc_mask = 0;
3658
3659         if (!intel_dp_needs_link_retrain(intel_dp))
3660                 return 0;
3661
3662         drm_connector_list_iter_begin(&i915->drm, &conn_iter);
3663         for_each_intel_connector_iter(connector, &conn_iter) {
3664                 struct drm_connector_state *conn_state =
3665                         connector->base.state;
3666                 struct intel_crtc_state *crtc_state;
3667                 struct intel_crtc *crtc;
3668
3669                 if (!intel_dp_has_connector(intel_dp, conn_state))
3670                         continue;
3671
3672                 crtc = to_intel_crtc(conn_state->crtc);
3673                 if (!crtc)
3674                         continue;
3675
3676                 ret = drm_modeset_lock(&crtc->base.mutex, ctx);
3677                 if (ret)
3678                         break;
3679
3680                 crtc_state = to_intel_crtc_state(crtc->base.state);
3681
3682                 drm_WARN_ON(&i915->drm, !intel_crtc_has_dp_encoder(crtc_state));
3683
3684                 if (!crtc_state->hw.active)
3685                         continue;
3686
3687                 if (conn_state->commit &&
3688                     !try_wait_for_completion(&conn_state->commit->hw_done))
3689                         continue;
3690
3691                 *crtc_mask |= drm_crtc_mask(&crtc->base);
3692         }
3693         drm_connector_list_iter_end(&conn_iter);
3694
3695         if (!intel_dp_needs_link_retrain(intel_dp))
3696                 *crtc_mask = 0;
3697
3698         return ret;
3699 }
3700
3701 static bool intel_dp_is_connected(struct intel_dp *intel_dp)
3702 {
3703         struct intel_connector *connector = intel_dp->attached_connector;
3704
3705         return connector->base.status == connector_status_connected ||
3706                 intel_dp->is_mst;
3707 }
3708
3709 int intel_dp_retrain_link(struct intel_encoder *encoder,
3710                           struct drm_modeset_acquire_ctx *ctx)
3711 {
3712         struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
3713         struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
3714         struct intel_crtc *crtc;
3715         u32 crtc_mask;
3716         int ret;
3717
3718         if (!intel_dp_is_connected(intel_dp))
3719                 return 0;
3720
3721         ret = drm_modeset_lock(&dev_priv->drm.mode_config.connection_mutex,
3722                                ctx);
3723         if (ret)
3724                 return ret;
3725
3726         ret = intel_dp_prep_link_retrain(intel_dp, ctx, &crtc_mask);
3727         if (ret)
3728                 return ret;
3729
3730         if (crtc_mask == 0)
3731                 return 0;
3732
3733         drm_dbg_kms(&dev_priv->drm, "[ENCODER:%d:%s] retraining link\n",
3734                     encoder->base.base.id, encoder->base.name);
3735
3736         for_each_intel_crtc_mask(&dev_priv->drm, crtc, crtc_mask) {
3737                 const struct intel_crtc_state *crtc_state =
3738                         to_intel_crtc_state(crtc->base.state);
3739
3740                 /* Suppress underruns caused by re-training */
3741                 intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, false);
3742                 if (crtc_state->has_pch_encoder)
3743                         intel_set_pch_fifo_underrun_reporting(dev_priv,
3744                                                               intel_crtc_pch_transcoder(crtc), false);
3745         }
3746
3747         for_each_intel_crtc_mask(&dev_priv->drm, crtc, crtc_mask) {
3748                 const struct intel_crtc_state *crtc_state =
3749                         to_intel_crtc_state(crtc->base.state);
3750
3751                 /* retrain on the MST master transcoder */
3752                 if (DISPLAY_VER(dev_priv) >= 12 &&
3753                     intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP_MST) &&
3754                     !intel_dp_mst_is_master_trans(crtc_state))
3755                         continue;
3756
3757                 intel_dp_check_frl_training(intel_dp);
3758                 intel_dp_pcon_dsc_configure(intel_dp, crtc_state);
3759                 intel_dp_start_link_train(intel_dp, crtc_state);
3760                 intel_dp_stop_link_train(intel_dp, crtc_state);
3761                 break;
3762         }
3763
3764         for_each_intel_crtc_mask(&dev_priv->drm, crtc, crtc_mask) {
3765                 const struct intel_crtc_state *crtc_state =
3766                         to_intel_crtc_state(crtc->base.state);
3767
3768                 /* Keep underrun reporting disabled until things are stable */
3769                 intel_wait_for_vblank(dev_priv, crtc->pipe);
3770
3771                 intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, true);
3772                 if (crtc_state->has_pch_encoder)
3773                         intel_set_pch_fifo_underrun_reporting(dev_priv,
3774                                                               intel_crtc_pch_transcoder(crtc), true);
3775         }
3776
3777         return 0;
3778 }
3779
3780 static int intel_dp_prep_phy_test(struct intel_dp *intel_dp,
3781                                   struct drm_modeset_acquire_ctx *ctx,
3782                                   u32 *crtc_mask)
3783 {
3784         struct drm_i915_private *i915 = dp_to_i915(intel_dp);
3785         struct drm_connector_list_iter conn_iter;
3786         struct intel_connector *connector;
3787         int ret = 0;
3788
3789         *crtc_mask = 0;
3790
3791         drm_connector_list_iter_begin(&i915->drm, &conn_iter);
3792         for_each_intel_connector_iter(connector, &conn_iter) {
3793                 struct drm_connector_state *conn_state =
3794                         connector->base.state;
3795                 struct intel_crtc_state *crtc_state;
3796                 struct intel_crtc *crtc;
3797
3798                 if (!intel_dp_has_connector(intel_dp, conn_state))
3799                         continue;
3800
3801                 crtc = to_intel_crtc(conn_state->crtc);
3802                 if (!crtc)
3803                         continue;
3804
3805                 ret = drm_modeset_lock(&crtc->base.mutex, ctx);
3806                 if (ret)
3807                         break;
3808
3809                 crtc_state = to_intel_crtc_state(crtc->base.state);
3810
3811                 drm_WARN_ON(&i915->drm, !intel_crtc_has_dp_encoder(crtc_state));
3812
3813                 if (!crtc_state->hw.active)
3814                         continue;
3815
3816                 if (conn_state->commit &&
3817                     !try_wait_for_completion(&conn_state->commit->hw_done))
3818                         continue;
3819
3820                 *crtc_mask |= drm_crtc_mask(&crtc->base);
3821         }
3822         drm_connector_list_iter_end(&conn_iter);
3823
3824         return ret;
3825 }
3826
3827 static int intel_dp_do_phy_test(struct intel_encoder *encoder,
3828                                 struct drm_modeset_acquire_ctx *ctx)
3829 {
3830         struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
3831         struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
3832         struct intel_crtc *crtc;
3833         u32 crtc_mask;
3834         int ret;
3835
3836         ret = drm_modeset_lock(&dev_priv->drm.mode_config.connection_mutex,
3837                                ctx);
3838         if (ret)
3839                 return ret;
3840
3841         ret = intel_dp_prep_phy_test(intel_dp, ctx, &crtc_mask);
3842         if (ret)
3843                 return ret;
3844
3845         if (crtc_mask == 0)
3846                 return 0;
3847
3848         drm_dbg_kms(&dev_priv->drm, "[ENCODER:%d:%s] PHY test\n",
3849                     encoder->base.base.id, encoder->base.name);
3850
3851         for_each_intel_crtc_mask(&dev_priv->drm, crtc, crtc_mask) {
3852                 const struct intel_crtc_state *crtc_state =
3853                         to_intel_crtc_state(crtc->base.state);
3854
3855                 /* test on the MST master transcoder */
3856                 if (DISPLAY_VER(dev_priv) >= 12 &&
3857                     intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP_MST) &&
3858                     !intel_dp_mst_is_master_trans(crtc_state))
3859                         continue;
3860
3861                 intel_dp_process_phy_request(intel_dp, crtc_state);
3862                 break;
3863         }
3864
3865         return 0;
3866 }
3867
3868 void intel_dp_phy_test(struct intel_encoder *encoder)
3869 {
3870         struct drm_modeset_acquire_ctx ctx;
3871         int ret;
3872
3873         drm_modeset_acquire_init(&ctx, 0);
3874
3875         for (;;) {
3876                 ret = intel_dp_do_phy_test(encoder, &ctx);
3877
3878                 if (ret == -EDEADLK) {
3879                         drm_modeset_backoff(&ctx);
3880                         continue;
3881                 }
3882
3883                 break;
3884         }
3885
3886         drm_modeset_drop_locks(&ctx);
3887         drm_modeset_acquire_fini(&ctx);
3888         drm_WARN(encoder->base.dev, ret,
3889                  "Acquiring modeset locks failed with %i\n", ret);
3890 }
3891
3892 static void intel_dp_check_device_service_irq(struct intel_dp *intel_dp)
3893 {
3894         struct drm_i915_private *i915 = dp_to_i915(intel_dp);
3895         u8 val;
3896
3897         if (intel_dp->dpcd[DP_DPCD_REV] < 0x11)
3898                 return;
3899
3900         if (drm_dp_dpcd_readb(&intel_dp->aux,
3901                               DP_DEVICE_SERVICE_IRQ_VECTOR, &val) != 1 || !val)
3902                 return;
3903
3904         drm_dp_dpcd_writeb(&intel_dp->aux, DP_DEVICE_SERVICE_IRQ_VECTOR, val);
3905
3906         if (val & DP_AUTOMATED_TEST_REQUEST)
3907                 intel_dp_handle_test_request(intel_dp);
3908
3909         if (val & DP_CP_IRQ)
3910                 intel_hdcp_handle_cp_irq(intel_dp->attached_connector);
3911
3912         if (val & DP_SINK_SPECIFIC_IRQ)
3913                 drm_dbg_kms(&i915->drm, "Sink specific irq unhandled\n");
3914 }
3915
3916 static void intel_dp_check_link_service_irq(struct intel_dp *intel_dp)
3917 {
3918         u8 val;
3919
3920         if (intel_dp->dpcd[DP_DPCD_REV] < 0x11)
3921                 return;
3922
3923         if (drm_dp_dpcd_readb(&intel_dp->aux,
3924                               DP_LINK_SERVICE_IRQ_VECTOR_ESI0, &val) != 1 || !val)
3925                 return;
3926
3927         if (drm_dp_dpcd_writeb(&intel_dp->aux,
3928                                DP_LINK_SERVICE_IRQ_VECTOR_ESI0, val) != 1)
3929                 return;
3930
3931         if (val & HDMI_LINK_STATUS_CHANGED)
3932                 intel_dp_handle_hdmi_link_status_change(intel_dp);
3933 }
3934
3935 /*
3936  * According to DP spec
3937  * 5.1.2:
3938  *  1. Read DPCD
3939  *  2. Configure link according to Receiver Capabilities
3940  *  3. Use Link Training from 2.5.3.3 and 3.5.1.3
3941  *  4. Check link status on receipt of hot-plug interrupt
3942  *
3943  * intel_dp_short_pulse -  handles short pulse interrupts
3944  * when full detection is not required.
3945  * Returns %true if short pulse is handled and full detection
3946  * is NOT required and %false otherwise.
3947  */
3948 static bool
3949 intel_dp_short_pulse(struct intel_dp *intel_dp)
3950 {
3951         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
3952         u8 old_sink_count = intel_dp->sink_count;
3953         bool ret;
3954
3955         /*
3956          * Clear the compliance test variables to allow capturing
3957          * values for the next automated test request.
3958          */
3959         memset(&intel_dp->compliance, 0, sizeof(intel_dp->compliance));
3960
3961         /*
3962          * Now read the DPCD to see if it's actually running.
3963          * If the current value of the sink count doesn't match the
3964          * value that was stored earlier, or the DPCD read failed,
3965          * we need to do full detection.
3966          */
3967         ret = intel_dp_get_dpcd(intel_dp);
3968
3969         if ((old_sink_count != intel_dp->sink_count) || !ret) {
3970                 /* No need to proceed if we are going to do full detect */
3971                 return false;
3972         }
3973
3974         intel_dp_check_device_service_irq(intel_dp);
3975         intel_dp_check_link_service_irq(intel_dp);
3976
3977         /* Handle CEC interrupts, if any */
3978         drm_dp_cec_irq(&intel_dp->aux);
3979
3980         /* defer to the hotplug work for link retraining if needed */
3981         if (intel_dp_needs_link_retrain(intel_dp))
3982                 return false;
3983
3984         intel_psr_short_pulse(intel_dp);
3985
3986         switch (intel_dp->compliance.test_type) {
3987         case DP_TEST_LINK_TRAINING:
3988                 drm_dbg_kms(&dev_priv->drm,
3989                             "Link Training Compliance Test requested\n");
3990                 /* Send a Hotplug Uevent to userspace to start modeset */
3991                 drm_kms_helper_hotplug_event(&dev_priv->drm);
3992                 break;
3993         case DP_TEST_LINK_PHY_TEST_PATTERN:
3994                 drm_dbg_kms(&dev_priv->drm,
3995                             "PHY test pattern Compliance Test requested\n");
3996                 /*
3997                  * Schedule long hpd to do the test
3998                  *
3999                  * FIXME get rid of the ad-hoc phy test modeset code
4000                  * and properly incorporate it into the normal modeset.
4001                  */
4002                 return false;
4003         }
4004
4005         return true;
4006 }
4007
4008 /* XXX this is probably wrong for multiple downstream ports */
4009 static enum drm_connector_status
4010 intel_dp_detect_dpcd(struct intel_dp *intel_dp)
4011 {
4012         struct drm_i915_private *i915 = dp_to_i915(intel_dp);
4013         struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
4014         u8 *dpcd = intel_dp->dpcd;
4015         u8 type;
4016
4017         if (drm_WARN_ON(&i915->drm, intel_dp_is_edp(intel_dp)))
4018                 return connector_status_connected;
4019
4020         lspcon_resume(dig_port);
4021
4022         if (!intel_dp_get_dpcd(intel_dp))
4023                 return connector_status_disconnected;
4024
4025         /* if there's no downstream port, we're done */
4026         if (!drm_dp_is_branch(dpcd))
4027                 return connector_status_connected;
4028
4029         /* If we're HPD-aware, SINK_COUNT changes dynamically */
4030         if (intel_dp_has_sink_count(intel_dp) &&
4031             intel_dp->downstream_ports[0] & DP_DS_PORT_HPD) {
4032                 return intel_dp->sink_count ?
4033                         connector_status_connected : connector_status_disconnected;
4034         }
4035
4036         if (intel_dp_can_mst(intel_dp))
4037                 return connector_status_connected;
4038
4039         /* If no HPD, poke DDC gently */
4040         if (drm_probe_ddc(&intel_dp->aux.ddc))
4041                 return connector_status_connected;
4042
4043         /* Well we tried, say unknown for unreliable port types */
4044         if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11) {
4045                 type = intel_dp->downstream_ports[0] & DP_DS_PORT_TYPE_MASK;
4046                 if (type == DP_DS_PORT_TYPE_VGA ||
4047                     type == DP_DS_PORT_TYPE_NON_EDID)
4048                         return connector_status_unknown;
4049         } else {
4050                 type = intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
4051                         DP_DWN_STRM_PORT_TYPE_MASK;
4052                 if (type == DP_DWN_STRM_PORT_TYPE_ANALOG ||
4053                     type == DP_DWN_STRM_PORT_TYPE_OTHER)
4054                         return connector_status_unknown;
4055         }
4056
4057         /* Anything else is out of spec, warn and ignore */
4058         drm_dbg_kms(&i915->drm, "Broken DP branch device, ignoring\n");
4059         return connector_status_disconnected;
4060 }
4061
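     /* eDP panels are always reported as connected. */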
4062 static enum drm_connector_status
4063 edp_detect(struct intel_dp *intel_dp)
4064 {
4065         return connector_status_connected;
4066 }
4067
4068 /*
4069  * intel_digital_port_connected - is the specified port connected?
4070  * @encoder: intel_encoder
4071  *
4072  * In cases where there's a connector physically connected but it can't be used
4073  * by our hardware we also return false, since the rest of the driver should
4074  * pretty much treat the port as disconnected. This is relevant for type-C
4075  * (starting on ICL) where there's ownership involved.
4076  *
4077  * Return %true if port is connected, %false otherwise.
4078  */
4079 bool intel_digital_port_connected(struct intel_encoder *encoder)
4080 {
4081         struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
4082         struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
4083         bool is_connected = false;
4084         intel_wakeref_t wakeref;
4085
4086         with_intel_display_power(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref)
4087                 is_connected = dig_port->connected(encoder);
4088
4089         return is_connected;
4090 }
4091
4092 static struct edid *
4093 intel_dp_get_edid(struct intel_dp *intel_dp)
4094 {
4095         struct intel_connector *intel_connector = intel_dp->attached_connector;
4096
4097         /* use cached edid if we have one */
4098         if (intel_connector->edid) {
4099                 /* invalid edid */
4100                 if (IS_ERR(intel_connector->edid))
4101                         return NULL;
4102
4103                 return drm_edid_duplicate(intel_connector->edid);
4104         } else
4105                 return drm_get_edid(&intel_connector->base,
4106                                     &intel_dp->aux.ddc);
4107 }
4108
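     /*
      * Cache the downstream facing port (DFP) limits (max bpc, max dotclock,
      * TMDS clock range, PCON max FRL bandwidth) from the DPCD and EDID.
      */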
4109 static void
4110 intel_dp_update_dfp(struct intel_dp *intel_dp,
4111                     const struct edid *edid)
4112 {
4113         struct drm_i915_private *i915 = dp_to_i915(intel_dp);
4114         struct intel_connector *connector = intel_dp->attached_connector;
4115
4116         intel_dp->dfp.max_bpc =
4117                 drm_dp_downstream_max_bpc(intel_dp->dpcd,
4118                                           intel_dp->downstream_ports, edid);
4119
4120         intel_dp->dfp.max_dotclock =
4121                 drm_dp_downstream_max_dotclock(intel_dp->dpcd,
4122                                                intel_dp->downstream_ports);
4123
4124         intel_dp->dfp.min_tmds_clock =
4125                 drm_dp_downstream_min_tmds_clock(intel_dp->dpcd,
4126                                                  intel_dp->downstream_ports,
4127                                                  edid);
4128         intel_dp->dfp.max_tmds_clock =
4129                 drm_dp_downstream_max_tmds_clock(intel_dp->dpcd,
4130                                                  intel_dp->downstream_ports,
4131                                                  edid);
4132
4133         intel_dp->dfp.pcon_max_frl_bw =
4134                 drm_dp_get_pcon_max_frl_bw(intel_dp->dpcd,
4135                                            intel_dp->downstream_ports);
4136
4137         drm_dbg_kms(&i915->drm,
4138                     "[CONNECTOR:%d:%s] DFP max bpc %d, max dotclock %d, TMDS clock %d-%d, PCON Max FRL BW %dGbps\n",
4139                     connector->base.base.id, connector->base.name,
4140                     intel_dp->dfp.max_bpc,
4141                     intel_dp->dfp.max_dotclock,
4142                     intel_dp->dfp.min_tmds_clock,
4143                     intel_dp->dfp.max_tmds_clock,
4144                     intel_dp->dfp.pcon_max_frl_bw);
4145
4146         intel_dp_get_pcon_dsc_cap(intel_dp);
4147 }
4148
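     /*
      * Work out whether YCbCr 4:2:0 output is usable, either natively,
      * via 4:2:0 passthrough, or via 4:4:4->4:2:0 conversion in the DFP.
      */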
4149 static void
4150 intel_dp_update_420(struct intel_dp *intel_dp)
4151 {
4152         struct drm_i915_private *i915 = dp_to_i915(intel_dp);
4153         struct intel_connector *connector = intel_dp->attached_connector;
4154         bool is_branch, ycbcr_420_passthrough, ycbcr_444_to_420, rgb_to_ycbcr;
4155
4156         /* No YCbCr output support on gmch platforms */
4157         if (HAS_GMCH(i915))
4158                 return;
4159
4160         /*
4161          * ILK doesn't seem capable of DP YCbCr output. The
4162          * displayed image is severely corrupted. SNB+ is fine.
4163          */
4164         if (IS_IRONLAKE(i915))
4165                 return;
4166
4167         is_branch = drm_dp_is_branch(intel_dp->dpcd);
4168         ycbcr_420_passthrough =
4169                 drm_dp_downstream_420_passthrough(intel_dp->dpcd,
4170                                                   intel_dp->downstream_ports);
4171         /* on-board LSPCON always assumed to support 4:4:4->4:2:0 conversion */
4172         ycbcr_444_to_420 =
4173                 dp_to_dig_port(intel_dp)->lspcon.active ||
4174                 drm_dp_downstream_444_to_420_conversion(intel_dp->dpcd,
4175                                                         intel_dp->downstream_ports);
4176         rgb_to_ycbcr = drm_dp_downstream_rgb_to_ycbcr_conversion(intel_dp->dpcd,
4177                                                                  intel_dp->downstream_ports,
4178                                                                  DP_DS_HDMI_BT601_RGB_YCBCR_CONV |
4179                                                                  DP_DS_HDMI_BT709_RGB_YCBCR_CONV |
4180                                                                  DP_DS_HDMI_BT2020_RGB_YCBCR_CONV);
4181
4182         if (DISPLAY_VER(i915) >= 11) {
4183                 /* Let PCON convert from RGB->YCbCr if possible */
4184                 if (is_branch && rgb_to_ycbcr && ycbcr_444_to_420) {
4185                         intel_dp->dfp.rgb_to_ycbcr = true;
4186                         intel_dp->dfp.ycbcr_444_to_420 = true;
4187                         connector->base.ycbcr_420_allowed = true;
4188                 } else {
4189                         /* Prefer 4:2:0 passthrough over 4:4:4->4:2:0 conversion */
4190                         intel_dp->dfp.ycbcr_444_to_420 =
4191                                 ycbcr_444_to_420 && !ycbcr_420_passthrough;
4192
4193                         connector->base.ycbcr_420_allowed =
4194                                 !is_branch || ycbcr_444_to_420 || ycbcr_420_passthrough;
4195                 }
4196         } else {
4197                 /* 4:4:4->4:2:0 conversion is the only way */
4198                 intel_dp->dfp.ycbcr_444_to_420 = ycbcr_444_to_420;
4199
4200                 connector->base.ycbcr_420_allowed = ycbcr_444_to_420;
4201         }
4202
4203         drm_dbg_kms(&i915->drm,
4204                     "[CONNECTOR:%d:%s] RGB->YCbCr conversion? %s, YCbCr 4:2:0 allowed? %s, YCbCr 4:4:4->4:2:0 conversion? %s\n",
4205                     connector->base.base.id, connector->base.name,
4206                     yesno(intel_dp->dfp.rgb_to_ycbcr),
4207                     yesno(connector->base.ycbcr_420_allowed),
4208                     yesno(intel_dp->dfp.ycbcr_444_to_420));
4209 }
4210
4211 static void
4212 intel_dp_set_edid(struct intel_dp *intel_dp)
4213 {
4214         struct intel_connector *connector = intel_dp->attached_connector;
4215         struct edid *edid;
4216
4217         intel_dp_unset_edid(intel_dp);
4218         edid = intel_dp_get_edid(intel_dp);
4219         connector->detect_edid = edid;
4220
4221         intel_dp_update_dfp(intel_dp, edid);
4222         intel_dp_update_420(intel_dp);
4223
4224         if (edid && edid->input & DRM_EDID_INPUT_DIGITAL) {
4225                 intel_dp->has_hdmi_sink = drm_detect_hdmi_monitor(edid);
4226                 intel_dp->has_audio = drm_detect_monitor_audio(edid);
4227         }
4228
4229         drm_dp_cec_set_edid(&intel_dp->aux, edid);
4230 }
4231
4232 static void
4233 intel_dp_unset_edid(struct intel_dp *intel_dp)
4234 {
4235         struct intel_connector *connector = intel_dp->attached_connector;
4236
4237         drm_dp_cec_unset_edid(&intel_dp->aux);
4238         kfree(connector->detect_edid);
4239         connector->detect_edid = NULL;
4240
4241         intel_dp->has_hdmi_sink = false;
4242         intel_dp->has_audio = false;
4243
4244         intel_dp->dfp.max_bpc = 0;
4245         intel_dp->dfp.max_dotclock = 0;
4246         intel_dp->dfp.min_tmds_clock = 0;
4247         intel_dp->dfp.max_tmds_clock = 0;
4248
4249         intel_dp->dfp.pcon_max_frl_bw = 0;
4250
4251         intel_dp->dfp.ycbcr_444_to_420 = false;
4252         connector->base.ycbcr_420_allowed = false;
4253 }
4254
4255 static int
4256 intel_dp_detect(struct drm_connector *connector,
4257                 struct drm_modeset_acquire_ctx *ctx,
4258                 bool force)
4259 {
4260         struct drm_i915_private *dev_priv = to_i915(connector->dev);
4261         struct intel_dp *intel_dp = intel_attached_dp(to_intel_connector(connector));
4262         struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
4263         struct intel_encoder *encoder = &dig_port->base;
4264         enum drm_connector_status status;
4265
4266         drm_dbg_kms(&dev_priv->drm, "[CONNECTOR:%d:%s]\n",
4267                     connector->base.id, connector->name);
4268         drm_WARN_ON(&dev_priv->drm,
4269                     !drm_modeset_is_locked(&dev_priv->drm.mode_config.connection_mutex));
4270
4271         if (!INTEL_DISPLAY_ENABLED(dev_priv))
4272                 return connector_status_disconnected;
4273
4274         /* Can't disconnect eDP */
4275         if (intel_dp_is_edp(intel_dp))
4276                 status = edp_detect(intel_dp);
4277         else if (intel_digital_port_connected(encoder))
4278                 status = intel_dp_detect_dpcd(intel_dp);
4279         else
4280                 status = connector_status_disconnected;
4281
4282         if (status == connector_status_disconnected) {
4283                 memset(&intel_dp->compliance, 0, sizeof(intel_dp->compliance));
4284                 memset(intel_dp->dsc_dpcd, 0, sizeof(intel_dp->dsc_dpcd));
4285
4286                 if (intel_dp->is_mst) {
4287                         drm_dbg_kms(&dev_priv->drm,
4288                                     "MST device may have disappeared %d vs %d\n",
4289                                     intel_dp->is_mst,
4290                                     intel_dp->mst_mgr.mst_state);
4291                         intel_dp->is_mst = false;
4292                         drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr,
4293                                                         intel_dp->is_mst);
4294                 }
4295
4296                 goto out;
4297         }
4298
4299         /* Read DP Sink DSC Cap DPCD regs for DP v1.4 */
4300         if (DISPLAY_VER(dev_priv) >= 11)
4301                 intel_dp_get_dsc_sink_cap(intel_dp);
4302
4303         intel_dp_configure_mst(intel_dp);
4304
4305         /*
4306          * TODO: Reset link params when switching to MST mode, until MST
4307          * supports link training fallback params.
4308          */
4309         if (intel_dp->reset_link_params || intel_dp->is_mst) {
4310                 /* Initial max link lane count */
4311                 intel_dp->max_link_lane_count = intel_dp_max_common_lane_count(intel_dp);
4312
4313                 /* Initial max link rate */
4314                 intel_dp->max_link_rate = intel_dp_max_common_rate(intel_dp);
4315
4316                 intel_dp->reset_link_params = false;
4317         }
4318
4319         intel_dp_print_rates(intel_dp);
4320
4321         if (intel_dp->is_mst) {
4322                 /*
4323                  * If we are in MST mode then this connector
4324                  * won't appear connected or have anything
4325                  * with EDID on it
4326                  */
4327                 status = connector_status_disconnected;
4328                 goto out;
4329         }
4330
4331         /*
4332          * Some external monitors do not signal loss of link synchronization
4333          * with an IRQ_HPD, so force a link status check.
4334          */
4335         if (!intel_dp_is_edp(intel_dp)) {
4336                 int ret;
4337
4338                 ret = intel_dp_retrain_link(encoder, ctx);
4339                 if (ret)
4340                         return ret;
4341         }
4342
4343         /*
4344          * Clear the NACK and defer counts so that their exact values
4345          * during the EDID read can be captured, as required by
4346          * Compliance tests 4.2.2.4 and 4.2.2.5.
4347          */
4348         intel_dp->aux.i2c_nack_count = 0;
4349         intel_dp->aux.i2c_defer_count = 0;
4350
4351         intel_dp_set_edid(intel_dp);
4352         if (intel_dp_is_edp(intel_dp) ||
4353             to_intel_connector(connector)->detect_edid)
4354                 status = connector_status_connected;
4355
4356         intel_dp_check_device_service_irq(intel_dp);
4357
4358 out:
4359         if (status != connector_status_connected && !intel_dp->is_mst)
4360                 intel_dp_unset_edid(intel_dp);
4361
4362         /*
4363          * Make sure the refs for power wells enabled during detect are
4364          * dropped to avoid a new detect cycle triggered by HPD polling.
4365          */
4366         intel_display_power_flush_work(dev_priv);
4367
4368         if (!intel_dp_is_edp(intel_dp))
4369                 drm_dp_set_subconnector_property(connector,
4370                                                  status,
4371                                                  intel_dp->dpcd,
4372                                                  intel_dp->downstream_ports);
4373         return status;
4374 }
4375
4376 static void
4377 intel_dp_force(struct drm_connector *connector)
4378 {
4379         struct intel_dp *intel_dp = intel_attached_dp(to_intel_connector(connector));
4380         struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
4381         struct intel_encoder *intel_encoder = &dig_port->base;
4382         struct drm_i915_private *dev_priv = to_i915(intel_encoder->base.dev);
4383         enum intel_display_power_domain aux_domain =
4384                 intel_aux_power_domain(dig_port);
4385         intel_wakeref_t wakeref;
4386
4387         drm_dbg_kms(&dev_priv->drm, "[CONNECTOR:%d:%s]\n",
4388                     connector->base.id, connector->name);
4389         intel_dp_unset_edid(intel_dp);
4390
4391         if (connector->status != connector_status_connected)
4392                 return;
4393
4394         wakeref = intel_display_power_get(dev_priv, aux_domain);
4395
4396         intel_dp_set_edid(intel_dp);
4397
4398         intel_display_power_put(dev_priv, aux_domain, wakeref);
4399 }
4400
4401 static int intel_dp_get_modes(struct drm_connector *connector)
4402 {
4403         struct intel_connector *intel_connector = to_intel_connector(connector);
4404         struct edid *edid;
4405         int num_modes = 0;
4406
4407         edid = intel_connector->detect_edid;
4408         if (edid) {
4409                 num_modes = intel_connector_update_modes(connector, edid);
4410
4411                 if (intel_vrr_is_capable(connector))
4412                         drm_connector_set_vrr_capable_property(connector,
4413                                                                true);
4414         }
4415
4416         /* Also add fixed mode, which may or may not be present in EDID */
4417         if (intel_dp_is_edp(intel_attached_dp(intel_connector)) &&
4418             intel_connector->panel.fixed_mode) {
4419                 struct drm_display_mode *mode;
4420
4421                 mode = drm_mode_duplicate(connector->dev,
4422                                           intel_connector->panel.fixed_mode);
4423                 if (mode) {
4424                         drm_mode_probed_add(connector, mode);
4425                         num_modes++;
4426                 }
4427         }
4428
4429         if (num_modes)
4430                 return num_modes;
4431
4432         if (!edid) {
4433                 struct intel_dp *intel_dp = intel_attached_dp(intel_connector);
4434                 struct drm_display_mode *mode;
4435
4436                 mode = drm_dp_downstream_mode(connector->dev,
4437                                               intel_dp->dpcd,
4438                                               intel_dp->downstream_ports);
4439                 if (mode) {
4440                         drm_mode_probed_add(connector, mode);
4441                         num_modes++;
4442                 }
4443         }
4444
4445         return num_modes;
4446 }
4447
4448 static int
4449 intel_dp_connector_register(struct drm_connector *connector)
4450 {
4451         struct drm_i915_private *i915 = to_i915(connector->dev);
4452         struct intel_dp *intel_dp = intel_attached_dp(to_intel_connector(connector));
4453         struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
4454         struct intel_lspcon *lspcon = &dig_port->lspcon;
4455         int ret;
4456
4457         ret = intel_connector_register(connector);
4458         if (ret)
4459                 return ret;
4460
4461         drm_dbg_kms(&i915->drm, "registering %s bus for %s\n",
4462                     intel_dp->aux.name, connector->kdev->kobj.name);
4463
4464         intel_dp->aux.dev = connector->kdev;
4465         ret = drm_dp_aux_register(&intel_dp->aux);
4466         if (!ret)
4467                 drm_dp_cec_register_connector(&intel_dp->aux, connector);
4468
4469         if (!intel_bios_is_lspcon_present(i915, dig_port->base.port))
4470                 return ret;
4471
4472         /*
4473          * ToDo: Clean this up to handle lspcon init and resume in a more
4474          * efficient and streamlined way.
4475          */
4476         if (lspcon_init(dig_port)) {
4477                 lspcon_detect_hdr_capability(lspcon);
4478                 if (lspcon->hdr_supported)
4479                         drm_object_attach_property(&connector->base,
4480                                                    connector->dev->mode_config.hdr_output_metadata_property,
4481                                                    0);
4482         }
4483
4484         return ret;
4485 }
4486
4487 static void
4488 intel_dp_connector_unregister(struct drm_connector *connector)
4489 {
4490         struct intel_dp *intel_dp = intel_attached_dp(to_intel_connector(connector));
4491
4492         drm_dp_cec_unregister_connector(&intel_dp->aux);
4493         drm_dp_aux_unregister(&intel_dp->aux);
4494         intel_connector_unregister(connector);
4495 }
4496
4497 void intel_dp_encoder_flush_work(struct drm_encoder *encoder)
4498 {
4499         struct intel_digital_port *dig_port = enc_to_dig_port(to_intel_encoder(encoder));
4500         struct intel_dp *intel_dp = &dig_port->dp;
4501
4502         intel_dp_mst_encoder_cleanup(dig_port);
4503
4504         intel_pps_vdd_off_sync(intel_dp);
4505
4506         intel_dp_aux_fini(intel_dp);
4507 }
4508
4509 void intel_dp_encoder_suspend(struct intel_encoder *intel_encoder)
4510 {
4511         struct intel_dp *intel_dp = enc_to_intel_dp(intel_encoder);
4512
4513         intel_pps_vdd_off_sync(intel_dp);
4514 }
4515
4516 void intel_dp_encoder_shutdown(struct intel_encoder *intel_encoder)
4517 {
4518         struct intel_dp *intel_dp = enc_to_intel_dp(intel_encoder);
4519
4520         intel_pps_wait_power_cycle(intel_dp);
4521 }
4522
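     /*
      * Force a modeset on every CRTC driving a connector in the given tile
      * group, pulling the affected planes into the atomic state.
      */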
4523 static int intel_modeset_tile_group(struct intel_atomic_state *state,
4524                                     int tile_group_id)
4525 {
4526         struct drm_i915_private *dev_priv = to_i915(state->base.dev);
4527         struct drm_connector_list_iter conn_iter;
4528         struct drm_connector *connector;
4529         int ret = 0;
4530
4531         drm_connector_list_iter_begin(&dev_priv->drm, &conn_iter);
4532         drm_for_each_connector_iter(connector, &conn_iter) {
4533                 struct drm_connector_state *conn_state;
4534                 struct intel_crtc_state *crtc_state;
4535                 struct intel_crtc *crtc;
4536
4537                 if (!connector->has_tile ||
4538                     connector->tile_group->id != tile_group_id)
4539                         continue;
4540
4541                 conn_state = drm_atomic_get_connector_state(&state->base,
4542                                                             connector);
4543                 if (IS_ERR(conn_state)) {
4544                         ret = PTR_ERR(conn_state);
4545                         break;
4546                 }
4547
4548                 crtc = to_intel_crtc(conn_state->crtc);
4549
4550                 if (!crtc)
4551                         continue;
4552
4553                 crtc_state = intel_atomic_get_new_crtc_state(state, crtc);
4554                 crtc_state->uapi.mode_changed = true;
4555
4556                 ret = drm_atomic_add_affected_planes(&state->base, &crtc->base);
4557                 if (ret)
4558                         break;
4559         }
4560         drm_connector_list_iter_end(&conn_iter);
4561
4562         return ret;
4563 }
4564
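     /*
      * Force a modeset on every enabled CRTC whose transcoder is in the
      * given mask, pulling the affected connectors and planes into the
      * atomic state.
      */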
4565 static int intel_modeset_affected_transcoders(struct intel_atomic_state *state, u8 transcoders)
4566 {
4567         struct drm_i915_private *dev_priv = to_i915(state->base.dev);
4568         struct intel_crtc *crtc;
4569
4570         if (transcoders == 0)
4571                 return 0;
4572
4573         for_each_intel_crtc(&dev_priv->drm, crtc) {
4574                 struct intel_crtc_state *crtc_state;
4575                 int ret;
4576
4577                 crtc_state = intel_atomic_get_crtc_state(&state->base, crtc);
4578                 if (IS_ERR(crtc_state))
4579                         return PTR_ERR(crtc_state);
4580
4581                 if (!crtc_state->hw.enable)
4582                         continue;
4583
4584                 if (!(transcoders & BIT(crtc_state->cpu_transcoder)))
4585                         continue;
4586
4587                 crtc_state->uapi.mode_changed = true;
4588
4589                 ret = drm_atomic_add_affected_connectors(&state->base, &crtc->base);
4590                 if (ret)
4591                         return ret;
4592
4593                 ret = drm_atomic_add_affected_planes(&state->base, &crtc->base);
4594                 if (ret)
4595                         return ret;
4596
4597                 transcoders &= ~BIT(crtc_state->cpu_transcoder);
4598         }
4599
4600         drm_WARN_ON(&dev_priv->drm, transcoders != 0);
4601
4602         return 0;
4603 }
4604
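     /*
      * Force a modeset on all CRTCs synced (port sync master/slaves) with
      * the CRTC this connector was previously using.
      */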
4605 static int intel_modeset_synced_crtcs(struct intel_atomic_state *state,
4606                                       struct drm_connector *connector)
4607 {
4608         const struct drm_connector_state *old_conn_state =
4609                 drm_atomic_get_old_connector_state(&state->base, connector);
4610         const struct intel_crtc_state *old_crtc_state;
4611         struct intel_crtc *crtc;
4612         u8 transcoders;
4613
4614         crtc = to_intel_crtc(old_conn_state->crtc);
4615         if (!crtc)
4616                 return 0;
4617
4618         old_crtc_state = intel_atomic_get_old_crtc_state(state, crtc);
4619
4620         if (!old_crtc_state->hw.active)
4621                 return 0;
4622
4623         transcoders = old_crtc_state->sync_mode_slaves_mask;
4624         if (old_crtc_state->master_transcoder != INVALID_TRANSCODER)
4625                 transcoders |= BIT(old_crtc_state->master_transcoder);
4626
4627         return intel_modeset_affected_transcoders(state,
4628                                                   transcoders);
4629 }
4630
4631 static int intel_dp_connector_atomic_check(struct drm_connector *conn,
4632                                            struct drm_atomic_state *_state)
4633 {
4634         struct drm_i915_private *dev_priv = to_i915(conn->dev);
4635         struct intel_atomic_state *state = to_intel_atomic_state(_state);
4636         int ret;
4637
4638         ret = intel_digital_connector_atomic_check(conn, &state->base);
4639         if (ret)
4640                 return ret;
4641
4642         /*
4643          * We don't enable port sync on BDW due to missing w/as and
4644          * due to not having adjusted the modeset sequence appropriately.
4645          */
4646         if (DISPLAY_VER(dev_priv) < 9)
4647                 return 0;
4648
4649         if (!intel_connector_needs_modeset(state, conn))
4650                 return 0;
4651
4652         if (conn->has_tile) {
4653                 ret = intel_modeset_tile_group(state, conn->tile_group->id);
4654                 if (ret)
4655                         return ret;
4656         }
4657
4658         return intel_modeset_synced_crtcs(state, conn);
4659 }
4660
4661 static const struct drm_connector_funcs intel_dp_connector_funcs = {
4662         .force = intel_dp_force,
4663         .fill_modes = drm_helper_probe_single_connector_modes,
4664         .atomic_get_property = intel_digital_connector_atomic_get_property,
4665         .atomic_set_property = intel_digital_connector_atomic_set_property,
4666         .late_register = intel_dp_connector_register,
4667         .early_unregister = intel_dp_connector_unregister,
4668         .destroy = intel_connector_destroy,
4669         .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
4670         .atomic_duplicate_state = intel_digital_connector_duplicate_state,
4671 };
4672
4673 static const struct drm_connector_helper_funcs intel_dp_connector_helper_funcs = {
4674         .detect_ctx = intel_dp_detect,
4675         .get_modes = intel_dp_get_modes,
4676         .mode_valid = intel_dp_mode_valid,
4677         .atomic_check = intel_dp_connector_atomic_check,
4678 };
4679
4680 enum irqreturn
4681 intel_dp_hpd_pulse(struct intel_digital_port *dig_port, bool long_hpd)
4682 {
4683         struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
4684         struct intel_dp *intel_dp = &dig_port->dp;
4685
4686         if (dig_port->base.type == INTEL_OUTPUT_EDP &&
4687             (long_hpd || !intel_pps_have_power(intel_dp))) {
4688                 /*
4689                  * vdd off can generate a long/short pulse on eDP which
4690                  * would require vdd on to handle it, and thus we
4691                  * would end up in an endless cycle of
4692                  * "vdd off -> long/short hpd -> vdd on -> detect -> vdd off -> ..."
4693                  */
4694                 drm_dbg_kms(&i915->drm,
4695                             "ignoring %s hpd on eDP [ENCODER:%d:%s]\n",
4696                             long_hpd ? "long" : "short",
4697                             dig_port->base.base.base.id,
4698                             dig_port->base.base.name);
4699                 return IRQ_HANDLED;
4700         }
4701
4702         drm_dbg_kms(&i915->drm, "got hpd irq on [ENCODER:%d:%s] - %s\n",
4703                     dig_port->base.base.base.id,
4704                     dig_port->base.base.name,
4705                     long_hpd ? "long" : "short");
4706
4707         if (long_hpd) {
4708                 intel_dp->reset_link_params = true;
4709                 return IRQ_NONE;
4710         }
4711
4712         if (intel_dp->is_mst) {
4713                 if (!intel_dp_check_mst_status(intel_dp))
4714                         return IRQ_NONE;
4715         } else if (!intel_dp_short_pulse(intel_dp)) {
4716                 return IRQ_NONE;
4717         }
4718
4719         return IRQ_HANDLED;
4720 }
4721
4722 /* check the VBT to see whether the eDP is on another port */
4723 bool intel_dp_is_port_edp(struct drm_i915_private *dev_priv, enum port port)
4724 {
4725         /*
4726          * eDP is not supported on g4x, so bail out early just
4727          * for a bit of extra safety in case the VBT is bonkers.
4728          */
4729         if (DISPLAY_VER(dev_priv) < 5)
4730                 return false;
4731
4732         if (DISPLAY_VER(dev_priv) < 9 && port == PORT_A)
4733                 return true;
4734
4735         return intel_bios_is_port_edp(dev_priv, port);
4736 }
4737
4738 static void
4739 intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connector)
4740 {
4741         struct drm_i915_private *dev_priv = to_i915(connector->dev);
4742         enum port port = dp_to_dig_port(intel_dp)->base.port;
4743
4744         if (!intel_dp_is_edp(intel_dp))
4745                 drm_connector_attach_dp_subconnector_property(connector);
4746
4747         if (!IS_G4X(dev_priv) && port != PORT_A)
4748                 intel_attach_force_audio_property(connector);
4749
4750         intel_attach_broadcast_rgb_property(connector);
4751         if (HAS_GMCH(dev_priv))
4752                 drm_connector_attach_max_bpc_property(connector, 6, 10);
4753         else if (DISPLAY_VER(dev_priv) >= 5)
4754                 drm_connector_attach_max_bpc_property(connector, 6, 12);
4755
4756         /* Register HDMI colorspace for the lspcon case */
4757         if (intel_bios_is_lspcon_present(dev_priv, port)) {
4758                 drm_connector_attach_content_type_property(connector);
4759                 intel_attach_hdmi_colorspace_property(connector);
4760         } else {
4761                 intel_attach_dp_colorspace_property(connector);
4762         }
4763
4764         if (IS_GEMINILAKE(dev_priv) || DISPLAY_VER(dev_priv) >= 11)
4765                 drm_object_attach_property(&connector->base,
4766                                            connector->dev->mode_config.hdr_output_metadata_property,
4767                                            0);
4768
4769         if (intel_dp_is_edp(intel_dp)) {
4770                 u32 allowed_scalers;
4771
4772                 allowed_scalers = BIT(DRM_MODE_SCALE_ASPECT) | BIT(DRM_MODE_SCALE_FULLSCREEN);
4773                 if (!HAS_GMCH(dev_priv))
4774                         allowed_scalers |= BIT(DRM_MODE_SCALE_CENTER);
4775
4776                 drm_connector_attach_scaling_mode_property(connector, allowed_scalers);
4777
4778                 connector->state->scaling_mode = DRM_MODE_SCALE_ASPECT;
4779
4780         }
4781
4782         if (HAS_VRR(dev_priv))
4783                 drm_connector_attach_vrr_capable_property(connector);
4784 }
4785
4786 /**
4787  * intel_dp_set_drrs_state - program registers for RR switch to take effect
4788  * @dev_priv: i915 device
4789  * @crtc_state: a pointer to the active intel_crtc_state
4790  * @refresh_rate: RR to be programmed
4791  *
4792  * This function gets called when refresh rate (RR) has to be changed from
4793  * one frequency to another. Switches can be between high and low RR
4794  * supported by the panel or to any other RR based on media playback (in
4795  * this case, the RR value needs to be passed from user space).
4796  *
4797  * The caller of this function needs to take a lock on dev_priv->drrs.
4798  */
4799 static void intel_dp_set_drrs_state(struct drm_i915_private *dev_priv,
4800                                     const struct intel_crtc_state *crtc_state,
4801                                     int refresh_rate)
4802 {
4803         struct intel_dp *intel_dp = dev_priv->drrs.dp;
4804         struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
4805         enum drrs_refresh_rate_type index = DRRS_HIGH_RR;
4806
4807         if (refresh_rate <= 0) {
4808                 drm_dbg_kms(&dev_priv->drm,
4809                             "Refresh rate should be positive non-zero.\n");
4810                 return;
4811         }
4812
4813         if (intel_dp == NULL) {
4814                 drm_dbg_kms(&dev_priv->drm, "DRRS not supported.\n");
4815                 return;
4816         }
4817
4818         if (!crtc) {
4819                 drm_dbg_kms(&dev_priv->drm,
4820                             "DRRS: intel_crtc not initialized\n");
4821                 return;
4822         }
4823
4824         if (dev_priv->drrs.type < SEAMLESS_DRRS_SUPPORT) {
4825                 drm_dbg_kms(&dev_priv->drm, "Only Seamless DRRS supported.\n");
4826                 return;
4827         }
4828
4829         if (drm_mode_vrefresh(intel_dp->attached_connector->panel.downclock_mode) ==
4830                         refresh_rate)
4831                 index = DRRS_LOW_RR;
4832
4833         if (index == dev_priv->drrs.refresh_rate_type) {
4834                 drm_dbg_kms(&dev_priv->drm,
4835                             "DRRS requested for previously set RR...ignoring\n");
4836                 return;
4837         }
4838
4839         if (!crtc_state->hw.active) {
4840                 drm_dbg_kms(&dev_priv->drm,
4841                             "eDP encoder disabled. CRTC not Active\n");
4842                 return;
4843         }
4844
4845         if (DISPLAY_VER(dev_priv) >= 8 && !IS_CHERRYVIEW(dev_priv)) {
4846                 switch (index) {
4847                 case DRRS_HIGH_RR:
4848                         intel_dp_set_m_n(crtc_state, M1_N1);
4849                         break;
4850                 case DRRS_LOW_RR:
4851                         intel_dp_set_m_n(crtc_state, M2_N2);
4852                         break;
4853                 case DRRS_MAX_RR:
4854                 default:
4855                         drm_err(&dev_priv->drm,
4856                                 "Unsupported refresh rate type\n");
4857                 }
4858         } else if (DISPLAY_VER(dev_priv) > 6) {
4859                 i915_reg_t reg = PIPECONF(crtc_state->cpu_transcoder);
4860                 u32 val;
4861
4862                 val = intel_de_read(dev_priv, reg);
4863                 if (index > DRRS_HIGH_RR) {
4864                         if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
4865                                 val |= PIPECONF_EDP_RR_MODE_SWITCH_VLV;
4866                         else
4867                                 val |= PIPECONF_EDP_RR_MODE_SWITCH;
4868                 } else {
4869                         if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
4870                                 val &= ~PIPECONF_EDP_RR_MODE_SWITCH_VLV;
4871                         else
4872                                 val &= ~PIPECONF_EDP_RR_MODE_SWITCH;
4873                 }
4874                 intel_de_write(dev_priv, reg, val);
4875         }
4876
4877         dev_priv->drrs.refresh_rate_type = index;
4878
4879         drm_dbg_kms(&dev_priv->drm, "eDP Refresh Rate set to : %dHz\n",
4880                     refresh_rate);
4881 }
4882
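     /* Caller must hold dev_priv->drrs.mutex. */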
4883 static void
4884 intel_edp_drrs_enable_locked(struct intel_dp *intel_dp)
4885 {
4886         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
4887
4888         dev_priv->drrs.busy_frontbuffer_bits = 0;
4889         dev_priv->drrs.dp = intel_dp;
4890 }
4891
4892 /**
4893  * intel_edp_drrs_enable - init drrs struct if supported
4894  * @intel_dp: DP struct
4895  * @crtc_state: A pointer to the active crtc state.
4896  *
4897  * Initializes frontbuffer_bits and drrs.dp
4898  */
4899 void intel_edp_drrs_enable(struct intel_dp *intel_dp,
4900                            const struct intel_crtc_state *crtc_state)
4901 {
4902         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
4903
4904         if (!crtc_state->has_drrs)
4905                 return;
4906
4907         drm_dbg_kms(&dev_priv->drm, "Enabling DRRS\n");
4908
4909         mutex_lock(&dev_priv->drrs.mutex);
4910
4911         if (dev_priv->drrs.dp) {
4912                 drm_warn(&dev_priv->drm, "DRRS already enabled\n");
4913                 goto unlock;
4914         }
4915
4916         intel_edp_drrs_enable_locked(intel_dp);
4917
4918 unlock:
4919         mutex_unlock(&dev_priv->drrs.mutex);
4920 }
4921
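     /* Caller must hold dev_priv->drrs.mutex. */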
4922 static void
4923 intel_edp_drrs_disable_locked(struct intel_dp *intel_dp,
4924                               const struct intel_crtc_state *crtc_state)
4925 {
4926         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
4927
4928         if (dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR) {
4929                 int refresh;
4930
4931                 refresh = drm_mode_vrefresh(intel_dp->attached_connector->panel.fixed_mode);
4932                 intel_dp_set_drrs_state(dev_priv, crtc_state, refresh);
4933         }
4934
4935         dev_priv->drrs.dp = NULL;
4936 }
4937
4938 /**
4939  * intel_edp_drrs_disable - Disable DRRS
4940  * @intel_dp: DP struct
4941  * @old_crtc_state: Pointer to old crtc_state.
4942  *
4943  */
4944 void intel_edp_drrs_disable(struct intel_dp *intel_dp,
4945                             const struct intel_crtc_state *old_crtc_state)
4946 {
4947         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
4948
4949         if (!old_crtc_state->has_drrs)
4950                 return;
4951
4952         mutex_lock(&dev_priv->drrs.mutex);
4953         if (!dev_priv->drrs.dp) {
4954                 mutex_unlock(&dev_priv->drrs.mutex);
4955                 return;
4956         }
4957
4958         intel_edp_drrs_disable_locked(intel_dp, old_crtc_state);
4959         mutex_unlock(&dev_priv->drrs.mutex);
4960
4961         cancel_delayed_work_sync(&dev_priv->drrs.work);
4962 }
4963
4964 /**
4965  * intel_edp_drrs_update - Update DRRS state
4966  * @intel_dp: Intel DP
4967  * @crtc_state: new CRTC state
4968  *
4969  * This function will update DRRS states, disabling or enabling DRRS when
4970  * executing fastsets. For full modeset, intel_edp_drrs_disable() and
4971  * intel_edp_drrs_enable() should be called instead.
4972  */
4973 void
4974 intel_edp_drrs_update(struct intel_dp *intel_dp,
4975                       const struct intel_crtc_state *crtc_state)
4976 {
4977         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
4978
4979         if (dev_priv->drrs.type != SEAMLESS_DRRS_SUPPORT)
4980                 return;
4981
4982         mutex_lock(&dev_priv->drrs.mutex);
4983
4984         /* New state matches current one? */
4985         if (crtc_state->has_drrs == !!dev_priv->drrs.dp)
4986                 goto unlock;
4987
4988         if (crtc_state->has_drrs)
4989                 intel_edp_drrs_enable_locked(intel_dp);
4990         else
4991                 intel_edp_drrs_disable_locked(intel_dp, crtc_state);
4992
4993 unlock:
4994         mutex_unlock(&dev_priv->drrs.mutex);
4995 }
4996
4997 static void intel_edp_drrs_downclock_work(struct work_struct *work)
4998 {
4999         struct drm_i915_private *dev_priv =
5000                 container_of(work, typeof(*dev_priv), drrs.work.work);
5001         struct intel_dp *intel_dp;
5002
5003         mutex_lock(&dev_priv->drrs.mutex);
5004
5005         intel_dp = dev_priv->drrs.dp;
5006
5007         if (!intel_dp)
5008                 goto unlock;
5009
5010         /*
5011          * The delayed work can race with an invalidate, hence we need to
5012          * recheck.
5013          */
5014
5015         if (dev_priv->drrs.busy_frontbuffer_bits)
5016                 goto unlock;
5017
5018         if (dev_priv->drrs.refresh_rate_type != DRRS_LOW_RR) {
5019                 struct drm_crtc *crtc = dp_to_dig_port(intel_dp)->base.base.crtc;
5020
5021                 intel_dp_set_drrs_state(dev_priv, to_intel_crtc(crtc)->config,
5022                         drm_mode_vrefresh(intel_dp->attached_connector->panel.downclock_mode));
5023         }
5024
5025 unlock:
5026         mutex_unlock(&dev_priv->drrs.mutex);
5027 }
5028
5029 /**
5030  * intel_edp_drrs_invalidate - Disable Idleness DRRS
5031  * @dev_priv: i915 device
5032  * @frontbuffer_bits: frontbuffer plane tracking bits
5033  *
5034  * This function gets called every time rendering on the given planes starts.
5035  * Hence DRRS needs to be upclocked, i.e. switched from LOW_RR to HIGH_RR.
5036  *
5037  * Dirty frontbuffers relevant to DRRS are tracked in busy_frontbuffer_bits.
5038  */
5039 void intel_edp_drrs_invalidate(struct drm_i915_private *dev_priv,
5040                                unsigned int frontbuffer_bits)
5041 {
5042         struct intel_dp *intel_dp;
5043         struct drm_crtc *crtc;
5044         enum pipe pipe;
5045
5046         if (dev_priv->drrs.type == DRRS_NOT_SUPPORTED)
5047                 return;
5048
5049         cancel_delayed_work(&dev_priv->drrs.work);
5050
5051         mutex_lock(&dev_priv->drrs.mutex);
5052
5053         intel_dp = dev_priv->drrs.dp;
5054         if (!intel_dp) {
5055                 mutex_unlock(&dev_priv->drrs.mutex);
5056                 return;
5057         }
5058
5059         crtc = dp_to_dig_port(intel_dp)->base.base.crtc;
5060         pipe = to_intel_crtc(crtc)->pipe;
5061
5062         frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);
5063         dev_priv->drrs.busy_frontbuffer_bits |= frontbuffer_bits;
5064
5065         /* invalidate means busy screen hence upclock */
5066         if (frontbuffer_bits && dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR)
5067                 intel_dp_set_drrs_state(dev_priv, to_intel_crtc(crtc)->config,
5068                                         drm_mode_vrefresh(intel_dp->attached_connector->panel.fixed_mode));
5069
5070         mutex_unlock(&dev_priv->drrs.mutex);
5071 }
5072
5073 /**
5074  * intel_edp_drrs_flush - Restart Idleness DRRS
5075  * @dev_priv: i915 device
5076  * @frontbuffer_bits: frontbuffer plane tracking bits
5077  *
5078  * This function gets called every time rendering on the given planes has
5079  * completed or a flip on a crtc has completed. So DRRS should be upclocked
5080  * (LOW_RR -> HIGH_RR). Idleness detection should also be restarted,
5081  * if no other planes are dirty.
5082  *
5083  * Dirty frontbuffers relevant to DRRS are tracked in busy_frontbuffer_bits.
5084  */
5085 void intel_edp_drrs_flush(struct drm_i915_private *dev_priv,
5086                           unsigned int frontbuffer_bits)
5087 {
5088         struct intel_dp *intel_dp;
5089         struct drm_crtc *crtc;
5090         enum pipe pipe;
5091
5092         if (dev_priv->drrs.type == DRRS_NOT_SUPPORTED)
5093                 return;
5094
5095         cancel_delayed_work(&dev_priv->drrs.work);
5096
5097         mutex_lock(&dev_priv->drrs.mutex);
5098
5099         intel_dp = dev_priv->drrs.dp;
5100         if (!intel_dp) {
5101                 mutex_unlock(&dev_priv->drrs.mutex);
5102                 return;
5103         }
5104
5105         crtc = dp_to_dig_port(intel_dp)->base.base.crtc;
5106         pipe = to_intel_crtc(crtc)->pipe;
5107
5108         frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);
5109         dev_priv->drrs.busy_frontbuffer_bits &= ~frontbuffer_bits;
5110
5111         /* flush means busy screen hence upclock */
5112         if (frontbuffer_bits && dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR)
5113                 intel_dp_set_drrs_state(dev_priv, to_intel_crtc(crtc)->config,
5114                                         drm_mode_vrefresh(intel_dp->attached_connector->panel.fixed_mode));
5115
5116         /*
5117          * flush also means no more activity hence schedule downclock, if all
5118          * other fbs are quiescent too
5119          */
5120         if (!dev_priv->drrs.busy_frontbuffer_bits)
5121                 schedule_delayed_work(&dev_priv->drrs.work,
5122                                 msecs_to_jiffies(1000));
5123         mutex_unlock(&dev_priv->drrs.mutex);
5124 }
5125
5126 /**
5127  * DOC: Display Refresh Rate Switching (DRRS)
5128  *
5129  * Display Refresh Rate Switching (DRRS) is a power conservation feature
5130  * which enables switching between low and high refresh rates,
5131  * dynamically, based on the usage scenario. This feature is applicable
5132  * for internal panels.
5133  *
5134  * Indication that the panel supports DRRS is given by the panel EDID, which
5135  * would list multiple refresh rates for one resolution.
5136  *
5137  * DRRS is of 2 types - static and seamless.
5138  * Static DRRS involves changing refresh rate (RR) by doing a full modeset
5139  * (may appear as a blink on screen) and is used in dock-undock scenario.
5140  * Seamless DRRS involves changing RR without any visual effect to the user
5141  * and can be used during normal system usage. This is done by programming
5142  * certain registers.
5143  *
5144  * Support for static/seamless DRRS may be indicated in the VBT based on
5145  * inputs from the panel spec.
5146  *
5147  * DRRS saves power by switching to low RR based on usage scenarios.
5148  *
5149  * The implementation is based on frontbuffer tracking.  When
5150  * there is a disturbance on the screen triggered by user activity or a periodic
5151  * system activity, DRRS is disabled (RR is changed to high RR).  When there is
5152  * no movement on screen, after a timeout of 1 second, a switch to low RR is
5153  * made.
5154  *
5155  * For integration with frontbuffer tracking code, intel_edp_drrs_invalidate()
5156  * and intel_edp_drrs_flush() are called.
5157  *
5158  * DRRS can be further extended to support other internal panels and also
5159  * the scenario of video playback wherein RR is set based on the rate
5160  * requested by userspace.
5161  */
5162
5163 /**
5164  * intel_dp_drrs_init - Init basic DRRS work and mutex.
5165  * @connector: eDP connector
5166  * @fixed_mode: preferred mode of panel
5167  *
5168  * This function is called only once at driver load to initialize basic
5169  * DRRS stuff.
5170  *
5171  * Returns:
5172  * Downclock mode if the panel supports it, else NULL.
5173  * DRRS support is determined by the presence of downclock mode (apart
5174  * from VBT setting).
5175  */
5176 static struct drm_display_mode *
5177 intel_dp_drrs_init(struct intel_connector *connector,
5178                    struct drm_display_mode *fixed_mode)
5179 {
5180         struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
5181         struct drm_display_mode *downclock_mode = NULL;
5182
5183         INIT_DELAYED_WORK(&dev_priv->drrs.work, intel_edp_drrs_downclock_work);
5184         mutex_init(&dev_priv->drrs.mutex);
5185
5186         if (DISPLAY_VER(dev_priv) <= 6) {
5187                 drm_dbg_kms(&dev_priv->drm,
5188                             "DRRS only supported for Gen7 and above\n");
5189                 return NULL;
5190         }
5191
5192         if (dev_priv->vbt.drrs_type != SEAMLESS_DRRS_SUPPORT) {
5193                 drm_dbg_kms(&dev_priv->drm, "VBT doesn't support DRRS\n");
5194                 return NULL;
5195         }
5196
5197         downclock_mode = intel_panel_edid_downclock_mode(connector, fixed_mode);
5198         if (!downclock_mode) {
5199                 drm_dbg_kms(&dev_priv->drm,
5200                             "Downclock mode is not found. DRRS not supported\n");
5201                 return NULL;
5202         }
5203
5204         dev_priv->drrs.type = dev_priv->vbt.drrs_type;
5205
5206         dev_priv->drrs.refresh_rate_type = DRRS_HIGH_RR;
5207         drm_dbg_kms(&dev_priv->drm,
5208                     "seamless DRRS supported for eDP panel.\n");
5209         return downclock_mode;
5210 }
5211
5212 static bool intel_edp_init_connector(struct intel_dp *intel_dp,
5213                                      struct intel_connector *intel_connector)
5214 {
5215         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
5216         struct drm_device *dev = &dev_priv->drm;
5217         struct drm_connector *connector = &intel_connector->base;
5218         struct drm_display_mode *fixed_mode = NULL;
5219         struct drm_display_mode *downclock_mode = NULL;
5220         bool has_dpcd;
5221         enum pipe pipe = INVALID_PIPE;
5222         struct edid *edid;
5223
5224         if (!intel_dp_is_edp(intel_dp))
5225                 return true;
5226
5227         /*
5228          * On IBX/CPT we may get here with LVDS already registered. Since the
5229          * driver uses the only internal power sequencer available for both
5230          * eDP and LVDS, bail out early in this case to prevent interfering
5231          * with an already powered-on LVDS power sequencer.
5232          */
5233         if (intel_get_lvds_encoder(dev_priv)) {
5234                 drm_WARN_ON(dev,
5235                             !(HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv)));
5236                 drm_info(&dev_priv->drm,
5237                          "LVDS was detected, not registering eDP\n");
5238
5239                 return false;
5240         }
5241
5242         intel_pps_init(intel_dp);
5243
5244         /* Cache DPCD and EDID for edp. */
5245         has_dpcd = intel_edp_init_dpcd(intel_dp);
5246
5247         if (!has_dpcd) {
5248                 /* if this fails, presume the device is a ghost */
5249                 drm_info(&dev_priv->drm,
5250                          "failed to retrieve link info, disabling eDP\n");
5251                 goto out_vdd_off;
5252         }
5253
5254         mutex_lock(&dev->mode_config.mutex);
5255         edid = drm_get_edid(connector, &intel_dp->aux.ddc);
5256         if (edid) {
5257                 if (drm_add_edid_modes(connector, edid)) {
5258                         drm_connector_update_edid_property(connector, edid);
5259                 } else {
5260                         kfree(edid);
5261                         edid = ERR_PTR(-EINVAL);
5262                 }
5263         } else {
5264                 edid = ERR_PTR(-ENOENT);
5265         }
5266         intel_connector->edid = edid;
5267
5268         fixed_mode = intel_panel_edid_fixed_mode(intel_connector);
5269         if (fixed_mode)
5270                 downclock_mode = intel_dp_drrs_init(intel_connector, fixed_mode);
5271
5272         /* multiply the mode clock and horizontal timings for MSO */
5273         intel_edp_mso_mode_fixup(intel_connector, fixed_mode);
5274         intel_edp_mso_mode_fixup(intel_connector, downclock_mode);
5275
5276         /* fallback to VBT if available for eDP */
5277         if (!fixed_mode)
5278                 fixed_mode = intel_panel_vbt_fixed_mode(intel_connector);
5279         mutex_unlock(&dev->mode_config.mutex);
5280
5281         if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
5282                 /*
5283                  * Figure out the current pipe for the initial backlight setup.
5284                  * If the current pipe isn't valid, try the PPS pipe, and if that
5285                  * fails just assume pipe A.
5286                  */
5287                 pipe = vlv_active_pipe(intel_dp);
5288
5289                 if (pipe != PIPE_A && pipe != PIPE_B)
5290                         pipe = intel_dp->pps.pps_pipe;
5291
5292                 if (pipe != PIPE_A && pipe != PIPE_B)
5293                         pipe = PIPE_A;
5294
5295                 drm_dbg_kms(&dev_priv->drm,
5296                             "using pipe %c for initial backlight setup\n",
5297                             pipe_name(pipe));
5298         }
5299
5300         intel_panel_init(&intel_connector->panel, fixed_mode, downclock_mode);
5301         if (!(dev_priv->quirks & QUIRK_NO_PPS_BACKLIGHT_POWER_HOOK))
5302                 intel_connector->panel.backlight.power = intel_pps_backlight_power;
5303         intel_backlight_setup(intel_connector, pipe);
5304
5305         if (fixed_mode) {
5306                 drm_connector_set_panel_orientation_with_quirk(connector,
5307                                 dev_priv->vbt.orientation,
5308                                 fixed_mode->hdisplay, fixed_mode->vdisplay);
5309         }
5310
5311         return true;
5312
5313 out_vdd_off:
5314         intel_pps_vdd_off_sync(intel_dp);
5315
5316         return false;
5317 }
5318
5319 static void intel_dp_modeset_retry_work_fn(struct work_struct *work)
5320 {
5321         struct intel_connector *intel_connector;
5322         struct drm_connector *connector;
5323
5324         intel_connector = container_of(work, typeof(*intel_connector),
5325                                        modeset_retry_work);
5326         connector = &intel_connector->base;
5327         DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n", connector->base.id,
5328                       connector->name);
5329
5330         /* Grab the locks before changing connector property */
5331         mutex_lock(&connector->dev->mode_config.mutex);
5332         /* Set connector link status to BAD and send a Uevent to notify
5333          * userspace to do a modeset.
5334          */
5335         drm_connector_set_link_status_property(connector,
5336                                                DRM_MODE_LINK_STATUS_BAD);
5337         mutex_unlock(&connector->dev->mode_config.mutex);
5338         /* Send Hotplug uevent so userspace can reprobe */
5339         drm_kms_helper_hotplug_event(connector->dev);
5340 }
5341
5342 bool
5343 intel_dp_init_connector(struct intel_digital_port *dig_port,
5344                         struct intel_connector *intel_connector)
5345 {
5346         struct drm_connector *connector = &intel_connector->base;
5347         struct intel_dp *intel_dp = &dig_port->dp;
5348         struct intel_encoder *intel_encoder = &dig_port->base;
5349         struct drm_device *dev = intel_encoder->base.dev;
5350         struct drm_i915_private *dev_priv = to_i915(dev);
5351         enum port port = intel_encoder->port;
5352         enum phy phy = intel_port_to_phy(dev_priv, port);
5353         int type;
5354
5355         /* Initialize the work for modeset in case of link train failure */
5356         INIT_WORK(&intel_connector->modeset_retry_work,
5357                   intel_dp_modeset_retry_work_fn);
5358
5359         if (drm_WARN(dev, dig_port->max_lanes < 1,
5360                      "Not enough lanes (%d) for DP on [ENCODER:%d:%s]\n",
5361                      dig_port->max_lanes, intel_encoder->base.base.id,
5362                      intel_encoder->base.name))
5363                 return false;
5364
5365         intel_dp_set_source_rates(intel_dp);
5366
5367         intel_dp->reset_link_params = true;
5368         intel_dp->pps.pps_pipe = INVALID_PIPE;
5369         intel_dp->pps.active_pipe = INVALID_PIPE;
5370
5371         /* Preserve the current hw state. */
5372         intel_dp->DP = intel_de_read(dev_priv, intel_dp->output_reg);
5373         intel_dp->attached_connector = intel_connector;
5374
5375         if (intel_dp_is_port_edp(dev_priv, port)) {
5376                 /*
5377                  * Currently we don't support eDP on TypeC ports, although in
5378                  * theory it could work on TypeC legacy ports.
5379                  */
5380                 drm_WARN_ON(dev, intel_phy_is_tc(dev_priv, phy));
5381                 type = DRM_MODE_CONNECTOR_eDP;
5382         } else {
5383                 type = DRM_MODE_CONNECTOR_DisplayPort;
5384         }
5385
5386         if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
5387                 intel_dp->pps.active_pipe = vlv_active_pipe(intel_dp);
5388
5389         /*
5390          * For eDP we always set the encoder type to INTEL_OUTPUT_EDP, but
5391          * for DP the encoder type can be set by the caller to
5392          * INTEL_OUTPUT_UNKNOWN for DDI, so don't rewrite it.
5393          */
5394         if (type == DRM_MODE_CONNECTOR_eDP)
5395                 intel_encoder->type = INTEL_OUTPUT_EDP;
5396
5397         /* eDP is only supported on ports B and C on vlv/chv */
5398         if (drm_WARN_ON(dev, (IS_VALLEYVIEW(dev_priv) ||
5399                               IS_CHERRYVIEW(dev_priv)) &&
5400                         intel_dp_is_edp(intel_dp) &&
5401                         port != PORT_B && port != PORT_C))
5402                 return false;
5403
5404         drm_dbg_kms(&dev_priv->drm,
5405                     "Adding %s connector on [ENCODER:%d:%s]\n",
5406                     type == DRM_MODE_CONNECTOR_eDP ? "eDP" : "DP",
5407                     intel_encoder->base.base.id, intel_encoder->base.name);
5408
5409         drm_connector_init(dev, connector, &intel_dp_connector_funcs, type);
5410         drm_connector_helper_add(connector, &intel_dp_connector_helper_funcs);
5411
5412         if (!HAS_GMCH(dev_priv))
5413                 connector->interlace_allowed = true;
5414         connector->doublescan_allowed = false;
5415
5416         intel_connector->polled = DRM_CONNECTOR_POLL_HPD;
5417
5418         intel_dp_aux_init(intel_dp);
5419
5420         intel_connector_attach_encoder(intel_connector, intel_encoder);
5421
5422         if (HAS_DDI(dev_priv))
5423                 intel_connector->get_hw_state = intel_ddi_connector_get_hw_state;
5424         else
5425                 intel_connector->get_hw_state = intel_connector_get_hw_state;
5426
5427         /* init MST on ports that can support it */
5428         intel_dp_mst_encoder_init(dig_port,
5429                                   intel_connector->base.base.id);
5430
5431         if (!intel_edp_init_connector(intel_dp, intel_connector)) {
5432                 intel_dp_aux_fini(intel_dp);
5433                 intel_dp_mst_encoder_cleanup(dig_port);
5434                 goto fail;
5435         }
5436
5437         intel_dp_add_properties(intel_dp, connector);
5438
5439         if (is_hdcp_supported(dev_priv, port) && !intel_dp_is_edp(intel_dp)) {
5440                 int ret = intel_dp_hdcp_init(dig_port, intel_connector);
5441                 if (ret)
5442                         drm_dbg_kms(&dev_priv->drm,
5443                                     "HDCP init failed, skipping.\n");
5444         }
5445
5446         /* For the G4X desktop chip, PEG_BAND_GAP_DATA bits 3:0 must first be
5447          * written as 0xd.  Failure to do so will result in spurious interrupts
5448          * being generated on the port when a cable is not attached.
5449          */
5450         if (IS_G45(dev_priv)) {
5451                 u32 temp = intel_de_read(dev_priv, PEG_BAND_GAP_DATA);
5452                 intel_de_write(dev_priv, PEG_BAND_GAP_DATA,
5453                                (temp & ~0xf) | 0xd);
5454         }
5455
5456         intel_dp->frl.is_trained = false;
5457         intel_dp->frl.trained_rate_gbps = 0;
5458
5459         intel_psr_init(intel_dp);
5460
5461         return true;
5462
5463 fail:
5464         drm_connector_cleanup(connector);
5465
5466         return false;
5467 }
5468
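     /*
      * Suspend the MST topology managers of all MST-capable DDI ports that
      * currently have an active MST topology.
      */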
5469 void intel_dp_mst_suspend(struct drm_i915_private *dev_priv)
5470 {
5471         struct intel_encoder *encoder;
5472
5473         if (!HAS_DISPLAY(dev_priv))
5474                 return;
5475
5476         for_each_intel_encoder(&dev_priv->drm, encoder) {
5477                 struct intel_dp *intel_dp;
5478
5479                 if (encoder->type != INTEL_OUTPUT_DDI)
5480                         continue;
5481
5482                 intel_dp = enc_to_intel_dp(encoder);
5483
5484                 if (!intel_dp->can_mst)
5485                         continue;
5486
5487                 if (intel_dp->is_mst)
5488                         drm_dp_mst_topology_mgr_suspend(&intel_dp->mst_mgr);
5489         }
5490 }
5491
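     /*
      * Resume the MST topology managers of all MST-capable DDI ports; if a
      * topology fails to resume, fall back to non-MST operation on that
      * port.
      */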
5492 void intel_dp_mst_resume(struct drm_i915_private *dev_priv)
5493 {
5494         struct intel_encoder *encoder;
5495
5496         if (!HAS_DISPLAY(dev_priv))
5497                 return;
5498
5499         for_each_intel_encoder(&dev_priv->drm, encoder) {
5500                 struct intel_dp *intel_dp;
5501                 int ret;
5502
5503                 if (encoder->type != INTEL_OUTPUT_DDI)
5504                         continue;
5505
5506                 intel_dp = enc_to_intel_dp(encoder);
5507
5508                 if (!intel_dp->can_mst)
5509                         continue;
5510
5511                 ret = drm_dp_mst_topology_mgr_resume(&intel_dp->mst_mgr,
5512                                                      true);
5513                 if (ret) {
5514                         intel_dp->is_mst = false;
5515                         drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr,
5516                                                         false);
5517                 }
5518         }
5519 }