drivers/gpu/drm/i915/display/intel_tc.c
1 // SPDX-License-Identifier: MIT
2 /*
3  * Copyright © 2019 Intel Corporation
4  */
5
6 #include "i915_drv.h"
7 #include "i915_reg.h"
8 #include "intel_atomic.h"
9 #include "intel_cx0_phy_regs.h"
10 #include "intel_ddi.h"
11 #include "intel_de.h"
12 #include "intel_display.h"
13 #include "intel_display_driver.h"
14 #include "intel_display_power_map.h"
15 #include "intel_display_types.h"
16 #include "intel_dkl_phy_regs.h"
17 #include "intel_dp.h"
18 #include "intel_dp_mst.h"
19 #include "intel_mg_phy_regs.h"
20 #include "intel_modeset_lock.h"
21 #include "intel_tc.h"
22
23 #define DP_PIN_ASSIGNMENT_C     0x3
24 #define DP_PIN_ASSIGNMENT_D     0x4
25 #define DP_PIN_ASSIGNMENT_E     0x5
26
27 enum tc_port_mode {
28         TC_PORT_DISCONNECTED,
29         TC_PORT_TBT_ALT,
30         TC_PORT_DP_ALT,
31         TC_PORT_LEGACY,
32 };
33
34 struct intel_tc_port;
35
36 struct intel_tc_phy_ops {
37         enum intel_display_power_domain (*cold_off_domain)(struct intel_tc_port *tc);
38         u32 (*hpd_live_status)(struct intel_tc_port *tc);
39         bool (*is_ready)(struct intel_tc_port *tc);
40         bool (*is_owned)(struct intel_tc_port *tc);
41         void (*get_hw_state)(struct intel_tc_port *tc);
42         bool (*connect)(struct intel_tc_port *tc, int required_lanes);
43         void (*disconnect)(struct intel_tc_port *tc);
44         void (*init)(struct intel_tc_port *tc);
45 };
46
47 struct intel_tc_port {
48         struct intel_digital_port *dig_port;
49
50         const struct intel_tc_phy_ops *phy_ops;
51
52         struct mutex lock;      /* protects the TypeC port mode */
53         intel_wakeref_t lock_wakeref;
54 #if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
55         enum intel_display_power_domain lock_power_domain;
56 #endif
57         struct delayed_work disconnect_phy_work;
58         struct delayed_work link_reset_work;
59         int link_refcount;
60         bool legacy_port:1;
61         const char *port_name;
62         enum tc_port_mode mode;
63         enum tc_port_mode init_mode;
64         enum phy_fia phy_fia;
65         u8 phy_fia_idx;
66 };
67
68 static enum intel_display_power_domain
69 tc_phy_cold_off_domain(struct intel_tc_port *tc);
70 static u32 tc_phy_hpd_live_status(struct intel_tc_port *tc);
71 static bool tc_phy_is_ready(struct intel_tc_port *tc);
72 static bool tc_phy_wait_for_ready(struct intel_tc_port *tc);
73 static enum tc_port_mode tc_phy_get_current_mode(struct intel_tc_port *tc);
74
75 static const char *tc_port_mode_name(enum tc_port_mode mode)
76 {
77         static const char * const names[] = {
78                 [TC_PORT_DISCONNECTED] = "disconnected",
79                 [TC_PORT_TBT_ALT] = "tbt-alt",
80                 [TC_PORT_DP_ALT] = "dp-alt",
81                 [TC_PORT_LEGACY] = "legacy",
82         };
83
84         if (WARN_ON(mode >= ARRAY_SIZE(names)))
85                 mode = TC_PORT_DISCONNECTED;
86
87         return names[mode];
88 }
89
90 static struct intel_tc_port *to_tc_port(struct intel_digital_port *dig_port)
91 {
92         return dig_port->tc;
93 }
94
95 static struct drm_i915_private *tc_to_i915(struct intel_tc_port *tc)
96 {
97         return to_i915(tc->dig_port->base.base.dev);
98 }
99
100 static bool intel_tc_port_in_mode(struct intel_digital_port *dig_port,
101                                   enum tc_port_mode mode)
102 {
103         struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
104         enum phy phy = intel_port_to_phy(i915, dig_port->base.port);
105         struct intel_tc_port *tc = to_tc_port(dig_port);
106
107         return intel_phy_is_tc(i915, phy) && tc->mode == mode;
108 }
109
110 bool intel_tc_port_in_tbt_alt_mode(struct intel_digital_port *dig_port)
111 {
112         return intel_tc_port_in_mode(dig_port, TC_PORT_TBT_ALT);
113 }
114
115 bool intel_tc_port_in_dp_alt_mode(struct intel_digital_port *dig_port)
116 {
117         return intel_tc_port_in_mode(dig_port, TC_PORT_DP_ALT);
118 }
119
120 bool intel_tc_port_in_legacy_mode(struct intel_digital_port *dig_port)
121 {
122         return intel_tc_port_in_mode(dig_port, TC_PORT_LEGACY);
123 }
124
125 /*
126  * The display power domains used for TC ports depending on the
127  * platform and TC mode (legacy, DP-alt, TBT):
128  *
129  * POWER_DOMAIN_DISPLAY_CORE:
130  * --------------------------
131  * ADLP/all modes:
132  *   - TCSS/IOM access for PHY ready state.
133  * ADLP+/all modes:
134  *   - DE/north-,south-HPD ISR access for HPD live state.
135  *
136  * POWER_DOMAIN_PORT_DDI_LANES_<port>:
137  * -----------------------------------
138  * ICL+/all modes:
139  *   - DE/DDI_BUF access for port enabled state.
140  * ADLP/all modes:
141  *   - DE/DDI_BUF access for PHY owned state.
142  *
143  * POWER_DOMAIN_AUX_USBC<TC port index>:
144  * -------------------------------------
145  * ICL/legacy mode:
146  *   - TCSS/IOM,FIA access for PHY ready, owned and HPD live state
147  *   - TCSS/PHY: block TC-cold power state for using the PHY AUX and
148  *     main lanes.
149  * ADLP/legacy, DP-alt modes:
150  *   - TCSS/PHY: block TC-cold power state for using the PHY AUX and
151  *     main lanes.
152  *
153  * POWER_DOMAIN_TC_COLD_OFF:
154  * -------------------------
155  * ICL/DP-alt, TBT mode:
156  *   - TCSS/TBT: block TC-cold power state for using the (direct or
157  *     TBT DP-IN) AUX and main lanes.
158  *
159  * TGL/all modes:
160  *   - TCSS/IOM,FIA access for PHY ready, owned and HPD live state
161  *   - TCSS/PHY: block TC-cold power state for using the (direct or
162  *     TBT DP-IN) AUX and main lanes.
163  *
164  * ADLP/TBT mode:
165  *   - TCSS/TBT: block TC-cold power state for using the (TBT DP-IN)
166  *     AUX and main lanes.
167  *
168  * XELPDP+/all modes:
169  *   - TCSS/IOM,FIA access for PHY ready, owned state
170  *   - TCSS/PHY: block TC-cold power state for using the (direct or
171  *     TBT DP-IN) AUX and main lanes.
172  */
173 bool intel_tc_cold_requires_aux_pw(struct intel_digital_port *dig_port)
174 {
175         struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
176         struct intel_tc_port *tc = to_tc_port(dig_port);
177
178         return tc_phy_cold_off_domain(tc) ==
179                intel_display_power_legacy_aux_domain(i915, dig_port->aux_ch);
180 }
181
182 static intel_wakeref_t
183 __tc_cold_block(struct intel_tc_port *tc, enum intel_display_power_domain *domain)
184 {
185         struct drm_i915_private *i915 = tc_to_i915(tc);
186
187         *domain = tc_phy_cold_off_domain(tc);
188
189         return intel_display_power_get(i915, *domain);
190 }
191
192 static intel_wakeref_t
193 tc_cold_block(struct intel_tc_port *tc)
194 {
195         enum intel_display_power_domain domain;
196         intel_wakeref_t wakeref;
197
198         wakeref = __tc_cold_block(tc, &domain);
199 #if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
200         tc->lock_power_domain = domain;
201 #endif
202         return wakeref;
203 }
204
205 static void
206 __tc_cold_unblock(struct intel_tc_port *tc, enum intel_display_power_domain domain,
207                   intel_wakeref_t wakeref)
208 {
209         struct drm_i915_private *i915 = tc_to_i915(tc);
210
211         intel_display_power_put(i915, domain, wakeref);
212 }
213
214 static void
215 tc_cold_unblock(struct intel_tc_port *tc, intel_wakeref_t wakeref)
216 {
217         enum intel_display_power_domain domain = tc_phy_cold_off_domain(tc);
218
219 #if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
220         drm_WARN_ON(&tc_to_i915(tc)->drm, tc->lock_power_domain != domain);
221 #endif
222         __tc_cold_unblock(tc, domain, wakeref);
223 }
224
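/*
 * Typical pairing of tc_cold_block()/tc_cold_unblock() in the PHY
 * connect/disconnect paths below: the connect side stores the wakeref,
 *
 *	tc->lock_wakeref = tc_cold_block(tc);
 *
 * and the disconnect side releases it while clearing the stored value,
 *
 *	tc_cold_unblock(tc, fetch_and_zero(&tc->lock_wakeref));
 *
 * so the reference can't be put twice by mistake.
 */
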
225 static void
226 assert_display_core_power_enabled(struct intel_tc_port *tc)
227 {
228         struct drm_i915_private *i915 = tc_to_i915(tc);
229
230         drm_WARN_ON(&i915->drm,
231                     !intel_display_power_is_enabled(i915, POWER_DOMAIN_DISPLAY_CORE));
232 }
233
234 static void
235 assert_tc_cold_blocked(struct intel_tc_port *tc)
236 {
237         struct drm_i915_private *i915 = tc_to_i915(tc);
238         bool enabled;
239
240         enabled = intel_display_power_is_enabled(i915,
241                                                  tc_phy_cold_off_domain(tc));
242         drm_WARN_ON(&i915->drm, !enabled);
243 }
244
245 static enum intel_display_power_domain
246 tc_port_power_domain(struct intel_tc_port *tc)
247 {
248         struct drm_i915_private *i915 = tc_to_i915(tc);
249         enum tc_port tc_port = intel_port_to_tc(i915, tc->dig_port->base.port);
250
251         return POWER_DOMAIN_PORT_DDI_LANES_TC1 + tc_port - TC_PORT_1;
252 }
253
254 static void
255 assert_tc_port_power_enabled(struct intel_tc_port *tc)
256 {
257         struct drm_i915_private *i915 = tc_to_i915(tc);
258
259         drm_WARN_ON(&i915->drm,
260                     !intel_display_power_is_enabled(i915, tc_port_power_domain(tc)));
261 }
262
263 static u32 intel_tc_port_get_lane_mask(struct intel_digital_port *dig_port)
264 {
265         struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
266         struct intel_tc_port *tc = to_tc_port(dig_port);
267         u32 lane_mask;
268
269         lane_mask = intel_de_read(i915, PORT_TX_DFLEXDPSP(tc->phy_fia));
270
271         drm_WARN_ON(&i915->drm, lane_mask == 0xffffffff);
272         assert_tc_cold_blocked(tc);
273
274         lane_mask &= DP_LANE_ASSIGNMENT_MASK(tc->phy_fia_idx);
275         return lane_mask >> DP_LANE_ASSIGNMENT_SHIFT(tc->phy_fia_idx);
276 }
277
278 u32 intel_tc_port_get_pin_assignment_mask(struct intel_digital_port *dig_port)
279 {
280         struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
281         struct intel_tc_port *tc = to_tc_port(dig_port);
282         u32 pin_mask;
283
284         pin_mask = intel_de_read(i915, PORT_TX_DFLEXPA1(tc->phy_fia));
285
286         drm_WARN_ON(&i915->drm, pin_mask == 0xffffffff);
287         assert_tc_cold_blocked(tc);
288
289         return (pin_mask & DP_PIN_ASSIGNMENT_MASK(tc->phy_fia_idx)) >>
290                DP_PIN_ASSIGNMENT_SHIFT(tc->phy_fia_idx);
291 }
292
293 static int lnl_tc_port_get_max_lane_count(struct intel_digital_port *dig_port)
294 {
295         struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
296         enum tc_port tc_port = intel_port_to_tc(i915, dig_port->base.port);
297         intel_wakeref_t wakeref;
298         u32 val, pin_assignment;
299
300         with_intel_display_power(i915, POWER_DOMAIN_DISPLAY_CORE, wakeref)
301                 val = intel_de_read(i915, TCSS_DDI_STATUS(tc_port));
302
303         pin_assignment =
304                 REG_FIELD_GET(TCSS_DDI_STATUS_PIN_ASSIGNMENT_MASK, val);
305
306         switch (pin_assignment) {
307         default:
308                 MISSING_CASE(pin_assignment);
309                 fallthrough;
310         case DP_PIN_ASSIGNMENT_D:
311                 return 2;
312         case DP_PIN_ASSIGNMENT_C:
313         case DP_PIN_ASSIGNMENT_E:
314                 return 4;
315         }
316 }
317
318 static int mtl_tc_port_get_max_lane_count(struct intel_digital_port *dig_port)
319 {
320         struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
321         intel_wakeref_t wakeref;
322         u32 pin_mask;
323
324         with_intel_display_power(i915, POWER_DOMAIN_DISPLAY_CORE, wakeref)
325                 pin_mask = intel_tc_port_get_pin_assignment_mask(dig_port);
326
327         switch (pin_mask) {
328         default:
329                 MISSING_CASE(pin_mask);
330                 fallthrough;
331         case DP_PIN_ASSIGNMENT_D:
332                 return 2;
333         case DP_PIN_ASSIGNMENT_C:
334         case DP_PIN_ASSIGNMENT_E:
335                 return 4;
336         }
337 }
338
339 static int intel_tc_port_get_max_lane_count(struct intel_digital_port *dig_port)
340 {
341         struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
342         intel_wakeref_t wakeref;
343         u32 lane_mask = 0;
344
345         with_intel_display_power(i915, POWER_DOMAIN_DISPLAY_CORE, wakeref)
346                 lane_mask = intel_tc_port_get_lane_mask(dig_port);
347
348         switch (lane_mask) {
349         default:
350                 MISSING_CASE(lane_mask);
351                 fallthrough;
352         case 0x1:
353         case 0x2:
354         case 0x4:
355         case 0x8:
356                 return 1;
357         case 0x3:
358         case 0xc:
359                 return 2;
360         case 0xf:
361                 return 4;
362         }
363 }
364
365 int intel_tc_port_max_lane_count(struct intel_digital_port *dig_port)
366 {
367         struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
368         struct intel_tc_port *tc = to_tc_port(dig_port);
369         enum phy phy = intel_port_to_phy(i915, dig_port->base.port);
370
371         if (!intel_phy_is_tc(i915, phy) || tc->mode != TC_PORT_DP_ALT)
372                 return 4;
373
374         assert_tc_cold_blocked(tc);
375
376         if (DISPLAY_VER(i915) >= 20)
377                 return lnl_tc_port_get_max_lane_count(dig_port);
378
379         if (DISPLAY_VER(i915) >= 14)
380                 return mtl_tc_port_get_max_lane_count(dig_port);
381
382         return intel_tc_port_get_max_lane_count(dig_port);
383 }
384
385 void intel_tc_port_set_fia_lane_count(struct intel_digital_port *dig_port,
386                                       int required_lanes)
387 {
388         struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
389         struct intel_tc_port *tc = to_tc_port(dig_port);
390         bool lane_reversal = dig_port->saved_port_bits & DDI_BUF_PORT_REVERSAL;
391         u32 val;
392
393         drm_WARN_ON(&i915->drm,
394                     lane_reversal && tc->mode != TC_PORT_LEGACY);
395
396         assert_tc_cold_blocked(tc);
397
398         val = intel_de_read(i915, PORT_TX_DFLEXDPMLE1(tc->phy_fia));
399         val &= ~DFLEXDPMLE1_DPMLETC_MASK(tc->phy_fia_idx);
400
401         switch (required_lanes) {
402         case 1:
403                 val |= lane_reversal ?
404                         DFLEXDPMLE1_DPMLETC_ML3(tc->phy_fia_idx) :
405                         DFLEXDPMLE1_DPMLETC_ML0(tc->phy_fia_idx);
406                 break;
407         case 2:
408                 val |= lane_reversal ?
409                         DFLEXDPMLE1_DPMLETC_ML3_2(tc->phy_fia_idx) :
410                         DFLEXDPMLE1_DPMLETC_ML1_0(tc->phy_fia_idx);
411                 break;
412         case 4:
413                 val |= DFLEXDPMLE1_DPMLETC_ML3_0(tc->phy_fia_idx);
414                 break;
415         default:
416                 MISSING_CASE(required_lanes);
417         }
418
419         intel_de_write(i915, PORT_TX_DFLEXDPMLE1(tc->phy_fia), val);
420 }
421
422 static void tc_port_fixup_legacy_flag(struct intel_tc_port *tc,
423                                       u32 live_status_mask)
424 {
425         struct drm_i915_private *i915 = tc_to_i915(tc);
426         u32 valid_hpd_mask;
427
428         drm_WARN_ON(&i915->drm, tc->mode != TC_PORT_DISCONNECTED);
429
430         if (hweight32(live_status_mask) != 1)
431                 return;
432
433         if (tc->legacy_port)
434                 valid_hpd_mask = BIT(TC_PORT_LEGACY);
435         else
436                 valid_hpd_mask = BIT(TC_PORT_DP_ALT) |
437                                  BIT(TC_PORT_TBT_ALT);
438
439         if (!(live_status_mask & ~valid_hpd_mask))
440                 return;
441
442         /* If live status mismatches the VBT flag, trust the live status. */
443         drm_dbg_kms(&i915->drm,
444                     "Port %s: live status %08x mismatches the legacy port flag %08x, fixing flag\n",
445                     tc->port_name, live_status_mask, valid_hpd_mask);
446
447         tc->legacy_port = !tc->legacy_port;
448 }
449
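/*
 * Example for tc_port_fixup_legacy_flag() above: a port marked legacy
 * (tc->legacy_port == true) whose only live HPD bit is TC_PORT_DP_ALT
 * falls outside valid_hpd_mask, so the flag gets flipped and the port
 * is treated as DP-alt/TBT capable from then on. With zero or multiple
 * live HPD bits the flag is left untouched.
 */
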
450 static void tc_phy_load_fia_params(struct intel_tc_port *tc, bool modular_fia)
451 {
452         struct drm_i915_private *i915 = tc_to_i915(tc);
453         enum port port = tc->dig_port->base.port;
454         enum tc_port tc_port = intel_port_to_tc(i915, port);
455
456         /*
457          * Each Modular FIA instance houses 2 TC ports. In SoCs that have more
458          * than two TC ports, there are multiple instances of Modular FIA.
459          */
460         if (modular_fia) {
461                 tc->phy_fia = tc_port / 2;
462                 tc->phy_fia_idx = tc_port % 2;
463         } else {
464                 tc->phy_fia = FIA1;
465                 tc->phy_fia_idx = tc_port;
466         }
467 }
468
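/*
 * Example mapping for tc_phy_load_fia_params() above, assuming the
 * usual enum values (TC_PORT_1 == 0, FIA1 == 0): with a modular FIA,
 * TC_PORT_1/TC_PORT_2 map to FIA1 index 0/1 and TC_PORT_3/TC_PORT_4
 * map to FIA2 index 0/1; without a modular FIA all ports share FIA1
 * and phy_fia_idx equals the TC port index.
 */
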
469 /*
470  * ICL TC PHY handlers
471  * -------------------
472  */
473 static enum intel_display_power_domain
474 icl_tc_phy_cold_off_domain(struct intel_tc_port *tc)
475 {
476         struct drm_i915_private *i915 = tc_to_i915(tc);
477         struct intel_digital_port *dig_port = tc->dig_port;
478
479         if (tc->legacy_port)
480                 return intel_display_power_legacy_aux_domain(i915, dig_port->aux_ch);
481
482         return POWER_DOMAIN_TC_COLD_OFF;
483 }
484
485 static u32 icl_tc_phy_hpd_live_status(struct intel_tc_port *tc)
486 {
487         struct drm_i915_private *i915 = tc_to_i915(tc);
488         struct intel_digital_port *dig_port = tc->dig_port;
489         u32 isr_bit = i915->display.hotplug.pch_hpd[dig_port->base.hpd_pin];
490         intel_wakeref_t wakeref;
491         u32 fia_isr;
492         u32 pch_isr;
493         u32 mask = 0;
494
495         with_intel_display_power(i915, tc_phy_cold_off_domain(tc), wakeref) {
496                 fia_isr = intel_de_read(i915, PORT_TX_DFLEXDPSP(tc->phy_fia));
497                 pch_isr = intel_de_read(i915, SDEISR);
498         }
499
500         if (fia_isr == 0xffffffff) {
501                 drm_dbg_kms(&i915->drm,
502                             "Port %s: PHY in TCCOLD, nothing connected\n",
503                             tc->port_name);
504                 return mask;
505         }
506
507         if (fia_isr & TC_LIVE_STATE_TBT(tc->phy_fia_idx))
508                 mask |= BIT(TC_PORT_TBT_ALT);
509         if (fia_isr & TC_LIVE_STATE_TC(tc->phy_fia_idx))
510                 mask |= BIT(TC_PORT_DP_ALT);
511
512         if (pch_isr & isr_bit)
513                 mask |= BIT(TC_PORT_LEGACY);
514
515         return mask;
516 }
517
518 /*
519  * Return the PHY status complete flag indicating that display can acquire the
520  * PHY ownership. The IOM firmware sets this flag when a DP-alt or legacy sink
521  * is connected and it's ready to switch the ownership to display. The flag
522  * will be left cleared when a TBT-alt sink is connected, where the PHY is
523  * owned by the TBT subsystem and so switching the ownership to display is not
524  * required.
525  */
526 static bool icl_tc_phy_is_ready(struct intel_tc_port *tc)
527 {
528         struct drm_i915_private *i915 = tc_to_i915(tc);
529         u32 val;
530
531         assert_tc_cold_blocked(tc);
532
533         val = intel_de_read(i915, PORT_TX_DFLEXDPPMS(tc->phy_fia));
534         if (val == 0xffffffff) {
535                 drm_dbg_kms(&i915->drm,
536                             "Port %s: PHY in TCCOLD, assuming not ready\n",
537                             tc->port_name);
538                 return false;
539         }
540
541         return val & DP_PHY_MODE_STATUS_COMPLETED(tc->phy_fia_idx);
542 }
543
544 static bool icl_tc_phy_take_ownership(struct intel_tc_port *tc,
545                                       bool take)
546 {
547         struct drm_i915_private *i915 = tc_to_i915(tc);
548         u32 val;
549
550         assert_tc_cold_blocked(tc);
551
552         val = intel_de_read(i915, PORT_TX_DFLEXDPCSSS(tc->phy_fia));
553         if (val == 0xffffffff) {
554                 drm_dbg_kms(&i915->drm,
555                             "Port %s: PHY in TCCOLD, can't %s ownership\n",
556                             tc->port_name, take ? "take" : "release");
557
558                 return false;
559         }
560
561         val &= ~DP_PHY_MODE_STATUS_NOT_SAFE(tc->phy_fia_idx);
562         if (take)
563                 val |= DP_PHY_MODE_STATUS_NOT_SAFE(tc->phy_fia_idx);
564
565         intel_de_write(i915, PORT_TX_DFLEXDPCSSS(tc->phy_fia), val);
566
567         return true;
568 }
569
570 static bool icl_tc_phy_is_owned(struct intel_tc_port *tc)
571 {
572         struct drm_i915_private *i915 = tc_to_i915(tc);
573         u32 val;
574
575         assert_tc_cold_blocked(tc);
576
577         val = intel_de_read(i915, PORT_TX_DFLEXDPCSSS(tc->phy_fia));
578         if (val == 0xffffffff) {
579                 drm_dbg_kms(&i915->drm,
580                             "Port %s: PHY in TCCOLD, assume not owned\n",
581                             tc->port_name);
582                 return false;
583         }
584
585         return val & DP_PHY_MODE_STATUS_NOT_SAFE(tc->phy_fia_idx);
586 }
587
588 static void icl_tc_phy_get_hw_state(struct intel_tc_port *tc)
589 {
590         enum intel_display_power_domain domain;
591         intel_wakeref_t tc_cold_wref;
592
593         tc_cold_wref = __tc_cold_block(tc, &domain);
594
595         tc->mode = tc_phy_get_current_mode(tc);
596         if (tc->mode != TC_PORT_DISCONNECTED)
597                 tc->lock_wakeref = tc_cold_block(tc);
598
599         __tc_cold_unblock(tc, domain, tc_cold_wref);
600 }
601
602 /*
603  * This function implements the first part of the Connect Flow described by our
604  * specification, Gen11 TypeC Programming chapter. The rest of the flow (reading
605  * lanes, EDID, etc) is done as needed in the typical places.
606  *
607  * Unlike the other ports, type-C ports are not available to use as soon as we
608  * get a hotplug. The type-C PHYs can be shared between multiple controllers:
609  * display, USB, etc. As a result, handshaking through FIA is required around
610  * connect and disconnect to cleanly transfer ownership with the controller and
611  * set the type-C power state.
612  */
613 static bool tc_phy_verify_legacy_or_dp_alt_mode(struct intel_tc_port *tc,
614                                                 int required_lanes)
615 {
616         struct drm_i915_private *i915 = tc_to_i915(tc);
617         struct intel_digital_port *dig_port = tc->dig_port;
618         int max_lanes;
619
620         max_lanes = intel_tc_port_max_lane_count(dig_port);
621         if (tc->mode == TC_PORT_LEGACY) {
622                 drm_WARN_ON(&i915->drm, max_lanes != 4);
623                 return true;
624         }
625
626         drm_WARN_ON(&i915->drm, tc->mode != TC_PORT_DP_ALT);
627
628         /*
629          * Now we have to re-check the live state, in case the port recently
630          * became disconnected. Not necessary for legacy mode.
631          */
632         if (!(tc_phy_hpd_live_status(tc) & BIT(TC_PORT_DP_ALT))) {
633                 drm_dbg_kms(&i915->drm, "Port %s: PHY sudden disconnect\n",
634                             tc->port_name);
635                 return false;
636         }
637
638         if (max_lanes < required_lanes) {
639                 drm_dbg_kms(&i915->drm,
640                             "Port %s: PHY max lanes %d < required lanes %d\n",
641                             tc->port_name,
642                             max_lanes, required_lanes);
643                 return false;
644         }
645
646         return true;
647 }
648
649 static bool icl_tc_phy_connect(struct intel_tc_port *tc,
650                                int required_lanes)
651 {
652         struct drm_i915_private *i915 = tc_to_i915(tc);
653
654         tc->lock_wakeref = tc_cold_block(tc);
655
656         if (tc->mode == TC_PORT_TBT_ALT)
657                 return true;
658
659         if ((!tc_phy_is_ready(tc) ||
660              !icl_tc_phy_take_ownership(tc, true)) &&
661             !drm_WARN_ON(&i915->drm, tc->mode == TC_PORT_LEGACY)) {
662                 drm_dbg_kms(&i915->drm, "Port %s: can't take PHY ownership (ready %s)\n",
663                             tc->port_name,
664                             str_yes_no(tc_phy_is_ready(tc)));
665                 goto out_unblock_tc_cold;
666         }
667
668
669         if (!tc_phy_verify_legacy_or_dp_alt_mode(tc, required_lanes))
670                 goto out_release_phy;
671
672         return true;
673
674 out_release_phy:
675         icl_tc_phy_take_ownership(tc, false);
676 out_unblock_tc_cold:
677         tc_cold_unblock(tc, fetch_and_zero(&tc->lock_wakeref));
678
679         return false;
680 }
681
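/*
 * Summary of icl_tc_phy_connect() above: block TC-cold, keeping the
 * wakeref in tc->lock_wakeref; TBT-alt mode needs nothing further.
 * Otherwise check the PHY ready flag, take PHY ownership through the
 * FIA and verify that the pin assignment provides enough lanes. Any
 * failure unwinds in reverse order (release ownership, unblock
 * TC-cold), except that a ready/ownership failure on a legacy port
 * only triggers a WARN and the connect proceeds, since legacy ports
 * are expected to be always connectable.
 */
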
682 /*
683  * See the comment at the connect function. This implements the Disconnect
684  * Flow.
685  */
686 static void icl_tc_phy_disconnect(struct intel_tc_port *tc)
687 {
688         switch (tc->mode) {
689         case TC_PORT_LEGACY:
690         case TC_PORT_DP_ALT:
691                 icl_tc_phy_take_ownership(tc, false);
692                 fallthrough;
693         case TC_PORT_TBT_ALT:
694                 tc_cold_unblock(tc, fetch_and_zero(&tc->lock_wakeref));
695                 break;
696         default:
697                 MISSING_CASE(tc->mode);
698         }
699 }
700
701 static void icl_tc_phy_init(struct intel_tc_port *tc)
702 {
703         tc_phy_load_fia_params(tc, false);
704 }
705
706 static const struct intel_tc_phy_ops icl_tc_phy_ops = {
707         .cold_off_domain = icl_tc_phy_cold_off_domain,
708         .hpd_live_status = icl_tc_phy_hpd_live_status,
709         .is_ready = icl_tc_phy_is_ready,
710         .is_owned = icl_tc_phy_is_owned,
711         .get_hw_state = icl_tc_phy_get_hw_state,
712         .connect = icl_tc_phy_connect,
713         .disconnect = icl_tc_phy_disconnect,
714         .init = icl_tc_phy_init,
715 };
716
717 /*
718  * TGL TC PHY handlers
719  * -------------------
720  */
721 static enum intel_display_power_domain
722 tgl_tc_phy_cold_off_domain(struct intel_tc_port *tc)
723 {
724         return POWER_DOMAIN_TC_COLD_OFF;
725 }
726
727 static void tgl_tc_phy_init(struct intel_tc_port *tc)
728 {
729         struct drm_i915_private *i915 = tc_to_i915(tc);
730         intel_wakeref_t wakeref;
731         u32 val;
732
733         with_intel_display_power(i915, tc_phy_cold_off_domain(tc), wakeref)
734                 val = intel_de_read(i915, PORT_TX_DFLEXDPSP(FIA1));
735
736         drm_WARN_ON(&i915->drm, val == 0xffffffff);
737
738         tc_phy_load_fia_params(tc, val & MODULAR_FIA_MASK);
739 }
740
741 static const struct intel_tc_phy_ops tgl_tc_phy_ops = {
742         .cold_off_domain = tgl_tc_phy_cold_off_domain,
743         .hpd_live_status = icl_tc_phy_hpd_live_status,
744         .is_ready = icl_tc_phy_is_ready,
745         .is_owned = icl_tc_phy_is_owned,
746         .get_hw_state = icl_tc_phy_get_hw_state,
747         .connect = icl_tc_phy_connect,
748         .disconnect = icl_tc_phy_disconnect,
749         .init = tgl_tc_phy_init,
750 };
751
752 /*
753  * ADLP TC PHY handlers
754  * --------------------
755  */
756 static enum intel_display_power_domain
757 adlp_tc_phy_cold_off_domain(struct intel_tc_port *tc)
758 {
759         struct drm_i915_private *i915 = tc_to_i915(tc);
760         struct intel_digital_port *dig_port = tc->dig_port;
761
762         if (tc->mode != TC_PORT_TBT_ALT)
763                 return intel_display_power_legacy_aux_domain(i915, dig_port->aux_ch);
764
765         return POWER_DOMAIN_TC_COLD_OFF;
766 }
767
768 static u32 adlp_tc_phy_hpd_live_status(struct intel_tc_port *tc)
769 {
770         struct drm_i915_private *i915 = tc_to_i915(tc);
771         struct intel_digital_port *dig_port = tc->dig_port;
772         enum hpd_pin hpd_pin = dig_port->base.hpd_pin;
773         u32 cpu_isr_bits = i915->display.hotplug.hpd[hpd_pin];
774         u32 pch_isr_bit = i915->display.hotplug.pch_hpd[hpd_pin];
775         intel_wakeref_t wakeref;
776         u32 cpu_isr;
777         u32 pch_isr;
778         u32 mask = 0;
779
780         with_intel_display_power(i915, POWER_DOMAIN_DISPLAY_CORE, wakeref) {
781                 cpu_isr = intel_de_read(i915, GEN11_DE_HPD_ISR);
782                 pch_isr = intel_de_read(i915, SDEISR);
783         }
784
785         if (cpu_isr & (cpu_isr_bits & GEN11_DE_TC_HOTPLUG_MASK))
786                 mask |= BIT(TC_PORT_DP_ALT);
787         if (cpu_isr & (cpu_isr_bits & GEN11_DE_TBT_HOTPLUG_MASK))
788                 mask |= BIT(TC_PORT_TBT_ALT);
789
790         if (pch_isr & pch_isr_bit)
791                 mask |= BIT(TC_PORT_LEGACY);
792
793         return mask;
794 }
795
796 /*
797  * Return the PHY status complete flag indicating that display can acquire the
798  * PHY ownership. The IOM firmware sets this flag when it's ready to switch
799  * the ownership to display, regardless of what sink is connected (TBT-alt,
800  * DP-alt, legacy or nothing). For TBT-alt sinks the PHY is owned by the TBT
801  * subsystem and so switching the ownership to display is not required.
802  */
803 static bool adlp_tc_phy_is_ready(struct intel_tc_port *tc)
804 {
805         struct drm_i915_private *i915 = tc_to_i915(tc);
806         enum tc_port tc_port = intel_port_to_tc(i915, tc->dig_port->base.port);
807         u32 val;
808
809         assert_display_core_power_enabled(tc);
810
811         val = intel_de_read(i915, TCSS_DDI_STATUS(tc_port));
812         if (val == 0xffffffff) {
813                 drm_dbg_kms(&i915->drm,
814                             "Port %s: PHY in TCCOLD, assuming not ready\n",
815                             tc->port_name);
816                 return false;
817         }
818
819         return val & TCSS_DDI_STATUS_READY;
820 }
821
822 static bool adlp_tc_phy_take_ownership(struct intel_tc_port *tc,
823                                        bool take)
824 {
825         struct drm_i915_private *i915 = tc_to_i915(tc);
826         enum port port = tc->dig_port->base.port;
827
828         assert_tc_port_power_enabled(tc);
829
830         intel_de_rmw(i915, DDI_BUF_CTL(port), DDI_BUF_CTL_TC_PHY_OWNERSHIP,
831                      take ? DDI_BUF_CTL_TC_PHY_OWNERSHIP : 0);
832
833         return true;
834 }
835
836 static bool adlp_tc_phy_is_owned(struct intel_tc_port *tc)
837 {
838         struct drm_i915_private *i915 = tc_to_i915(tc);
839         enum port port = tc->dig_port->base.port;
840         u32 val;
841
842         assert_tc_port_power_enabled(tc);
843
844         val = intel_de_read(i915, DDI_BUF_CTL(port));
845         return val & DDI_BUF_CTL_TC_PHY_OWNERSHIP;
846 }
847
848 static void adlp_tc_phy_get_hw_state(struct intel_tc_port *tc)
849 {
850         struct drm_i915_private *i915 = tc_to_i915(tc);
851         enum intel_display_power_domain port_power_domain =
852                 tc_port_power_domain(tc);
853         intel_wakeref_t port_wakeref;
854
855         port_wakeref = intel_display_power_get(i915, port_power_domain);
856
857         tc->mode = tc_phy_get_current_mode(tc);
858         if (tc->mode != TC_PORT_DISCONNECTED)
859                 tc->lock_wakeref = tc_cold_block(tc);
860
861         intel_display_power_put(i915, port_power_domain, port_wakeref);
862 }
863
864 static bool adlp_tc_phy_connect(struct intel_tc_port *tc, int required_lanes)
865 {
866         struct drm_i915_private *i915 = tc_to_i915(tc);
867         enum intel_display_power_domain port_power_domain =
868                 tc_port_power_domain(tc);
869         intel_wakeref_t port_wakeref;
870
871         if (tc->mode == TC_PORT_TBT_ALT) {
872                 tc->lock_wakeref = tc_cold_block(tc);
873                 return true;
874         }
875
876         port_wakeref = intel_display_power_get(i915, port_power_domain);
877
878         if (!adlp_tc_phy_take_ownership(tc, true) &&
879             !drm_WARN_ON(&i915->drm, tc->mode == TC_PORT_LEGACY)) {
880                 drm_dbg_kms(&i915->drm, "Port %s: can't take PHY ownership\n",
881                             tc->port_name);
882                 goto out_put_port_power;
883         }
884
885         if (!tc_phy_is_ready(tc) &&
886             !drm_WARN_ON(&i915->drm, tc->mode == TC_PORT_LEGACY)) {
887                 drm_dbg_kms(&i915->drm, "Port %s: PHY not ready\n",
888                             tc->port_name);
889                 goto out_release_phy;
890         }
891
892         tc->lock_wakeref = tc_cold_block(tc);
893
894         if (!tc_phy_verify_legacy_or_dp_alt_mode(tc, required_lanes))
895                 goto out_unblock_tc_cold;
896
897         intel_display_power_put(i915, port_power_domain, port_wakeref);
898
899         return true;
900
901 out_unblock_tc_cold:
902         tc_cold_unblock(tc, fetch_and_zero(&tc->lock_wakeref));
903 out_release_phy:
904         adlp_tc_phy_take_ownership(tc, false);
905 out_put_port_power:
906         intel_display_power_put(i915, port_power_domain, port_wakeref);
907
908         return false;
909 }
910
911 static void adlp_tc_phy_disconnect(struct intel_tc_port *tc)
912 {
913         struct drm_i915_private *i915 = tc_to_i915(tc);
914         enum intel_display_power_domain port_power_domain =
915                 tc_port_power_domain(tc);
916         intel_wakeref_t port_wakeref;
917
918         port_wakeref = intel_display_power_get(i915, port_power_domain);
919
920         tc_cold_unblock(tc, fetch_and_zero(&tc->lock_wakeref));
921
922         switch (tc->mode) {
923         case TC_PORT_LEGACY:
924         case TC_PORT_DP_ALT:
925                 adlp_tc_phy_take_ownership(tc, false);
926                 fallthrough;
927         case TC_PORT_TBT_ALT:
928                 break;
929         default:
930                 MISSING_CASE(tc->mode);
931         }
932
933         intel_display_power_put(i915, port_power_domain, port_wakeref);
934 }
935
936 static void adlp_tc_phy_init(struct intel_tc_port *tc)
937 {
938         tc_phy_load_fia_params(tc, true);
939 }
940
941 static const struct intel_tc_phy_ops adlp_tc_phy_ops = {
942         .cold_off_domain = adlp_tc_phy_cold_off_domain,
943         .hpd_live_status = adlp_tc_phy_hpd_live_status,
944         .is_ready = adlp_tc_phy_is_ready,
945         .is_owned = adlp_tc_phy_is_owned,
946         .get_hw_state = adlp_tc_phy_get_hw_state,
947         .connect = adlp_tc_phy_connect,
948         .disconnect = adlp_tc_phy_disconnect,
949         .init = adlp_tc_phy_init,
950 };
951
952 /*
953  * XELPDP TC PHY handlers
954  * ----------------------
955  */
956 static u32 xelpdp_tc_phy_hpd_live_status(struct intel_tc_port *tc)
957 {
958         struct drm_i915_private *i915 = tc_to_i915(tc);
959         struct intel_digital_port *dig_port = tc->dig_port;
960         enum hpd_pin hpd_pin = dig_port->base.hpd_pin;
961         u32 pica_isr_bits = i915->display.hotplug.hpd[hpd_pin];
962         u32 pch_isr_bit = i915->display.hotplug.pch_hpd[hpd_pin];
963         intel_wakeref_t wakeref;
964         u32 pica_isr;
965         u32 pch_isr;
966         u32 mask = 0;
967
968         with_intel_display_power(i915, POWER_DOMAIN_DISPLAY_CORE, wakeref) {
969                 pica_isr = intel_de_read(i915, PICAINTERRUPT_ISR);
970                 pch_isr = intel_de_read(i915, SDEISR);
971         }
972
973         if (pica_isr & (pica_isr_bits & XELPDP_DP_ALT_HOTPLUG_MASK))
974                 mask |= BIT(TC_PORT_DP_ALT);
975         if (pica_isr & (pica_isr_bits & XELPDP_TBT_HOTPLUG_MASK))
976                 mask |= BIT(TC_PORT_TBT_ALT);
977
978         if (tc->legacy_port && (pch_isr & pch_isr_bit))
979                 mask |= BIT(TC_PORT_LEGACY);
980
981         return mask;
982 }
983
984 static bool
985 xelpdp_tc_phy_tcss_power_is_enabled(struct intel_tc_port *tc)
986 {
987         struct drm_i915_private *i915 = tc_to_i915(tc);
988         enum port port = tc->dig_port->base.port;
989
990         assert_tc_cold_blocked(tc);
991
992         return intel_de_read(i915, XELPDP_PORT_BUF_CTL1(port)) & XELPDP_TCSS_POWER_STATE;
993 }
994
995 static bool
996 xelpdp_tc_phy_wait_for_tcss_power(struct intel_tc_port *tc, bool enabled)
997 {
998         struct drm_i915_private *i915 = tc_to_i915(tc);
999
1000         if (wait_for(xelpdp_tc_phy_tcss_power_is_enabled(tc) == enabled, 5)) {
1001                 drm_dbg_kms(&i915->drm,
1002                             "Port %s: timeout waiting for TCSS power to get %s\n",
1003                             tc->port_name,
1004                             enabled ? "enabled" : "disabled");
1005                 return false;
1006         }
1007
1008         return true;
1009 }
1010
1011 static void __xelpdp_tc_phy_enable_tcss_power(struct intel_tc_port *tc, bool enable)
1012 {
1013         struct drm_i915_private *i915 = tc_to_i915(tc);
1014         enum port port = tc->dig_port->base.port;
1015         u32 val;
1016
1017         assert_tc_cold_blocked(tc);
1018
1019         val = intel_de_read(i915, XELPDP_PORT_BUF_CTL1(port));
1020         if (enable)
1021                 val |= XELPDP_TCSS_POWER_REQUEST;
1022         else
1023                 val &= ~XELPDP_TCSS_POWER_REQUEST;
1024         intel_de_write(i915, XELPDP_PORT_BUF_CTL1(port), val);
1025 }
1026
1027 static bool xelpdp_tc_phy_enable_tcss_power(struct intel_tc_port *tc, bool enable)
1028 {
1029         struct drm_i915_private *i915 = tc_to_i915(tc);
1030
1031         __xelpdp_tc_phy_enable_tcss_power(tc, enable);
1032
1033         if ((!tc_phy_wait_for_ready(tc) ||
1034              !xelpdp_tc_phy_wait_for_tcss_power(tc, enable)) &&
1035             !drm_WARN_ON(&i915->drm, tc->mode == TC_PORT_LEGACY)) {
1036                 if (enable) {
1037                         __xelpdp_tc_phy_enable_tcss_power(tc, false);
1038                         xelpdp_tc_phy_wait_for_tcss_power(tc, false);
1039                 }
1040
1041                 return false;
1042         }
1043
1044         return true;
1045 }
1046
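/*
 * Sequence in xelpdp_tc_phy_enable_tcss_power() above: flip
 * XELPDP_TCSS_POWER_REQUEST in XELPDP_PORT_BUF_CTL1, then wait for the
 * PHY ready flag and for XELPDP_TCSS_POWER_STATE to match the request
 * (500 ms and 5 ms timeouts respectively). On a timeout for a
 * non-legacy port an enable request is rolled back; on a legacy port
 * the timeout only triggers a WARN.
 */
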
1047 static void xelpdp_tc_phy_take_ownership(struct intel_tc_port *tc, bool take)
1048 {
1049         struct drm_i915_private *i915 = tc_to_i915(tc);
1050         enum port port = tc->dig_port->base.port;
1051         u32 val;
1052
1053         assert_tc_cold_blocked(tc);
1054
1055         val = intel_de_read(i915, XELPDP_PORT_BUF_CTL1(port));
1056         if (take)
1057                 val |= XELPDP_TC_PHY_OWNERSHIP;
1058         else
1059                 val &= ~XELPDP_TC_PHY_OWNERSHIP;
1060         intel_de_write(i915, XELPDP_PORT_BUF_CTL1(port), val);
1061 }
1062
1063 static bool xelpdp_tc_phy_is_owned(struct intel_tc_port *tc)
1064 {
1065         struct drm_i915_private *i915 = tc_to_i915(tc);
1066         enum port port = tc->dig_port->base.port;
1067
1068         assert_tc_cold_blocked(tc);
1069
1070         return intel_de_read(i915, XELPDP_PORT_BUF_CTL1(port)) & XELPDP_TC_PHY_OWNERSHIP;
1071 }
1072
1073 static void xelpdp_tc_phy_get_hw_state(struct intel_tc_port *tc)
1074 {
1075         struct drm_i915_private *i915 = tc_to_i915(tc);
1076         intel_wakeref_t tc_cold_wref;
1077         enum intel_display_power_domain domain;
1078
1079         tc_cold_wref = __tc_cold_block(tc, &domain);
1080
1081         tc->mode = tc_phy_get_current_mode(tc);
1082         if (tc->mode != TC_PORT_DISCONNECTED)
1083                 tc->lock_wakeref = tc_cold_block(tc);
1084
1085         drm_WARN_ON(&i915->drm,
1086                     (tc->mode == TC_PORT_DP_ALT || tc->mode == TC_PORT_LEGACY) &&
1087                     !xelpdp_tc_phy_tcss_power_is_enabled(tc));
1088
1089         __tc_cold_unblock(tc, domain, tc_cold_wref);
1090 }
1091
1092 static bool xelpdp_tc_phy_connect(struct intel_tc_port *tc, int required_lanes)
1093 {
1094         tc->lock_wakeref = tc_cold_block(tc);
1095
1096         if (tc->mode == TC_PORT_TBT_ALT)
1097                 return true;
1098
1099         if (!xelpdp_tc_phy_enable_tcss_power(tc, true))
1100                 goto out_unblock_tccold;
1101
1102         xelpdp_tc_phy_take_ownership(tc, true);
1103
1104         if (!tc_phy_verify_legacy_or_dp_alt_mode(tc, required_lanes))
1105                 goto out_release_phy;
1106
1107         return true;
1108
1109 out_release_phy:
1110         xelpdp_tc_phy_take_ownership(tc, false);
1111         xelpdp_tc_phy_wait_for_tcss_power(tc, false);
1112
1113 out_unblock_tccold:
1114         tc_cold_unblock(tc, fetch_and_zero(&tc->lock_wakeref));
1115
1116         return false;
1117 }
1118
1119 static void xelpdp_tc_phy_disconnect(struct intel_tc_port *tc)
1120 {
1121         switch (tc->mode) {
1122         case TC_PORT_LEGACY:
1123         case TC_PORT_DP_ALT:
1124                 xelpdp_tc_phy_take_ownership(tc, false);
1125                 xelpdp_tc_phy_enable_tcss_power(tc, false);
1126                 fallthrough;
1127         case TC_PORT_TBT_ALT:
1128                 tc_cold_unblock(tc, fetch_and_zero(&tc->lock_wakeref));
1129                 break;
1130         default:
1131                 MISSING_CASE(tc->mode);
1132         }
1133 }
1134
1135 static const struct intel_tc_phy_ops xelpdp_tc_phy_ops = {
1136         .cold_off_domain = tgl_tc_phy_cold_off_domain,
1137         .hpd_live_status = xelpdp_tc_phy_hpd_live_status,
1138         .is_ready = adlp_tc_phy_is_ready,
1139         .is_owned = xelpdp_tc_phy_is_owned,
1140         .get_hw_state = xelpdp_tc_phy_get_hw_state,
1141         .connect = xelpdp_tc_phy_connect,
1142         .disconnect = xelpdp_tc_phy_disconnect,
1143         .init = adlp_tc_phy_init,
1144 };
1145
1146 /*
1147  * Generic TC PHY handlers
1148  * -----------------------
1149  */
1150 static enum intel_display_power_domain
1151 tc_phy_cold_off_domain(struct intel_tc_port *tc)
1152 {
1153         return tc->phy_ops->cold_off_domain(tc);
1154 }
1155
1156 static u32 tc_phy_hpd_live_status(struct intel_tc_port *tc)
1157 {
1158         struct drm_i915_private *i915 = tc_to_i915(tc);
1159         u32 mask;
1160
1161         mask = tc->phy_ops->hpd_live_status(tc);
1162
1163         /* The sink can be connected only in a single mode. */
1164         drm_WARN_ON_ONCE(&i915->drm, hweight32(mask) > 1);
1165
1166         return mask;
1167 }
1168
1169 static bool tc_phy_is_ready(struct intel_tc_port *tc)
1170 {
1171         return tc->phy_ops->is_ready(tc);
1172 }
1173
1174 static bool tc_phy_is_owned(struct intel_tc_port *tc)
1175 {
1176         return tc->phy_ops->is_owned(tc);
1177 }
1178
1179 static void tc_phy_get_hw_state(struct intel_tc_port *tc)
1180 {
1181         tc->phy_ops->get_hw_state(tc);
1182 }
1183
1184 static bool tc_phy_is_ready_and_owned(struct intel_tc_port *tc,
1185                                       bool phy_is_ready, bool phy_is_owned)
1186 {
1187         struct drm_i915_private *i915 = tc_to_i915(tc);
1188
1189         drm_WARN_ON(&i915->drm, phy_is_owned && !phy_is_ready);
1190
1191         return phy_is_ready && phy_is_owned;
1192 }
1193
1194 static bool tc_phy_is_connected(struct intel_tc_port *tc,
1195                                 enum icl_port_dpll_id port_pll_type)
1196 {
1197         struct intel_encoder *encoder = &tc->dig_port->base;
1198         struct drm_i915_private *i915 = to_i915(encoder->base.dev);
1199         bool phy_is_ready = tc_phy_is_ready(tc);
1200         bool phy_is_owned = tc_phy_is_owned(tc);
1201         bool is_connected;
1202
1203         if (tc_phy_is_ready_and_owned(tc, phy_is_ready, phy_is_owned))
1204                 is_connected = port_pll_type == ICL_PORT_DPLL_MG_PHY;
1205         else
1206                 is_connected = port_pll_type == ICL_PORT_DPLL_DEFAULT;
1207
1208         drm_dbg_kms(&i915->drm,
1209                     "Port %s: PHY connected: %s (ready: %s, owned: %s, pll_type: %s)\n",
1210                     tc->port_name,
1211                     str_yes_no(is_connected),
1212                     str_yes_no(phy_is_ready),
1213                     str_yes_no(phy_is_owned),
1214                     port_pll_type == ICL_PORT_DPLL_DEFAULT ? "tbt" : "non-tbt");
1215
1216         return is_connected;
1217 }
1218
1219 static bool tc_phy_wait_for_ready(struct intel_tc_port *tc)
1220 {
1221         struct drm_i915_private *i915 = tc_to_i915(tc);
1222
1223         if (wait_for(tc_phy_is_ready(tc), 500)) {
1224                 drm_err(&i915->drm, "Port %s: timeout waiting for PHY ready\n",
1225                         tc->port_name);
1226
1227                 return false;
1228         }
1229
1230         return true;
1231 }
1232
1233 static enum tc_port_mode
1234 hpd_mask_to_tc_mode(u32 live_status_mask)
1235 {
1236         if (live_status_mask)
1237                 return fls(live_status_mask) - 1;
1238
1239         return TC_PORT_DISCONNECTED;
1240 }
1241
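/*
 * E.g. with the tc_port_mode enum above, a live status mask of
 * BIT(TC_PORT_DP_ALT) yields TC_PORT_DP_ALT (fls(0x4) - 1 == 2), while
 * an empty mask maps to TC_PORT_DISCONNECTED.
 */
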
1242 static enum tc_port_mode
1243 tc_phy_hpd_live_mode(struct intel_tc_port *tc)
1244 {
1245         u32 live_status_mask = tc_phy_hpd_live_status(tc);
1246
1247         return hpd_mask_to_tc_mode(live_status_mask);
1248 }
1249
1250 static enum tc_port_mode
1251 get_tc_mode_in_phy_owned_state(struct intel_tc_port *tc,
1252                                enum tc_port_mode live_mode)
1253 {
1254         switch (live_mode) {
1255         case TC_PORT_LEGACY:
1256         case TC_PORT_DP_ALT:
1257                 return live_mode;
1258         default:
1259                 MISSING_CASE(live_mode);
1260                 fallthrough;
1261         case TC_PORT_TBT_ALT:
1262         case TC_PORT_DISCONNECTED:
1263                 if (tc->legacy_port)
1264                         return TC_PORT_LEGACY;
1265                 else
1266                         return TC_PORT_DP_ALT;
1267         }
1268 }
1269
1270 static enum tc_port_mode
1271 get_tc_mode_in_phy_not_owned_state(struct intel_tc_port *tc,
1272                                    enum tc_port_mode live_mode)
1273 {
1274         switch (live_mode) {
1275         case TC_PORT_LEGACY:
1276                 return TC_PORT_DISCONNECTED;
1277         case TC_PORT_DP_ALT:
1278         case TC_PORT_TBT_ALT:
1279                 return TC_PORT_TBT_ALT;
1280         default:
1281                 MISSING_CASE(live_mode);
1282                 fallthrough;
1283         case TC_PORT_DISCONNECTED:
1284                 if (tc->legacy_port)
1285                         return TC_PORT_DISCONNECTED;
1286                 else
1287                         return TC_PORT_TBT_ALT;
1288         }
1289 }
1290
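/*
 * Combined readout behaviour of the two helpers above: a PHY that is
 * ready and owned reads back a live legacy or DP-alt HPD directly; with
 * no live HPD (or a TBT one) the mode falls back to legacy for legacy
 * ports and DP-alt otherwise. A PHY that is not owned can only read out
 * as TBT-alt or disconnected: a live legacy HPD without ownership means
 * disconnected, a DP-alt/TBT HPD means TBT-alt, and no HPD means
 * disconnected for legacy ports and TBT-alt otherwise.
 */
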
1291 static enum tc_port_mode
1292 tc_phy_get_current_mode(struct intel_tc_port *tc)
1293 {
1294         struct drm_i915_private *i915 = tc_to_i915(tc);
1295         enum tc_port_mode live_mode = tc_phy_hpd_live_mode(tc);
1296         bool phy_is_ready;
1297         bool phy_is_owned;
1298         enum tc_port_mode mode;
1299
1300         /*
1301          * For legacy ports the IOM firmware initializes the PHY during boot-up
1302          * and system resume whether or not a sink is connected. Wait here for
1303          * that initialization to complete.
1304          */
1305         if (tc->legacy_port)
1306                 tc_phy_wait_for_ready(tc);
1307
1308         phy_is_ready = tc_phy_is_ready(tc);
1309         phy_is_owned = tc_phy_is_owned(tc);
1310
1311         if (!tc_phy_is_ready_and_owned(tc, phy_is_ready, phy_is_owned)) {
1312                 mode = get_tc_mode_in_phy_not_owned_state(tc, live_mode);
1313         } else {
1314                 drm_WARN_ON(&i915->drm, live_mode == TC_PORT_TBT_ALT);
1315                 mode = get_tc_mode_in_phy_owned_state(tc, live_mode);
1316         }
1317
1318         drm_dbg_kms(&i915->drm,
1319                     "Port %s: PHY mode: %s (ready: %s, owned: %s, HPD: %s)\n",
1320                     tc->port_name,
1321                     tc_port_mode_name(mode),
1322                     str_yes_no(phy_is_ready),
1323                     str_yes_no(phy_is_owned),
1324                     tc_port_mode_name(live_mode));
1325
1326         return mode;
1327 }
1328
1329 static enum tc_port_mode default_tc_mode(struct intel_tc_port *tc)
1330 {
1331         if (tc->legacy_port)
1332                 return TC_PORT_LEGACY;
1333
1334         return TC_PORT_TBT_ALT;
1335 }
1336
1337 static enum tc_port_mode
1338 hpd_mask_to_target_mode(struct intel_tc_port *tc, u32 live_status_mask)
1339 {
1340         enum tc_port_mode mode = hpd_mask_to_tc_mode(live_status_mask);
1341
1342         if (mode != TC_PORT_DISCONNECTED)
1343                 return mode;
1344
1345         return default_tc_mode(tc);
1346 }
1347
1348 static enum tc_port_mode
1349 tc_phy_get_target_mode(struct intel_tc_port *tc)
1350 {
1351         u32 live_status_mask = tc_phy_hpd_live_status(tc);
1352
1353         return hpd_mask_to_target_mode(tc, live_status_mask);
1354 }
1355
1356 static void tc_phy_connect(struct intel_tc_port *tc, int required_lanes)
1357 {
1358         struct drm_i915_private *i915 = tc_to_i915(tc);
1359         u32 live_status_mask = tc_phy_hpd_live_status(tc);
1360         bool connected;
1361
1362         tc_port_fixup_legacy_flag(tc, live_status_mask);
1363
1364         tc->mode = hpd_mask_to_target_mode(tc, live_status_mask);
1365
1366         connected = tc->phy_ops->connect(tc, required_lanes);
1367         if (!connected && tc->mode != default_tc_mode(tc)) {
1368                 tc->mode = default_tc_mode(tc);
1369                 connected = tc->phy_ops->connect(tc, required_lanes);
1370         }
1371
1372         drm_WARN_ON(&i915->drm, !connected);
1373 }
1374
1375 static void tc_phy_disconnect(struct intel_tc_port *tc)
1376 {
1377         if (tc->mode != TC_PORT_DISCONNECTED) {
1378                 tc->phy_ops->disconnect(tc);
1379                 tc->mode = TC_PORT_DISCONNECTED;
1380         }
1381 }
1382
1383 static void tc_phy_init(struct intel_tc_port *tc)
1384 {
1385         mutex_lock(&tc->lock);
1386         tc->phy_ops->init(tc);
1387         mutex_unlock(&tc->lock);
1388 }
1389
1390 static void intel_tc_port_reset_mode(struct intel_tc_port *tc,
1391                                      int required_lanes, bool force_disconnect)
1392 {
1393         struct drm_i915_private *i915 = tc_to_i915(tc);
1394         struct intel_digital_port *dig_port = tc->dig_port;
1395         enum tc_port_mode old_tc_mode = tc->mode;
1396
1397         intel_display_power_flush_work(i915);
1398         if (!intel_tc_cold_requires_aux_pw(dig_port)) {
1399                 enum intel_display_power_domain aux_domain;
1400                 bool aux_powered;
1401
1402                 aux_domain = intel_aux_power_domain(dig_port);
1403                 aux_powered = intel_display_power_is_enabled(i915, aux_domain);
1404                 drm_WARN_ON(&i915->drm, aux_powered);
1405         }
1406
1407         tc_phy_disconnect(tc);
1408         if (!force_disconnect)
1409                 tc_phy_connect(tc, required_lanes);
1410
1411         drm_dbg_kms(&i915->drm, "Port %s: TC port mode reset (%s -> %s)\n",
1412                     tc->port_name,
1413                     tc_port_mode_name(old_tc_mode),
1414                     tc_port_mode_name(tc->mode));
1415 }
1416
1417 static bool intel_tc_port_needs_reset(struct intel_tc_port *tc)
1418 {
1419         return tc_phy_get_target_mode(tc) != tc->mode;
1420 }
1421
1422 static void intel_tc_port_update_mode(struct intel_tc_port *tc,
1423                                       int required_lanes, bool force_disconnect)
1424 {
1425         if (force_disconnect ||
1426             intel_tc_port_needs_reset(tc))
1427                 intel_tc_port_reset_mode(tc, required_lanes, force_disconnect);
1428 }
1429
1430 static void __intel_tc_port_get_link(struct intel_tc_port *tc)
1431 {
1432         tc->link_refcount++;
1433 }
1434
1435 static void __intel_tc_port_put_link(struct intel_tc_port *tc)
1436 {
1437         tc->link_refcount--;
1438 }
1439
1440 static bool tc_port_is_enabled(struct intel_tc_port *tc)
1441 {
1442         struct drm_i915_private *i915 = tc_to_i915(tc);
1443         struct intel_digital_port *dig_port = tc->dig_port;
1444
1445         assert_tc_port_power_enabled(tc);
1446
1447         return intel_de_read(i915, DDI_BUF_CTL(dig_port->base.port)) &
1448                DDI_BUF_CTL_ENABLE;
1449 }
1450
1451 /**
1452  * intel_tc_port_init_mode: Read out HW state and init the given port's TypeC mode
1453  * @dig_port: digital port
1454  *
1455  * Read out the HW state and initialize the TypeC mode of @dig_port. The mode
1456  * will be locked until intel_tc_port_sanitize_mode() is called.
1457  */
1458 void intel_tc_port_init_mode(struct intel_digital_port *dig_port)
1459 {
1460         struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
1461         struct intel_tc_port *tc = to_tc_port(dig_port);
1462         bool update_mode = false;
1463
1464         mutex_lock(&tc->lock);
1465
1466         drm_WARN_ON(&i915->drm, tc->mode != TC_PORT_DISCONNECTED);
1467         drm_WARN_ON(&i915->drm, tc->lock_wakeref);
1468         drm_WARN_ON(&i915->drm, tc->link_refcount);
1469
1470         tc_phy_get_hw_state(tc);
1471         /*
1472          * Save the initial mode for the state check in
1473          * intel_tc_port_sanitize_mode().
1474          */
1475         tc->init_mode = tc->mode;
1476
1477         /*
1478          * The PHY needs to be connected for AUX to work during HW readout and
1479          * MST topology resume, but the PHY mode can only be changed if the
1480          * port is disabled.
1481          *
1482          * An exception is the case where BIOS leaves the PHY incorrectly
1483          * disconnected on an enabled legacy port. Work around that by
1484          * connecting the PHY even though the port is enabled. This doesn't
1485          * cause a problem as the PHY ownership state is ignored by the
1486          * IOM/TCSS firmware (only display can own the PHY in that case).
1487          */
1488         if (!tc_port_is_enabled(tc)) {
1489                 update_mode = true;
1490         } else if (tc->mode == TC_PORT_DISCONNECTED) {
1491                 drm_WARN_ON(&i915->drm, !tc->legacy_port);
1492                 drm_err(&i915->drm,
1493                         "Port %s: PHY disconnected on enabled port, connecting it\n",
1494                         tc->port_name);
1495                 update_mode = true;
1496         }
1497
1498         if (update_mode)
1499                 intel_tc_port_update_mode(tc, 1, false);
1500
1501         /* Prevent changing tc->mode until intel_tc_port_sanitize_mode() is called. */
1502         __intel_tc_port_get_link(tc);
1503
1504         mutex_unlock(&tc->lock);
1505 }
1506
1507 static bool tc_port_has_active_links(struct intel_tc_port *tc,
1508                                      const struct intel_crtc_state *crtc_state)
1509 {
1510         struct drm_i915_private *i915 = tc_to_i915(tc);
1511         struct intel_digital_port *dig_port = tc->dig_port;
1512         enum icl_port_dpll_id pll_type = ICL_PORT_DPLL_DEFAULT;
1513         int active_links = 0;
1514
1515         if (dig_port->dp.is_mst) {
1516                 /* TODO: get the PLL type for MST, once HW readout is done for it. */
1517                 active_links = intel_dp_mst_encoder_active_links(dig_port);
1518         } else if (crtc_state && crtc_state->hw.active) {
1519                 pll_type = intel_ddi_port_pll_type(&dig_port->base, crtc_state);
1520                 active_links = 1;
1521         }
1522
1523         if (active_links && !tc_phy_is_connected(tc, pll_type))
1524                 drm_err(&i915->drm,
1525                         "Port %s: PHY disconnected with %d active link(s)\n",
1526                         tc->port_name, active_links);
1527
1528         return active_links;
1529 }
1530
1531 /**
1532  * intel_tc_port_sanitize_mode: Sanitize the given port's TypeC mode
1533  * @dig_port: digital port
1534  * @crtc_state: atomic state of CRTC connected to @dig_port
1535  *
1536  * Sanitize @dig_port's TypeC mode wrt. the encoder's state right after driver
1537  * loading and system resume:
1538  * If the encoder is enabled keep the TypeC mode/PHY connected state locked until
1539  * the encoder is disabled.
1540  * If the encoder is disabled make sure the PHY is disconnected.
1541  * @crtc_state is valid if @dig_port is enabled, NULL otherwise.
1542  */
1543 void intel_tc_port_sanitize_mode(struct intel_digital_port *dig_port,
1544                                  const struct intel_crtc_state *crtc_state)
1545 {
1546         struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
1547         struct intel_tc_port *tc = to_tc_port(dig_port);
1548
1549         mutex_lock(&tc->lock);
1550
1551         drm_WARN_ON(&i915->drm, tc->link_refcount != 1);
1552         if (!tc_port_has_active_links(tc, crtc_state)) {
1553                 /*
1554                  * TBT-alt is the default mode in any case where the PHY ownership
1555                  * is not held (regardless of the sink's connected live state), so
1556                  * we'll just switch to disconnected mode from it here without
1557                  * a note.
1558                  */
1559                 if (tc->init_mode != TC_PORT_TBT_ALT &&
1560                     tc->init_mode != TC_PORT_DISCONNECTED)
1561                         drm_dbg_kms(&i915->drm,
1562                                     "Port %s: PHY left in %s mode on disabled port, disconnecting it\n",
1563                                     tc->port_name,
1564                                     tc_port_mode_name(tc->init_mode));
1565                 tc_phy_disconnect(tc);
1566                 __intel_tc_port_put_link(tc);
1567         }
1568
1569         drm_dbg_kms(&i915->drm, "Port %s: sanitize mode (%s)\n",
1570                     tc->port_name,
1571                     tc_port_mode_name(tc->mode));
1572
1573         mutex_unlock(&tc->lock);
1574 }
1575
1576 /*
1577  * The type-C ports are different because even when they are connected, they may
1578  * not be available/usable by the graphics driver: see the comment on
1579  * icl_tc_phy_connect(). So in our driver, instead of adding the additional
1580  * concept of "usable" and making everything check for "connected and usable",
1581  * we define a port as "connected" only when it is both connected and usable
1582  * by the rest of the driver. That maintains the old assumption that
1583  * connected ports are usable, and avoids exposing to users objects they
1584  * can't really use.
1585  */
1586 bool intel_tc_port_connected_locked(struct intel_encoder *encoder)
1587 {
1588         struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
1589         struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
1590         struct intel_tc_port *tc = to_tc_port(dig_port);
1591         u32 mask = ~0;
1592
1593         drm_WARN_ON(&i915->drm, !intel_tc_port_ref_held(dig_port));
1594
1595         if (tc->mode != TC_PORT_DISCONNECTED)
1596                 mask = BIT(tc->mode);
1597
1598         return tc_phy_hpd_live_status(tc) & mask;
1599 }
1600
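/**
 * intel_tc_port_connected: Check if a sink is connected to the port
 * @encoder: encoder on the TypeC port
 *
 * Like intel_tc_port_connected_locked(), but taking the TypeC port lock
 * around the live status check.
 *
 * Return: %true if the PHY reports a connected sink for the current mode.
 */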
1601 bool intel_tc_port_connected(struct intel_encoder *encoder)
1602 {
1603         struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
1604         struct intel_tc_port *tc = to_tc_port(dig_port);
1605         bool is_connected;
1606
1607         mutex_lock(&tc->lock);
1608         is_connected = intel_tc_port_connected_locked(encoder);
1609         mutex_unlock(&tc->lock);
1610
1611         return is_connected;
1612 }
1613
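/*
 * Check - with the TypeC port lock taken - whether an enabled DP-alt mode
 * link needs to be reset.
 */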
1614 static bool __intel_tc_port_link_needs_reset(struct intel_tc_port *tc)
1615 {
1616         bool ret;
1617
1618         mutex_lock(&tc->lock);
1619
1620         ret = tc->link_refcount &&
1621               tc->mode == TC_PORT_DP_ALT &&
1622               intel_tc_port_needs_reset(tc);
1623
1624         mutex_unlock(&tc->lock);
1625
1626         return ret;
1627 }
1628
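/**
 * intel_tc_port_link_needs_reset: Check if the port's link needs a reset
 * @dig_port: digital port
 *
 * Return: %true if @dig_port is a TypeC port with an enabled DP-alt mode link
 * that needs to be reset, %false otherwise.
 */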
1629 bool intel_tc_port_link_needs_reset(struct intel_digital_port *dig_port)
1630 {
1631         struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
1632         enum phy phy = intel_port_to_phy(i915, dig_port->base.port);
1633
1634         if (!intel_phy_is_tc(i915, phy))
1635                 return false;
1636
1637         return __intel_tc_port_link_needs_reset(to_tc_port(dig_port));
1638 }
1639
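/*
 * Force a modeset on all pipes driven by @tc's encoder by flagging their
 * connectors as changed in @state and committing it, unless the link reset
 * turns out to be no longer required.
 */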
1640 static int reset_link_commit(struct intel_tc_port *tc,
1641                              struct intel_atomic_state *state,
1642                              struct drm_modeset_acquire_ctx *ctx)
1643 {
1644         struct drm_i915_private *i915 = tc_to_i915(tc);
1645         struct intel_digital_port *dig_port = tc->dig_port;
1646         struct intel_dp *intel_dp = enc_to_intel_dp(&dig_port->base);
1647         struct intel_crtc *crtc;
1648         u8 pipe_mask;
1649         int ret;
1650
1651         ret = drm_modeset_lock(&i915->drm.mode_config.connection_mutex, ctx);
1652         if (ret)
1653                 return ret;
1654
1655         ret = intel_dp_get_active_pipes(intel_dp, ctx, &pipe_mask);
1656         if (ret)
1657                 return ret;
1658
1659         if (!pipe_mask)
1660                 return 0;
1661
1662         for_each_intel_crtc_in_pipe_mask(&i915->drm, crtc, pipe_mask) {
1663                 struct intel_crtc_state *crtc_state;
1664
1665                 crtc_state = intel_atomic_get_crtc_state(&state->base, crtc);
1666                 if (IS_ERR(crtc_state))
1667                         return PTR_ERR(crtc_state);
1668
1669                 crtc_state->uapi.connectors_changed = true;
1670         }
1671
1672         if (!__intel_tc_port_link_needs_reset(tc))
1673                 return 0;
1674
1675         return drm_atomic_commit(&state->base);
1676 }
1677
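/* Reset @tc's link via an internal atomic commit, retrying on modeset lock contention. */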
1678 static int reset_link(struct intel_tc_port *tc)
1679 {
1680         struct drm_i915_private *i915 = tc_to_i915(tc);
1681         struct drm_modeset_acquire_ctx ctx;
1682         struct drm_atomic_state *_state;
1683         struct intel_atomic_state *state;
1684         int ret;
1685
1686         _state = drm_atomic_state_alloc(&i915->drm);
1687         if (!_state)
1688                 return -ENOMEM;
1689
1690         state = to_intel_atomic_state(_state);
1691         state->internal = true;
1692
1693         intel_modeset_lock_ctx_retry(&ctx, state, 0, ret)
1694                 ret = reset_link_commit(tc, state, &ctx);
1695
1696         drm_atomic_state_put(&state->base);
1697
1698         return ret;
1699 }
1700
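/*
 * Delayed work handler resetting the link of a DP-alt mode port whose sink
 * got disconnected, taking the modeset locks as required.
 */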
1701 static void intel_tc_port_link_reset_work(struct work_struct *work)
1702 {
1703         struct intel_tc_port *tc =
1704                 container_of(work, struct intel_tc_port, link_reset_work.work);
1705         struct drm_i915_private *i915 = tc_to_i915(tc);
1706         int ret;
1707
1708         if (!__intel_tc_port_link_needs_reset(tc))
1709                 return;
1710
1711         mutex_lock(&i915->drm.mode_config.mutex);
1712
1713         drm_dbg_kms(&i915->drm,
1714                     "Port %s: TypeC DP-alt sink disconnected, resetting link\n",
1715                     tc->port_name);
1716         ret = reset_link(tc);
1717         drm_WARN_ON(&i915->drm, ret);
1718
1719         mutex_unlock(&i915->drm.mode_config.mutex);
1720 }
1721
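/**
 * intel_tc_port_link_reset: Schedule a reset of the port's link if required
 * @dig_port: digital port
 *
 * Schedule the delayed work resetting @dig_port's link if the link needs it.
 *
 * Return: %true if the reset work was scheduled.
 */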
1722 bool intel_tc_port_link_reset(struct intel_digital_port *dig_port)
1723 {
1724         if (!intel_tc_port_link_needs_reset(dig_port))
1725                 return false;
1726
1727         queue_delayed_work(system_unbound_wq,
1728                            &to_tc_port(dig_port)->link_reset_work,
1729                            msecs_to_jiffies(2000));
1730
1731         return true;
1732 }
1733
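/**
 * intel_tc_port_link_cancel_reset_work: Cancel a pending link reset
 * @dig_port: digital port
 *
 * Cancel the delayed work resetting @dig_port's link, if one is pending.
 * A no-op for non-TypeC ports.
 */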
1734 void intel_tc_port_link_cancel_reset_work(struct intel_digital_port *dig_port)
1735 {
1736         struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
1737         enum phy phy = intel_port_to_phy(i915, dig_port->base.port);
1738         struct intel_tc_port *tc = to_tc_port(dig_port);
1739
1740         if (!intel_phy_is_tc(i915, phy))
1741                 return;
1742
1743         cancel_delayed_work(&tc->link_reset_work);
1744 }
1745
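/*
 * Lock the TypeC port, cancelling any pending PHY disconnect work and - if no
 * link reference is held - updating the port mode for @required_lanes.
 */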
1746 static void __intel_tc_port_lock(struct intel_tc_port *tc,
1747                                  int required_lanes)
1748 {
1749         struct drm_i915_private *i915 = tc_to_i915(tc);
1750
1751         mutex_lock(&tc->lock);
1752
1753         cancel_delayed_work(&tc->disconnect_phy_work);
1754
1755         if (!tc->link_refcount)
1756                 intel_tc_port_update_mode(tc, required_lanes,
1757                                           false);
1758
1759         drm_WARN_ON(&i915->drm, tc->mode == TC_PORT_DISCONNECTED);
1760         drm_WARN_ON(&i915->drm, tc->mode != TC_PORT_TBT_ALT &&
1761                                 !tc_phy_is_owned(tc));
1762 }
1763
1764 void intel_tc_port_lock(struct intel_digital_port *dig_port)
1765 {
1766         __intel_tc_port_lock(to_tc_port(dig_port), 1);
1767 }
1768
1769 /*
1770  * Disconnect the given digital port from its TypeC PHY (handing control of
1771  * the PHY back to the TypeC subsystem). This happens in a delayed manner,
1772  * after each AUX transaction and modeset disable.
1773  */
1774 static void intel_tc_port_disconnect_phy_work(struct work_struct *work)
1775 {
1776         struct intel_tc_port *tc =
1777                 container_of(work, struct intel_tc_port, disconnect_phy_work.work);
1778
1779         mutex_lock(&tc->lock);
1780
1781         if (!tc->link_refcount)
1782                 intel_tc_port_update_mode(tc, 1, true);
1783
1784         mutex_unlock(&tc->lock);
1785 }
1786
1787 /**
1788  * intel_tc_port_flush_work: Flush the work disconnecting the PHY
1789  * @dig_port: digital port
1790  *
1791  * Flush the delayed work disconnecting an idle PHY.
1792  */
1793 static void intel_tc_port_flush_work(struct intel_digital_port *dig_port)
1794 {
1795         flush_delayed_work(&to_tc_port(dig_port)->disconnect_phy_work);
1796 }
1797
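/**
 * intel_tc_port_suspend: Prepare a TypeC port for system suspend
 * @dig_port: digital port
 *
 * Cancel any pending link reset work and flush the pending PHY disconnect
 * work before suspending.
 */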
1798 void intel_tc_port_suspend(struct intel_digital_port *dig_port)
1799 {
1800         struct intel_tc_port *tc = to_tc_port(dig_port);
1801
1802         cancel_delayed_work_sync(&tc->link_reset_work);
1803         intel_tc_port_flush_work(dig_port);
1804 }
1805
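/**
 * intel_tc_port_unlock: Unlock a TypeC port
 * @dig_port: digital port
 *
 * Unlock @dig_port, scheduling a delayed PHY disconnect if the port is left
 * idle with its PHY still connected.
 */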
1806 void intel_tc_port_unlock(struct intel_digital_port *dig_port)
1807 {
1808         struct intel_tc_port *tc = to_tc_port(dig_port);
1809
1810         if (!tc->link_refcount && tc->mode != TC_PORT_DISCONNECTED)
1811                 queue_delayed_work(system_unbound_wq, &tc->disconnect_phy_work,
1812                                    msecs_to_jiffies(1000));
1813
1814         mutex_unlock(&tc->lock);
1815 }
1816
1817 bool intel_tc_port_ref_held(struct intel_digital_port *dig_port)
1818 {
1819         struct intel_tc_port *tc = to_tc_port(dig_port);
1820
1821         return mutex_is_locked(&tc->lock) ||
1822                tc->link_refcount;
1823 }
1824
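/**
 * intel_tc_port_get_link: Get a link reference on a TypeC port
 * @dig_port: digital port
 * @required_lanes: number of lanes the link will use
 *
 * Acquire a link reference on @dig_port, keeping its TypeC mode fixed until
 * the reference is dropped via intel_tc_port_put_link().
 */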
1825 void intel_tc_port_get_link(struct intel_digital_port *dig_port,
1826                             int required_lanes)
1827 {
1828         struct intel_tc_port *tc = to_tc_port(dig_port);
1829
1830         __intel_tc_port_lock(tc, required_lanes);
1831         __intel_tc_port_get_link(tc);
1832         intel_tc_port_unlock(dig_port);
1833 }
1834
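/**
 * intel_tc_port_put_link: Drop a link reference on a TypeC port
 * @dig_port: digital port
 *
 * Drop a link reference acquired via intel_tc_port_get_link(), flushing the
 * PHY disconnect work so an idle PHY is handed back without delay.
 */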
1835 void intel_tc_port_put_link(struct intel_digital_port *dig_port)
1836 {
1837         struct intel_tc_port *tc = to_tc_port(dig_port);
1838
1839         intel_tc_port_lock(dig_port);
1840         __intel_tc_port_put_link(tc);
1841         intel_tc_port_unlock(dig_port);
1842
1843         /*
1844          * The firmware will not update the HPD status of other TypeC ports
1845          * that are active in DP-alt mode with their sink disconnected, until
1846          * this port is disabled and its PHY gets disconnected. Make sure this
1847          * happens in a timely manner by disconnecting the PHY synchronously.
1848          */
1849         intel_tc_port_flush_work(dig_port);
1850 }
1851
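/**
 * intel_tc_port_init: Initialize the TypeC port state of a digital port
 * @dig_port: digital port
 * @is_legacy: %true if the port is wired to a static/legacy connector
 *
 * Allocate and initialize @dig_port's TypeC port state, selecting the
 * platform specific PHY ops and reading out the initial port mode.
 *
 * Return: 0 on success, or a negative error code on failure.
 */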
1852 int intel_tc_port_init(struct intel_digital_port *dig_port, bool is_legacy)
1853 {
1854         struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
1855         struct intel_tc_port *tc;
1856         enum port port = dig_port->base.port;
1857         enum tc_port tc_port = intel_port_to_tc(i915, port);
1858
1859         if (drm_WARN_ON(&i915->drm, tc_port == TC_PORT_NONE))
1860                 return -EINVAL;
1861
1862         tc = kzalloc(sizeof(*tc), GFP_KERNEL);
1863         if (!tc)
1864                 return -ENOMEM;
1865
1866         dig_port->tc = tc;
1867         tc->dig_port = dig_port;
1868
1869         if (DISPLAY_VER(i915) >= 14)
1870                 tc->phy_ops = &xelpdp_tc_phy_ops;
1871         else if (DISPLAY_VER(i915) >= 13)
1872                 tc->phy_ops = &adlp_tc_phy_ops;
1873         else if (DISPLAY_VER(i915) >= 12)
1874                 tc->phy_ops = &tgl_tc_phy_ops;
1875         else
1876                 tc->phy_ops = &icl_tc_phy_ops;
1877
1878         tc->port_name = kasprintf(GFP_KERNEL, "%c/TC#%d", port_name(port),
1879                                   tc_port + 1);
1880         if (!tc->port_name) {
1881                 kfree(tc);
1882                 return -ENOMEM;
1883         }
1884
1885         mutex_init(&tc->lock);
1886         /* TODO: Combine the two work items */
1887         INIT_DELAYED_WORK(&tc->disconnect_phy_work, intel_tc_port_disconnect_phy_work);
1888         INIT_DELAYED_WORK(&tc->link_reset_work, intel_tc_port_link_reset_work);
1889         tc->legacy_port = is_legacy;
1890         tc->mode = TC_PORT_DISCONNECTED;
1891         tc->link_refcount = 0;
1892
1893         tc_phy_init(tc);
1894
1895         intel_tc_port_init_mode(dig_port);
1896
1897         return 0;
1898 }
1899
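/**
 * intel_tc_port_cleanup: Free the TypeC port state of a digital port
 * @dig_port: digital port
 *
 * Flush any pending work and free the state allocated by intel_tc_port_init().
 */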
1900 void intel_tc_port_cleanup(struct intel_digital_port *dig_port)
1901 {
1902         intel_tc_port_suspend(dig_port);
1903
1904         kfree(dig_port->tc->port_name);
1905         kfree(dig_port->tc);
1906         dig_port->tc = NULL;
1907 }