drm/i915/display/adl_p: Drop earlier return in tc_has_modular_fia()
[linux.git] drivers/gpu/drm/i915/display/intel_tc.c
1 // SPDX-License-Identifier: MIT
2 /*
3  * Copyright © 2019 Intel Corporation
4  */
5
6 #include "i915_drv.h"
7 #include "intel_display.h"
8 #include "intel_display_types.h"
9 #include "intel_dp_mst.h"
10 #include "intel_tc.h"
11
12 static const char *tc_port_mode_name(enum tc_port_mode mode)
13 {
14         static const char * const names[] = {
15                 [TC_PORT_TBT_ALT] = "tbt-alt",
16                 [TC_PORT_DP_ALT] = "dp-alt",
17                 [TC_PORT_LEGACY] = "legacy",
18         };
19
20         if (WARN_ON(mode >= ARRAY_SIZE(names)))
21                 mode = TC_PORT_TBT_ALT;
22
23         return names[mode];
24 }
25
26 static enum intel_display_power_domain
27 tc_cold_get_power_domain(struct intel_digital_port *dig_port)
28 {
29         struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
30
31         if (DISPLAY_VER(i915) == 11)
32                 return intel_legacy_aux_to_power_domain(dig_port->aux_ch);
33         else
34                 return POWER_DOMAIN_TC_COLD_OFF;
35 }
36
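/*
 * Grab a power domain reference that blocks the PHY's TCCOLD power state
 * (see tc_cold_get_power_domain()). On DISPLAY_VER 11 only legacy ports
 * need this; for the other ports 0 is returned, which tc_cold_unblock()
 * treats as "nothing to release".
 */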
37 static intel_wakeref_t
38 tc_cold_block(struct intel_digital_port *dig_port)
39 {
40         struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
41         enum intel_display_power_domain domain;
42
43         if (DISPLAY_VER(i915) == 11 && !dig_port->tc_legacy_port)
44                 return 0;
45
46         domain = tc_cold_get_power_domain(dig_port);
47         return intel_display_power_get(i915, domain);
48 }
49
50 static void
51 tc_cold_unblock(struct intel_digital_port *dig_port, intel_wakeref_t wakeref)
52 {
53         struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
54         enum intel_display_power_domain domain;
55
56         /*
57          * wakeref == -1 means an error occurred while saving save_depot_stack,
58          * but power should still be put down. 0 is an invalid save_depot_stack
59          * id, so it can be used to skip the put for non-legacy TC ports.
60          */
61         if (wakeref == 0)
62                 return;
63
64         domain = tc_cold_get_power_domain(dig_port);
65         intel_display_power_put_async(i915, domain, wakeref);
66 }
67
68 static void
69 assert_tc_cold_blocked(struct intel_digital_port *dig_port)
70 {
71         struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
72         bool enabled;
73
74         if (DISPLAY_VER(i915) == 11 && !dig_port->tc_legacy_port)
75                 return;
76
77         enabled = intel_display_power_is_enabled(i915,
78                                                  tc_cold_get_power_domain(dig_port));
79         drm_WARN_ON(&i915->drm, !enabled);
80 }
81
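/*
 * Return this port's DP lane assignment field from PORT_TX_DFLEXDPSP,
 * shifted down to the low bits. The caller must keep TCCOLD blocked while
 * calling this (see assert_tc_cold_blocked()).
 */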
82 u32 intel_tc_port_get_lane_mask(struct intel_digital_port *dig_port)
83 {
84         struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
85         struct intel_uncore *uncore = &i915->uncore;
86         u32 lane_mask;
87
88         lane_mask = intel_uncore_read(uncore,
89                                       PORT_TX_DFLEXDPSP(dig_port->tc_phy_fia));
90
91         drm_WARN_ON(&i915->drm, lane_mask == 0xffffffff);
92         assert_tc_cold_blocked(dig_port);
93
94         lane_mask &= DP_LANE_ASSIGNMENT_MASK(dig_port->tc_phy_fia_idx);
95         return lane_mask >> DP_LANE_ASSIGNMENT_SHIFT(dig_port->tc_phy_fia_idx);
96 }
97
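/*
 * Return this port's Type-C pin assignment field from PORT_TX_DFLEXPA1,
 * shifted down to the low bits. TCCOLD must be blocked by the caller here
 * as well.
 */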
98 u32 intel_tc_port_get_pin_assignment_mask(struct intel_digital_port *dig_port)
99 {
100         struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
101         struct intel_uncore *uncore = &i915->uncore;
102         u32 pin_mask;
103
104         pin_mask = intel_uncore_read(uncore,
105                                      PORT_TX_DFLEXPA1(dig_port->tc_phy_fia));
106
107         drm_WARN_ON(&i915->drm, pin_mask == 0xffffffff);
108         assert_tc_cold_blocked(dig_port);
109
110         return (pin_mask & DP_PIN_ASSIGNMENT_MASK(dig_port->tc_phy_fia_idx)) >>
111                DP_PIN_ASSIGNMENT_SHIFT(dig_port->tc_phy_fia_idx);
112 }
113
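/*
 * Return the maximum number of lanes this port can use. Only DP-alt mode
 * is limited by the FIA lane assignment; TBT-alt and legacy modes always
 * report 4 lanes.
 */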
114 int intel_tc_port_fia_max_lane_count(struct intel_digital_port *dig_port)
115 {
116         struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
117         intel_wakeref_t wakeref;
118         u32 lane_mask;
119
120         if (dig_port->tc_mode != TC_PORT_DP_ALT)
121                 return 4;
122
123         assert_tc_cold_blocked(dig_port);
124
125         lane_mask = 0;
126         with_intel_display_power(i915, POWER_DOMAIN_DISPLAY_CORE, wakeref)
127                 lane_mask = intel_tc_port_get_lane_mask(dig_port);
128
129         switch (lane_mask) {
130         default:
131                 MISSING_CASE(lane_mask);
132                 fallthrough;
133         case 0x1:
134         case 0x2:
135         case 0x4:
136         case 0x8:
137                 return 1;
138         case 0x3:
139         case 0xc:
140                 return 2;
141         case 0xf:
142                 return 4;
143         }
144 }
145
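/*
 * Tell the FIA via DFLEXDPMLE1 which ML0-ML3 lanes the display engine will
 * drive on this port, accounting for lane reversal in the 1 and 2 lane
 * cases.
 */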
146 void intel_tc_port_set_fia_lane_count(struct intel_digital_port *dig_port,
147                                       int required_lanes)
148 {
149         struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
150         bool lane_reversal = dig_port->saved_port_bits & DDI_BUF_PORT_REVERSAL;
151         struct intel_uncore *uncore = &i915->uncore;
152         u32 val;
153
154         drm_WARN_ON(&i915->drm,
155                     lane_reversal && dig_port->tc_mode != TC_PORT_LEGACY);
156
157         assert_tc_cold_blocked(dig_port);
158
159         val = intel_uncore_read(uncore,
160                                 PORT_TX_DFLEXDPMLE1(dig_port->tc_phy_fia));
161         val &= ~DFLEXDPMLE1_DPMLETC_MASK(dig_port->tc_phy_fia_idx);
162
163         switch (required_lanes) {
164         case 1:
165                 val |= lane_reversal ?
166                         DFLEXDPMLE1_DPMLETC_ML3(dig_port->tc_phy_fia_idx) :
167                         DFLEXDPMLE1_DPMLETC_ML0(dig_port->tc_phy_fia_idx);
168                 break;
169         case 2:
170                 val |= lane_reversal ?
171                         DFLEXDPMLE1_DPMLETC_ML3_2(dig_port->tc_phy_fia_idx) :
172                         DFLEXDPMLE1_DPMLETC_ML1_0(dig_port->tc_phy_fia_idx);
173                 break;
174         case 4:
175                 val |= DFLEXDPMLE1_DPMLETC_ML3_0(dig_port->tc_phy_fia_idx);
176                 break;
177         default:
178                 MISSING_CASE(required_lanes);
179         }
180
181         intel_uncore_write(uncore,
182                            PORT_TX_DFLEXDPMLE1(dig_port->tc_phy_fia), val);
183 }
184
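/*
 * The tc_legacy_port flag comes from the VBT. If the live status reports a
 * mode the flag doesn't allow, assume the VBT is wrong and flip the flag,
 * trusting the hardware instead.
 */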
185 static void tc_port_fixup_legacy_flag(struct intel_digital_port *dig_port,
186                                       u32 live_status_mask)
187 {
188         struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
189         u32 valid_hpd_mask;
190
191         if (dig_port->tc_legacy_port)
192                 valid_hpd_mask = BIT(TC_PORT_LEGACY);
193         else
194                 valid_hpd_mask = BIT(TC_PORT_DP_ALT) |
195                                  BIT(TC_PORT_TBT_ALT);
196
197         if (!(live_status_mask & ~valid_hpd_mask))
198                 return;
199
200         /* If live status mismatches the VBT flag, trust the live status. */
201         drm_dbg_kms(&i915->drm,
202                     "Port %s: live status %08x mismatches the legacy port flag %08x, fixing flag\n",
203                     dig_port->tc_port_name, live_status_mask, valid_hpd_mask);
204
205         dig_port->tc_legacy_port = !dig_port->tc_legacy_port;
206 }
207
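/*
 * Live status readout for platforms using the FIA registers: TBT-alt and
 * DP-alt come from DFLEXDPSP, legacy from the SDEISR hotplug bit. An
 * all-ones readout means the PHY is in TCCOLD and nothing is connected.
 */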
208 static u32 icl_tc_port_live_status_mask(struct intel_digital_port *dig_port)
209 {
210         struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
211         struct intel_uncore *uncore = &i915->uncore;
212         u32 isr_bit = i915->hotplug.pch_hpd[dig_port->base.hpd_pin];
213         u32 mask = 0;
214         u32 val;
215
216         val = intel_uncore_read(uncore,
217                                 PORT_TX_DFLEXDPSP(dig_port->tc_phy_fia));
218
219         if (val == 0xffffffff) {
220                 drm_dbg_kms(&i915->drm,
221                             "Port %s: PHY in TCCOLD, nothing connected\n",
222                             dig_port->tc_port_name);
223                 return mask;
224         }
225
226         if (val & TC_LIVE_STATE_TBT(dig_port->tc_phy_fia_idx))
227                 mask |= BIT(TC_PORT_TBT_ALT);
228         if (val & TC_LIVE_STATE_TC(dig_port->tc_phy_fia_idx))
229                 mask |= BIT(TC_PORT_DP_ALT);
230
231         if (intel_uncore_read(uncore, SDEISR) & isr_bit)
232                 mask |= BIT(TC_PORT_LEGACY);
233
234         /* The sink can be connected only in a single mode. */
235         if (!drm_WARN_ON_ONCE(&i915->drm, hweight32(mask) > 1))
236                 tc_port_fixup_legacy_flag(dig_port, mask);
237
238         return mask;
239 }
240
241 static u32 adl_tc_port_live_status_mask(struct intel_digital_port *dig_port)
242 {
243         struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
244         enum tc_port tc_port = intel_port_to_tc(i915, dig_port->base.port);
245         u32 isr_bit = i915->hotplug.pch_hpd[dig_port->base.hpd_pin];
246         struct intel_uncore *uncore = &i915->uncore;
247         u32 val, mask = 0;
248
249         val = intel_uncore_read(uncore, TCSS_DDI_STATUS(tc_port));
250         if (val & TCSS_DDI_STATUS_HPD_LIVE_STATUS_ALT)
251                 mask |= BIT(TC_PORT_DP_ALT);
252         if (val & TCSS_DDI_STATUS_HPD_LIVE_STATUS_TBT)
253                 mask |= BIT(TC_PORT_TBT_ALT);
254
255         if (intel_uncore_read(uncore, SDEISR) & isr_bit)
256                 mask |= BIT(TC_PORT_LEGACY);
257
258         /* The sink can be connected only in a single mode. */
259         if (!drm_WARN_ON(&i915->drm, hweight32(mask) > 1))
260                 tc_port_fixup_legacy_flag(dig_port, mask);
261
262         return mask;
263 }
264
265 static u32 tc_port_live_status_mask(struct intel_digital_port *dig_port)
266 {
267         struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
268
269         if (IS_ALDERLAKE_P(i915))
270                 return adl_tc_port_live_status_mask(dig_port);
271
272         return icl_tc_port_live_status_mask(dig_port);
273 }
274
275 static bool icl_tc_phy_status_complete(struct intel_digital_port *dig_port)
276 {
277         struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
278         struct intel_uncore *uncore = &i915->uncore;
279         u32 val;
280
281         val = intel_uncore_read(uncore,
282                                 PORT_TX_DFLEXDPPMS(dig_port->tc_phy_fia));
283         if (val == 0xffffffff) {
284                 drm_dbg_kms(&i915->drm,
285                             "Port %s: PHY in TCCOLD, assuming not complete\n",
286                             dig_port->tc_port_name);
287                 return false;
288         }
289
290         return val & DP_PHY_MODE_STATUS_COMPLETED(dig_port->tc_phy_fia_idx);
291 }
292
293 static bool adl_tc_phy_status_complete(struct intel_digital_port *dig_port)
294 {
295         struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
296         struct intel_uncore *uncore = &i915->uncore;
297         u32 val;
298
299         val = intel_uncore_read(uncore, TCSS_DDI_STATUS(dig_port->tc_phy_fia_idx));
300         if (val == 0xffffffff) {
301                 drm_dbg_kms(&i915->drm,
302                             "Port %s: PHY in TCCOLD, assuming not complete\n",
303                             dig_port->tc_port_name);
304                 return false;
305         }
306
307         return val & TCSS_DDI_STATUS_READY;
308 }
309
310 static bool tc_phy_status_complete(struct intel_digital_port *dig_port)
311 {
312         struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
313
314         if (IS_ALDERLAKE_P(i915))
315                 return adl_tc_phy_status_complete(dig_port);
316
317         return icl_tc_phy_status_complete(dig_port);
318 }
319
320 static bool icl_tc_phy_take_ownership(struct intel_digital_port *dig_port,
321                                       bool take)
322 {
323         struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
324         struct intel_uncore *uncore = &i915->uncore;
325         u32 val;
326
327         val = intel_uncore_read(uncore,
328                                 PORT_TX_DFLEXDPCSSS(dig_port->tc_phy_fia));
329         if (val == 0xffffffff) {
330                 drm_dbg_kms(&i915->drm,
331                             "Port %s: PHY in TCCOLD, can't %s ownership\n",
332                             dig_port->tc_port_name, take ? "take" : "release");
333
334                 return false;
335         }
336
337         val &= ~DP_PHY_MODE_STATUS_NOT_SAFE(dig_port->tc_phy_fia_idx);
338         if (take)
339                 val |= DP_PHY_MODE_STATUS_NOT_SAFE(dig_port->tc_phy_fia_idx);
340
341         intel_uncore_write(uncore,
342                            PORT_TX_DFLEXDPCSSS(dig_port->tc_phy_fia), val);
343
344         if (!take && wait_for(!tc_phy_status_complete(dig_port), 10))
345                 drm_dbg_kms(&i915->drm,
346                             "Port %s: PHY complete clear timed out\n",
347                             dig_port->tc_port_name);
348
349         return true;
350 }
351
352 static bool adl_tc_phy_take_ownership(struct intel_digital_port *dig_port,
353                                       bool take)
354 {
355         struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
356         struct intel_uncore *uncore = &i915->uncore;
357         enum port port = dig_port->base.port;
358         u32 val;
359
360         val = intel_uncore_read(uncore, DDI_BUF_CTL(port));
361         if (take)
362                 val |= DDI_BUF_CTL_TC_PHY_OWNERSHIP;
363         else
364                 val &= ~DDI_BUF_CTL_TC_PHY_OWNERSHIP;
365         intel_uncore_write(uncore, DDI_BUF_CTL(port), val);
366
367         return true;
368 }
369
370 static bool tc_phy_take_ownership(struct intel_digital_port *dig_port, bool take)
371 {
372         struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
373
374         if (IS_ALDERLAKE_P(i915))
375                 return adl_tc_phy_take_ownership(dig_port, take);
376
377         return icl_tc_phy_take_ownership(dig_port, take);
378 }
379
380 static bool icl_tc_phy_is_owned(struct intel_digital_port *dig_port)
381 {
382         struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
383         struct intel_uncore *uncore = &i915->uncore;
384         u32 val;
385
386         val = intel_uncore_read(uncore,
387                                 PORT_TX_DFLEXDPCSSS(dig_port->tc_phy_fia));
388         if (val == 0xffffffff) {
389                 drm_dbg_kms(&i915->drm,
390                             "Port %s: PHY in TCCOLD, assume safe mode\n",
391                             dig_port->tc_port_name);
392                 return true;
393         }
394
395         return val & DP_PHY_MODE_STATUS_NOT_SAFE(dig_port->tc_phy_fia_idx);
396 }
397
398 static bool adl_tc_phy_is_owned(struct intel_digital_port *dig_port)
399 {
400         struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
401         struct intel_uncore *uncore = &i915->uncore;
402         enum port port = dig_port->base.port;
403         u32 val;
404
405         val = intel_uncore_read(uncore, DDI_BUF_CTL(port));
406         return val & DDI_BUF_CTL_TC_PHY_OWNERSHIP;
407 }
408
409 static bool tc_phy_is_owned(struct intel_digital_port *dig_port)
410 {
411         struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
412
413         if (IS_ALDERLAKE_P(i915))
414                 return adl_tc_phy_is_owned(dig_port);
415
416         return icl_tc_phy_is_owned(dig_port);
417 }
418
419 /*
420  * This function implements the first part of the Connect Flow described in our
421  * specification, Gen11 TypeC Programming chapter. The rest of the flow (reading
422  * lanes, EDID, etc) is done as needed in the typical places.
423  *
424  * Unlike the other ports, type-C ports are not available to use as soon as we
425  * get a hotplug. The type-C PHYs can be shared between multiple controllers:
426  * display, USB, etc. As a result, handshaking through the FIA is required around
427  * connect and disconnect to cleanly transfer ownership to/from the display
428  * controller and set the type-C power state.
429  */
430 static void icl_tc_phy_connect(struct intel_digital_port *dig_port,
431                                int required_lanes)
432 {
433         struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
434         int max_lanes;
435
436         if (!tc_phy_status_complete(dig_port)) {
437                 drm_dbg_kms(&i915->drm, "Port %s: PHY not ready\n",
438                             dig_port->tc_port_name);
439                 goto out_set_tbt_alt_mode;
440         }
441
442         if (!tc_phy_take_ownership(dig_port, true) &&
443             !drm_WARN_ON(&i915->drm, dig_port->tc_legacy_port))
444                 goto out_set_tbt_alt_mode;
445
446         max_lanes = intel_tc_port_fia_max_lane_count(dig_port);
447         if (dig_port->tc_legacy_port) {
448                 drm_WARN_ON(&i915->drm, max_lanes != 4);
449                 dig_port->tc_mode = TC_PORT_LEGACY;
450
451                 return;
452         }
453
454         /*
455          * Now we have to re-check the live state, in case the port recently
456          * became disconnected. Not necessary for legacy mode.
457          */
458         if (!(tc_port_live_status_mask(dig_port) & BIT(TC_PORT_DP_ALT))) {
459                 drm_dbg_kms(&i915->drm, "Port %s: PHY sudden disconnect\n",
460                             dig_port->tc_port_name);
461                 goto out_release_phy;
462         }
463
464         if (max_lanes < required_lanes) {
465                 drm_dbg_kms(&i915->drm,
466                             "Port %s: PHY max lanes %d < required lanes %d\n",
467                             dig_port->tc_port_name,
468                             max_lanes, required_lanes);
469                 goto out_release_phy;
470         }
471
472         dig_port->tc_mode = TC_PORT_DP_ALT;
473
474         return;
475
476 out_release_phy:
477         tc_phy_take_ownership(dig_port, false);
478 out_set_tbt_alt_mode:
479         dig_port->tc_mode = TC_PORT_TBT_ALT;
480 }
481
482 /*
483  * See the comment at the connect function. This implements the Disconnect
484  * Flow.
485  */
486 static void icl_tc_phy_disconnect(struct intel_digital_port *dig_port)
487 {
488         switch (dig_port->tc_mode) {
489         case TC_PORT_LEGACY:
490                 /* Nothing to do, we never disconnect from legacy mode */
491                 break;
492         case TC_PORT_DP_ALT:
493                 tc_phy_take_ownership(dig_port, false);
494                 dig_port->tc_mode = TC_PORT_TBT_ALT;
495                 break;
496         case TC_PORT_TBT_ALT:
497                 /* Nothing to do, we stay in TBT-alt mode */
498                 break;
499         default:
500                 MISSING_CASE(dig_port->tc_mode);
501         }
502 }
503
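/*
 * Check that the PHY state still matches the current tc_mode: TBT-alt
 * needs no PHY ownership, while DP-alt and legacy require the PHY to be
 * ready and owned by the display.
 */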
504 static bool icl_tc_phy_is_connected(struct intel_digital_port *dig_port)
505 {
506         struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
507
508         if (!tc_phy_status_complete(dig_port)) {
509                 drm_dbg_kms(&i915->drm, "Port %s: PHY status not complete\n",
510                             dig_port->tc_port_name);
511                 return dig_port->tc_mode == TC_PORT_TBT_ALT;
512         }
513
514         if (!tc_phy_is_owned(dig_port)) {
515                 drm_dbg_kms(&i915->drm, "Port %s: PHY not owned\n",
516                             dig_port->tc_port_name);
517
518                 return false;
519         }
520
521         return dig_port->tc_mode == TC_PORT_DP_ALT ||
522                dig_port->tc_mode == TC_PORT_LEGACY;
523 }
524
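/*
 * Work out the mode the port is currently in from the hardware state: a
 * PHY that isn't owned (or isn't ready) means TBT-alt; otherwise use the
 * live status, falling back to the legacy flag when nothing is connected.
 */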
525 static enum tc_port_mode
526 intel_tc_port_get_current_mode(struct intel_digital_port *dig_port)
527 {
528         struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
529         u32 live_status_mask = tc_port_live_status_mask(dig_port);
530         enum tc_port_mode mode;
531
532         if (!tc_phy_is_owned(dig_port) ||
533             drm_WARN_ON(&i915->drm, !tc_phy_status_complete(dig_port)))
534                 return TC_PORT_TBT_ALT;
535
536         mode = dig_port->tc_legacy_port ? TC_PORT_LEGACY : TC_PORT_DP_ALT;
537         if (live_status_mask) {
538                 enum tc_port_mode live_mode = fls(live_status_mask) - 1;
539
540                 if (!drm_WARN_ON(&i915->drm, live_mode == TC_PORT_TBT_ALT))
541                         mode = live_mode;
542         }
543
544         return mode;
545 }
546
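/*
 * Pick the mode the port should switch to: the live status if a sink is
 * connected, otherwise legacy for legacy ports with a ready PHY and
 * TBT-alt for everything else.
 */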
547 static enum tc_port_mode
548 intel_tc_port_get_target_mode(struct intel_digital_port *dig_port)
549 {
550         u32 live_status_mask = tc_port_live_status_mask(dig_port);
551
552         if (live_status_mask)
553                 return fls(live_status_mask) - 1;
554
555         return tc_phy_status_complete(dig_port) &&
556                dig_port->tc_legacy_port ? TC_PORT_LEGACY :
557                                           TC_PORT_TBT_ALT;
558 }
559
560 static void intel_tc_port_reset_mode(struct intel_digital_port *dig_port,
561                                      int required_lanes)
562 {
563         struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
564         enum tc_port_mode old_tc_mode = dig_port->tc_mode;
565
566         intel_display_power_flush_work(i915);
567         if (DISPLAY_VER(i915) != 11 || !dig_port->tc_legacy_port) {
568                 enum intel_display_power_domain aux_domain;
569                 bool aux_powered;
570
571                 aux_domain = intel_aux_power_domain(dig_port);
572                 aux_powered = intel_display_power_is_enabled(i915, aux_domain);
573                 drm_WARN_ON(&i915->drm, aux_powered);
574         }
575
576         icl_tc_phy_disconnect(dig_port);
577         icl_tc_phy_connect(dig_port, required_lanes);
578
579         drm_dbg_kms(&i915->drm, "Port %s: TC port mode reset (%s -> %s)\n",
580                     dig_port->tc_port_name,
581                     tc_port_mode_name(old_tc_mode),
582                     tc_port_mode_name(dig_port->tc_mode));
583 }
584
585 static void
586 intel_tc_port_link_init_refcount(struct intel_digital_port *dig_port,
587                                  int refcount)
588 {
589         struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
590
591         drm_WARN_ON(&i915->drm, dig_port->tc_link_refcount);
592         dig_port->tc_link_refcount = refcount;
593 }
594
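/*
 * Bring the software TC state in sync with the hardware: read back the
 * current mode, seed the link refcount from any already active links and
 * connect legacy ports that were left idle.
 */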
595 void intel_tc_port_sanitize(struct intel_digital_port *dig_port)
596 {
597         struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
598         struct intel_encoder *encoder = &dig_port->base;
599         intel_wakeref_t tc_cold_wref;
600         int active_links = 0;
601
602         mutex_lock(&dig_port->tc_lock);
603         tc_cold_wref = tc_cold_block(dig_port);
604
605         dig_port->tc_mode = intel_tc_port_get_current_mode(dig_port);
606         if (dig_port->dp.is_mst)
607                 active_links = intel_dp_mst_encoder_active_links(dig_port);
608         else if (encoder->base.crtc)
609                 active_links = to_intel_crtc(encoder->base.crtc)->active;
610
611         if (active_links) {
612                 if (!icl_tc_phy_is_connected(dig_port))
613                         drm_dbg_kms(&i915->drm,
614                                     "Port %s: PHY disconnected with %d active link(s)\n",
615                                     dig_port->tc_port_name, active_links);
616                 intel_tc_port_link_init_refcount(dig_port, active_links);
617
618                 goto out;
619         }
620
621         if (dig_port->tc_legacy_port)
622                 icl_tc_phy_connect(dig_port, 1);
623
624 out:
625         drm_dbg_kms(&i915->drm, "Port %s: sanitize mode (%s)\n",
626                     dig_port->tc_port_name,
627                     tc_port_mode_name(dig_port->tc_mode));
628
629         tc_cold_unblock(dig_port, tc_cold_wref);
630         mutex_unlock(&dig_port->tc_lock);
631 }
632
633 static bool intel_tc_port_needs_reset(struct intel_digital_port *dig_port)
634 {
635         return intel_tc_port_get_target_mode(dig_port) != dig_port->tc_mode;
636 }
637
638 /*
639  * The type-C ports are different because even when they are connected, they may
640  * not be available/usable by the graphics driver: see the comment on
641  * icl_tc_phy_connect(). So in our driver, instead of adding the additional
642  * concept of "usable" and making everything check for "connected and usable",
643  * we define a port as "connected" when it is not only connected but also
644  * usable by the rest of the driver. That maintains the old assumption that
645  * connected ports are usable, and avoids exposing to users objects they
646  * can't really use.
647  */
648 bool intel_tc_port_connected(struct intel_encoder *encoder)
649 {
650         struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
651         bool is_connected;
652         intel_wakeref_t tc_cold_wref;
653
654         intel_tc_port_lock(dig_port);
655         tc_cold_wref = tc_cold_block(dig_port);
656
657         is_connected = tc_port_live_status_mask(dig_port) &
658                        BIT(dig_port->tc_mode);
659
660         tc_cold_unblock(dig_port, tc_cold_wref);
661         intel_tc_port_unlock(dig_port);
662
663         return is_connected;
664 }
665
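/*
 * Lock the port's TC state. If no link reference is held the cached mode
 * may be stale, so re-check and reset it (with TCCOLD blocked) before
 * returning with tc_lock held and a DISPLAY_CORE power reference stashed
 * in tc_lock_wakeref.
 */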
666 static void __intel_tc_port_lock(struct intel_digital_port *dig_port,
667                                  int required_lanes)
668 {
669         struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
670         intel_wakeref_t wakeref;
671
672         wakeref = intel_display_power_get(i915, POWER_DOMAIN_DISPLAY_CORE);
673
674         mutex_lock(&dig_port->tc_lock);
675
676         if (!dig_port->tc_link_refcount) {
677                 intel_wakeref_t tc_cold_wref;
678
679                 tc_cold_wref = tc_cold_block(dig_port);
680
681                 if (intel_tc_port_needs_reset(dig_port))
682                         intel_tc_port_reset_mode(dig_port, required_lanes);
683
684                 tc_cold_unblock(dig_port, tc_cold_wref);
685         }
686
687         drm_WARN_ON(&i915->drm, dig_port->tc_lock_wakeref);
688         dig_port->tc_lock_wakeref = wakeref;
689 }
690
691 void intel_tc_port_lock(struct intel_digital_port *dig_port)
692 {
693         __intel_tc_port_lock(dig_port, 1);
694 }
695
696 void intel_tc_port_unlock(struct intel_digital_port *dig_port)
697 {
698         struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
699         intel_wakeref_t wakeref = fetch_and_zero(&dig_port->tc_lock_wakeref);
700
701         mutex_unlock(&dig_port->tc_lock);
702
703         intel_display_power_put_async(i915, POWER_DOMAIN_DISPLAY_CORE,
704                                       wakeref);
705 }
706
707 bool intel_tc_port_ref_held(struct intel_digital_port *dig_port)
708 {
709         return mutex_is_locked(&dig_port->tc_lock) ||
710                dig_port->tc_link_refcount;
711 }
712
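/*
 * Take a link reference on the port, pinning its current mode; paired with
 * intel_tc_port_put_link(). While references are held __intel_tc_port_lock()
 * won't reset the port mode.
 */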
713 void intel_tc_port_get_link(struct intel_digital_port *dig_port,
714                             int required_lanes)
715 {
716         __intel_tc_port_lock(dig_port, required_lanes);
717         dig_port->tc_link_refcount++;
718         intel_tc_port_unlock(dig_port);
719 }
720
721 void intel_tc_port_put_link(struct intel_digital_port *dig_port)
722 {
723         mutex_lock(&dig_port->tc_lock);
724         dig_port->tc_link_refcount--;
725         mutex_unlock(&dig_port->tc_lock);
726 }
727
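/*
 * Detect a Modular FIA SOC by checking MODULAR_FIA_MASK in FIA1's
 * DFLEXDPSP register; TCCOLD has to be blocked for the readout to be
 * valid.
 */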
728 static bool
729 tc_has_modular_fia(struct drm_i915_private *i915, struct intel_digital_port *dig_port)
730 {
731         intel_wakeref_t wakeref;
732         u32 val;
733
734         if (!INTEL_INFO(i915)->display.has_modular_fia)
735                 return false;
736
737         mutex_lock(&dig_port->tc_lock);
738         wakeref = tc_cold_block(dig_port);
739         val = intel_uncore_read(&i915->uncore, PORT_TX_DFLEXDPSP(FIA1));
740         tc_cold_unblock(dig_port, wakeref);
741         mutex_unlock(&dig_port->tc_lock);
742
743         drm_WARN_ON(&i915->drm, val == 0xffffffff);
744
745         return val & MODULAR_FIA_MASK;
746 }
747
748 static void
749 tc_port_load_fia_params(struct drm_i915_private *i915, struct intel_digital_port *dig_port)
750 {
751         enum port port = dig_port->base.port;
752         enum tc_port tc_port = intel_port_to_tc(i915, port);
753
754         /*
755          * Each Modular FIA instance houses 2 TC ports. SOCs with more than two
756          * TC ports have multiple Modular FIA instances.
757          */
758         if (tc_has_modular_fia(i915, dig_port)) {
759                 dig_port->tc_phy_fia = tc_port / 2;
760                 dig_port->tc_phy_fia_idx = tc_port % 2;
761         } else {
762                 dig_port->tc_phy_fia = FIA1;
763                 dig_port->tc_phy_fia_idx = tc_port;
764         }
765 }
766
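/*
 * One-time initialization of the TC state for a digital port: set up the
 * port name, lock and legacy flag and cache which FIA instance/index the
 * port maps to.
 */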
767 void intel_tc_port_init(struct intel_digital_port *dig_port, bool is_legacy)
768 {
769         struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
770         enum port port = dig_port->base.port;
771         enum tc_port tc_port = intel_port_to_tc(i915, port);
772
773         if (drm_WARN_ON(&i915->drm, tc_port == TC_PORT_NONE))
774                 return;
775
776         snprintf(dig_port->tc_port_name, sizeof(dig_port->tc_port_name),
777                  "%c/TC#%d", port_name(port), tc_port + 1);
778
779         mutex_init(&dig_port->tc_lock);
780         dig_port->tc_legacy_port = is_legacy;
781         dig_port->tc_link_refcount = 0;
782         tc_port_load_fia_params(i915, dig_port);
783 }