/*
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include <linux/kernel.h>

#include <drm/drmP.h>
#include <drm/i915_drm.h>

#include "i915_drv.h"
#include "intel_drv.h"

/**
 * DOC: Hotplug
 *
 * Simply put, hotplug occurs when a display is connected to or disconnected
 * from the system. However, there may be adapters and docking stations and
 * Display Port short pulses and MST devices involved, complicating matters.
 *
 * Hotplug in i915 is handled at many different levels of abstraction.
 *
 * The platform dependent interrupt handling code in i915_irq.c enables,
 * disables, and does preliminary handling of the interrupts. The interrupt
 * handlers gather the hotplug detect (HPD) information from relevant registers
 * into a platform independent mask of hotplug pins that have fired.
 *
 * The platform independent interrupt handler intel_hpd_irq_handler() in
 * intel_hotplug.c does hotplug irq storm detection and mitigation, and passes
 * further processing to appropriate bottom halves (Display Port specific and
 * regular hotplug).
 *
 * The Display Port work function i915_digport_work_func() calls into
 * intel_dp_hpd_pulse() via hooks, which handles DP short pulses and DP MST long
 * pulses, with failures and non-MST long pulses triggering regular hotplug
 * processing on the connector.
 *
 * The regular hotplug work function i915_hotplug_work_func() calls connector
 * detect hooks, and, if the connector status changes, triggers sending of the
 * hotplug uevent to userspace via drm_kms_helper_hotplug_event().
 *
 * Finally, userspace is responsible for triggering a modeset upon receiving
 * the hotplug uevent, disabling or enabling the crtc as needed.
 *
 * The hotplug interrupt storm detection and mitigation code keeps track of the
 * number of interrupts per hotplug pin over a period of time, and if the number
 * of interrupts exceeds a certain threshold, the interrupt is disabled for a
 * while before being re-enabled. The intention is to mitigate issues arising
 * from broken hardware triggering massive amounts of interrupts and grinding
 * the system to a halt.
 *
 * The current implementation expects that a hotplug interrupt storm will not be
 * seen when a display port sink is connected, hence on platforms whose DP
 * callback is handled by i915_digport_work_func() re-enabling of hpd is not
 * performed (it was never expected to be disabled in the first place ;) ).
 * This is specific to DP sinks handled by this routine; any other display such
 * as HDMI or DVI enabled on the same port will have proper logic, since it
 * will use i915_hotplug_work_func() where this logic is handled.
 */

/**
 * intel_hpd_pin_default - return the default pin associated with a certain port.
 * @dev_priv: private driver data pointer
 * @port: the hpd port to get the associated pin for
 *
 * It is only valid and used by digital port encoders.
 *
 * Return the pin that is associated with @port, or HPD_NONE if no pin is
 * hard associated with that @port.
 */
enum hpd_pin intel_hpd_pin_default(struct drm_i915_private *dev_priv,
				   enum port port)
{
	switch (port) {
	case PORT_A:
		return HPD_PORT_A;
	case PORT_B:
		return HPD_PORT_B;
	case PORT_C:
		return HPD_PORT_C;
	case PORT_D:
		return HPD_PORT_D;
	case PORT_E:
		return HPD_PORT_E;
	case PORT_F:
		if (IS_CNL_WITH_PORT_F(dev_priv))
			return HPD_PORT_E;
		return HPD_PORT_F;
	default:
		MISSING_CASE(port);
		return HPD_NONE;
	}
}

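/* Storm detection window and re-enable delay, both in milliseconds. */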
#define HPD_STORM_DETECT_PERIOD 1000
#define HPD_STORM_REENABLE_DELAY (2 * 60 * 1000)

/**
 * intel_hpd_irq_storm_detect - gather stats and detect HPD irq storm on a pin
 * @dev_priv: private driver data pointer
 * @pin: the pin to gather stats on
 *
 * Gather stats about HPD irqs from the specified @pin, and detect irq
 * storms. Only the pin specific stats and state are changed, the caller is
 * responsible for further action.
 *
 * The number of irqs that are allowed within @HPD_STORM_DETECT_PERIOD is
 * stored in @dev_priv->hotplug.hpd_storm_threshold which defaults to
 * @HPD_STORM_DEFAULT_THRESHOLD. If this threshold is exceeded, it's
 * considered an irq storm and the irq state is set to @HPD_MARK_DISABLED.
 *
 * The HPD threshold can be controlled through i915_hpd_storm_ctl in debugfs,
 * and should only be adjusted for automated hotplug testing.
 *
 * Return true if an irq storm was detected on @pin.
 */
static bool intel_hpd_irq_storm_detect(struct drm_i915_private *dev_priv,
				       enum hpd_pin pin)
{
	unsigned long start = dev_priv->hotplug.stats[pin].last_jiffies;
	unsigned long end = start + msecs_to_jiffies(HPD_STORM_DETECT_PERIOD);
	const int threshold = dev_priv->hotplug.hpd_storm_threshold;
	bool storm = false;

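	/*
	 * Count irqs within the detection window; restart the window when the
	 * previous one has expired, and mark the pin for disabling once the
	 * count exceeds the threshold.
	 */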
	if (!time_in_range(jiffies, start, end)) {
		dev_priv->hotplug.stats[pin].last_jiffies = jiffies;
		dev_priv->hotplug.stats[pin].count = 0;
		DRM_DEBUG_KMS("Received HPD interrupt on PIN %d - cnt: 0\n", pin);
	} else if (dev_priv->hotplug.stats[pin].count > threshold &&
		   threshold) {
		dev_priv->hotplug.stats[pin].state = HPD_MARK_DISABLED;
		DRM_DEBUG_KMS("HPD interrupt storm detected on PIN %d\n", pin);
		storm = true;
	} else {
		dev_priv->hotplug.stats[pin].count++;
		DRM_DEBUG_KMS("Received HPD interrupt on PIN %d - cnt: %d\n", pin,
			      dev_priv->hotplug.stats[pin].count);
	}

	return storm;
}

static void intel_hpd_irq_storm_disable(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = &dev_priv->drm;
	struct intel_connector *intel_connector;
	struct intel_encoder *intel_encoder;
	struct drm_connector *connector;
	struct drm_connector_list_iter conn_iter;
	enum hpd_pin pin;
	bool hpd_disabled = false;

	lockdep_assert_held(&dev_priv->irq_lock);

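	/* Switch connectors whose pin hit an irq storm over to polling. */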
	drm_connector_list_iter_begin(dev, &conn_iter);
	drm_for_each_connector_iter(connector, &conn_iter) {
		if (connector->polled != DRM_CONNECTOR_POLL_HPD)
			continue;

		intel_connector = to_intel_connector(connector);
		intel_encoder = intel_connector->encoder;
		if (!intel_encoder)
			continue;

		pin = intel_encoder->hpd_pin;
		if (pin == HPD_NONE ||
		    dev_priv->hotplug.stats[pin].state != HPD_MARK_DISABLED)
			continue;

		DRM_INFO("HPD interrupt storm detected on connector %s: "
			 "switching from hotplug detection to polling\n",
			 connector->name);

		dev_priv->hotplug.stats[pin].state = HPD_DISABLED;
		connector->polled = DRM_CONNECTOR_POLL_CONNECT
			| DRM_CONNECTOR_POLL_DISCONNECT;
		hpd_disabled = true;
	}
	drm_connector_list_iter_end(&conn_iter);

	/* Enable polling and queue hotplug re-enabling. */
	if (hpd_disabled) {
		drm_kms_helper_poll_enable(dev);
		mod_delayed_work(system_wq, &dev_priv->hotplug.reenable_work,
				 msecs_to_jiffies(HPD_STORM_REENABLE_DELAY));
	}
}

static void intel_hpd_irq_storm_reenable_work(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, typeof(*dev_priv),
			     hotplug.reenable_work.work);
	struct drm_device *dev = &dev_priv->drm;
	enum hpd_pin pin;

	intel_runtime_pm_get(dev_priv);

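	/*
	 * Re-enable every pin that was disabled due to an irq storm and
	 * restore the polling mode of its connectors.
	 */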
	spin_lock_irq(&dev_priv->irq_lock);
	for_each_hpd_pin(pin) {
		struct drm_connector *connector;
		struct drm_connector_list_iter conn_iter;

		if (dev_priv->hotplug.stats[pin].state != HPD_DISABLED)
			continue;

		dev_priv->hotplug.stats[pin].state = HPD_ENABLED;

		drm_connector_list_iter_begin(dev, &conn_iter);
		drm_for_each_connector_iter(connector, &conn_iter) {
			struct intel_connector *intel_connector = to_intel_connector(connector);

			/* Don't check MST ports, they don't have pins */
			if (!intel_connector->mst_port &&
			    intel_connector->encoder->hpd_pin == pin) {
				if (connector->polled != intel_connector->polled)
					DRM_DEBUG_DRIVER("Reenabling HPD on connector %s\n",
							 connector->name);
				connector->polled = intel_connector->polled;
				if (!connector->polled)
					connector->polled = DRM_CONNECTOR_POLL_HPD;
			}
		}
		drm_connector_list_iter_end(&conn_iter);
	}
	if (dev_priv->display_irqs_enabled && dev_priv->display.hpd_irq_setup)
		dev_priv->display.hpd_irq_setup(dev_priv);
	spin_unlock_irq(&dev_priv->irq_lock);

	intel_runtime_pm_put(dev_priv);
}

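/* Re-probe the connector and report whether its status changed. */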
bool intel_encoder_hotplug(struct intel_encoder *encoder,
			   struct intel_connector *connector)
{
	struct drm_device *dev = connector->base.dev;
	enum drm_connector_status old_status;

	WARN_ON(!mutex_is_locked(&dev->mode_config.mutex));
	old_status = connector->base.status;

	connector->base.status =
		drm_helper_probe_detect(&connector->base, NULL, false);

	if (old_status == connector->base.status)
		return false;

	DRM_DEBUG_KMS("[CONNECTOR:%d:%s] status updated from %s to %s\n",
		      connector->base.base.id,
		      connector->base.name,
		      drm_get_connector_status_name(old_status),
		      drm_get_connector_status_name(connector->base.status));

	return true;
}

static bool intel_encoder_has_hpd_pulse(struct intel_encoder *encoder)
{
	return intel_encoder_is_dig_port(encoder) &&
		enc_to_dig_port(&encoder->base)->hpd_pulse != NULL;
}

static void i915_digport_work_func(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, struct drm_i915_private, hotplug.dig_port_work);
	u32 long_port_mask, short_port_mask;
	struct intel_encoder *encoder;
	u32 old_bits = 0;

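	/* Snapshot and clear the pending long/short pulse port masks. */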
	spin_lock_irq(&dev_priv->irq_lock);
	long_port_mask = dev_priv->hotplug.long_port_mask;
	dev_priv->hotplug.long_port_mask = 0;
	short_port_mask = dev_priv->hotplug.short_port_mask;
	dev_priv->hotplug.short_port_mask = 0;
	spin_unlock_irq(&dev_priv->irq_lock);

	for_each_intel_encoder(&dev_priv->drm, encoder) {
		struct intel_digital_port *dig_port;
		enum port port = encoder->port;
		bool long_hpd, short_hpd;
		enum irqreturn ret;

		if (!intel_encoder_has_hpd_pulse(encoder))
			continue;

		long_hpd = long_port_mask & BIT(port);
		short_hpd = short_port_mask & BIT(port);

		if (!long_hpd && !short_hpd)
			continue;

		dig_port = enc_to_dig_port(&encoder->base);

		ret = dig_port->hpd_pulse(dig_port, long_hpd);
		if (ret == IRQ_NONE) {
			/* fall back to old school hpd */
			old_bits |= BIT(encoder->hpd_pin);
		}
	}

	if (old_bits) {
		spin_lock_irq(&dev_priv->irq_lock);
		dev_priv->hotplug.event_bits |= old_bits;
		spin_unlock_irq(&dev_priv->irq_lock);
		schedule_work(&dev_priv->hotplug.hotplug_work);
	}
}

/*
 * Handle hotplug events outside the interrupt handler proper.
 */
static void i915_hotplug_work_func(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, struct drm_i915_private, hotplug.hotplug_work);
	struct drm_device *dev = &dev_priv->drm;
	struct intel_connector *intel_connector;
	struct intel_encoder *intel_encoder;
	struct drm_connector *connector;
	struct drm_connector_list_iter conn_iter;
	bool changed = false;
	u32 hpd_event_bits;

	mutex_lock(&dev->mode_config.mutex);
	DRM_DEBUG_KMS("running encoder hotplug functions\n");

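	/* Take a snapshot of the pending hotplug event bits under the irq lock. */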
	spin_lock_irq(&dev_priv->irq_lock);

	hpd_event_bits = dev_priv->hotplug.event_bits;
	dev_priv->hotplug.event_bits = 0;

	/* Disable hotplug on connectors that hit an irq storm. */
	intel_hpd_irq_storm_disable(dev_priv);

	spin_unlock_irq(&dev_priv->irq_lock);

	drm_connector_list_iter_begin(dev, &conn_iter);
	drm_for_each_connector_iter(connector, &conn_iter) {
		intel_connector = to_intel_connector(connector);
		if (!intel_connector->encoder)
			continue;
		intel_encoder = intel_connector->encoder;
		if (hpd_event_bits & (1 << intel_encoder->hpd_pin)) {
			DRM_DEBUG_KMS("Connector %s (pin %i) received hotplug event.\n",
				      connector->name, intel_encoder->hpd_pin);

			changed |= intel_encoder->hotplug(intel_encoder,
							  intel_connector);
		}
	}
	drm_connector_list_iter_end(&conn_iter);
	mutex_unlock(&dev->mode_config.mutex);

	if (changed)
		drm_kms_helper_hotplug_event(dev);
}

/**
 * intel_hpd_irq_handler - main hotplug irq handler
 * @dev_priv: drm_i915_private
 * @pin_mask: a mask of hpd pins that have triggered the irq
 * @long_mask: a mask of hpd pins that may be long hpd pulses
 *
 * This is the main hotplug irq handler for all platforms. The platform specific
 * irq handlers call the platform specific hotplug irq handlers, which read and
 * decode the appropriate registers into bitmasks about hpd pins that have
 * triggered (@pin_mask), and which of those pins may be long pulses
 * (@long_mask). The @long_mask is ignored if the port corresponding to the pin
 * is not a digital port.
 *
 * Here, we do hotplug irq storm detection and mitigation, and pass further
 * processing to appropriate bottom halves.
 */
void intel_hpd_irq_handler(struct drm_i915_private *dev_priv,
			   u32 pin_mask, u32 long_mask)
{
	struct intel_encoder *encoder;
	bool storm_detected = false;
	bool queue_dig = false, queue_hp = false;
	u32 long_hpd_pulse_mask = 0;
	u32 short_hpd_pulse_mask = 0;
	enum hpd_pin pin;

	if (!pin_mask)
		return;

	spin_lock(&dev_priv->irq_lock);

	/*
	 * Determine whether ->hpd_pulse() exists for each pin, and
	 * whether we have a short or a long pulse. This is needed
	 * as each pin may have up to two encoders (HDMI and DP) and
	 * only one of them (DP) will have ->hpd_pulse().
	 */
	for_each_intel_encoder(&dev_priv->drm, encoder) {
		bool has_hpd_pulse = intel_encoder_has_hpd_pulse(encoder);
		enum port port = encoder->port;
		bool long_hpd;

		pin = encoder->hpd_pin;
		if (!(BIT(pin) & pin_mask))
			continue;

		if (!has_hpd_pulse)
			continue;

		long_hpd = long_mask & BIT(pin);

		DRM_DEBUG_DRIVER("digital hpd port %c - %s\n", port_name(port),
				 long_hpd ? "long" : "short");
		queue_dig = true;

		if (long_hpd) {
			long_hpd_pulse_mask |= BIT(pin);
			dev_priv->hotplug.long_port_mask |= BIT(port);
		} else {
			short_hpd_pulse_mask |= BIT(pin);
			dev_priv->hotplug.short_port_mask |= BIT(port);
		}
	}

	/* Now process each pin just once */
	for_each_hpd_pin(pin) {
		bool long_hpd;

		if (!(BIT(pin) & pin_mask))
			continue;

		if (dev_priv->hotplug.stats[pin].state == HPD_DISABLED) {
			/*
			 * On GMCH platforms the interrupt mask bits only
			 * prevent irq generation, not the setting of the
			 * hotplug bits itself. So only WARN about unexpected
			 * interrupts on saner platforms.
			 */
			WARN_ONCE(!HAS_GMCH_DISPLAY(dev_priv),
				  "Received HPD interrupt on pin %d although disabled\n", pin);
			continue;
		}

		if (dev_priv->hotplug.stats[pin].state != HPD_ENABLED)
			continue;

		/*
		 * Delegate to ->hpd_pulse() if one of the encoders for this
		 * pin has it, otherwise let the hotplug_work deal with this
		 * pin directly.
		 */
		if (((short_hpd_pulse_mask | long_hpd_pulse_mask) & BIT(pin))) {
			long_hpd = long_hpd_pulse_mask & BIT(pin);
		} else {
			dev_priv->hotplug.event_bits |= BIT(pin);
			long_hpd = true;
			queue_hp = true;
		}

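		/* Short pulses handled by ->hpd_pulse() don't feed storm detection. */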
		if (!long_hpd)
			continue;

		if (intel_hpd_irq_storm_detect(dev_priv, pin)) {
			dev_priv->hotplug.event_bits &= ~BIT(pin);
			storm_detected = true;
		}
	}

	if (storm_detected && dev_priv->display_irqs_enabled)
		dev_priv->display.hpd_irq_setup(dev_priv);
	spin_unlock(&dev_priv->irq_lock);

	/*
	 * Our hotplug handler can grab modeset locks (by calling down into the
	 * fb helpers). Hence it must not be run on our own dev-priv->wq work
	 * queue for otherwise the flush_work in the pageflip code will
	 * deadlock.
	 */
	if (queue_dig)
		queue_work(dev_priv->hotplug.dp_wq, &dev_priv->hotplug.dig_port_work);
	if (queue_hp)
		schedule_work(&dev_priv->hotplug.hotplug_work);
}

/**
 * intel_hpd_init - initializes and enables hpd support
 * @dev_priv: i915 device instance
 *
 * This function enables the hotplug support. It requires that interrupts have
 * already been enabled with intel_irq_init_hw(). From this point on hotplug and
 * poll requests can run concurrently with other code, so locking rules must be
 * obeyed.
 *
 * This is a separate step from interrupt enabling to simplify the locking rules
 * in the driver load and resume code.
 *
 * Also see: intel_hpd_poll_init(), which enables connector polling
 */
void intel_hpd_init(struct drm_i915_private *dev_priv)
{
	int i;

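	/* Reset per-pin stats and mark every pin as enabled again. */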
	for_each_hpd_pin(i) {
		dev_priv->hotplug.stats[i].count = 0;
		dev_priv->hotplug.stats[i].state = HPD_ENABLED;
	}

	WRITE_ONCE(dev_priv->hotplug.poll_enabled, false);
	schedule_work(&dev_priv->hotplug.poll_init_work);

	/*
	 * Interrupt setup is already guaranteed to be single-threaded, this is
	 * just to make the assert_spin_locked checks happy.
	 */
	if (dev_priv->display_irqs_enabled && dev_priv->display.hpd_irq_setup) {
		spin_lock_irq(&dev_priv->irq_lock);
		if (dev_priv->display_irqs_enabled)
			dev_priv->display.hpd_irq_setup(dev_priv);
		spin_unlock_irq(&dev_priv->irq_lock);
	}
}

static void i915_hpd_poll_init_work(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, struct drm_i915_private,
			     hotplug.poll_init_work);
	struct drm_device *dev = &dev_priv->drm;
	struct drm_connector *connector;
	struct drm_connector_list_iter conn_iter;
	bool enabled;

	mutex_lock(&dev->mode_config.mutex);

	enabled = READ_ONCE(dev_priv->hotplug.poll_enabled);

	drm_connector_list_iter_begin(dev, &conn_iter);
	drm_for_each_connector_iter(connector, &conn_iter) {
		struct intel_connector *intel_connector =
			to_intel_connector(connector);
		connector->polled = intel_connector->polled;

		/* MST has a dynamic intel_connector->encoder and its reprobing
		 * is all handled by the MST helpers. */
		if (intel_connector->mst_port)
			continue;

		if (!connector->polled && I915_HAS_HOTPLUG(dev_priv) &&
		    intel_connector->encoder->hpd_pin > HPD_NONE) {
			connector->polled = enabled ?
				DRM_CONNECTOR_POLL_CONNECT |
				DRM_CONNECTOR_POLL_DISCONNECT :
				DRM_CONNECTOR_POLL_HPD;
		}
	}
	drm_connector_list_iter_end(&conn_iter);

	if (enabled)
		drm_kms_helper_poll_enable(dev);

	mutex_unlock(&dev->mode_config.mutex);

	/*
	 * We might have missed any hotplugs that happened while we were
	 * in the middle of disabling polling
	 */
	if (!enabled)
		drm_helper_hpd_irq_event(dev);
}

/**
 * intel_hpd_poll_init - enables/disables polling for connectors with hpd
 * @dev_priv: i915 device instance
 *
 * This function enables polling for all connectors, regardless of whether or
 * not they support hotplug detection. Under certain conditions HPD may not be
 * functional. On most Intel GPUs, this happens when we enter runtime suspend.
 * On Valleyview and Cherryview systems, this also happens when we shut off all
 * of the powerwells.
 *
 * Since this function can get called in contexts where we're already holding
 * dev->mode_config.mutex, we do the actual hotplug enabling in a separate
 * worker.
 *
 * Also see: intel_hpd_init(), which restores hpd handling.
 */
void intel_hpd_poll_init(struct drm_i915_private *dev_priv)
{
	WRITE_ONCE(dev_priv->hotplug.poll_enabled, true);

	/*
	 * We might already be holding dev->mode_config.mutex, so do this in a
	 * separate worker.
	 * As well, there's no issue if we race here since we always reschedule
	 * this worker anyway.
	 */
	schedule_work(&dev_priv->hotplug.poll_init_work);
}

void intel_hpd_init_work(struct drm_i915_private *dev_priv)
{
	INIT_WORK(&dev_priv->hotplug.hotplug_work, i915_hotplug_work_func);
	INIT_WORK(&dev_priv->hotplug.dig_port_work, i915_digport_work_func);
	INIT_WORK(&dev_priv->hotplug.poll_init_work, i915_hpd_poll_init_work);
	INIT_DELAYED_WORK(&dev_priv->hotplug.reenable_work,
			  intel_hpd_irq_storm_reenable_work);
}

void intel_hpd_cancel_work(struct drm_i915_private *dev_priv)
{
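	/* Discard any pending hotplug state before flushing the workers. */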
	spin_lock_irq(&dev_priv->irq_lock);

	dev_priv->hotplug.long_port_mask = 0;
	dev_priv->hotplug.short_port_mask = 0;
	dev_priv->hotplug.event_bits = 0;

	spin_unlock_irq(&dev_priv->irq_lock);

	cancel_work_sync(&dev_priv->hotplug.dig_port_work);
	cancel_work_sync(&dev_priv->hotplug.hotplug_work);
	cancel_work_sync(&dev_priv->hotplug.poll_init_work);
	cancel_delayed_work_sync(&dev_priv->hotplug.reenable_work);
}

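/*
 * Temporarily mark @pin as disabled so its hotplug interrupts are ignored.
 * Returns true if the pin was previously enabled.
 */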
bool intel_hpd_disable(struct drm_i915_private *dev_priv, enum hpd_pin pin)
{
	bool ret = false;

	if (pin == HPD_NONE)
		return false;

	spin_lock_irq(&dev_priv->irq_lock);
	if (dev_priv->hotplug.stats[pin].state == HPD_ENABLED) {
		dev_priv->hotplug.stats[pin].state = HPD_DISABLED;
		ret = true;
	}
	spin_unlock_irq(&dev_priv->irq_lock);

	return ret;
}

void intel_hpd_enable(struct drm_i915_private *dev_priv, enum hpd_pin pin)
{
	if (pin == HPD_NONE)
		return;

	spin_lock_irq(&dev_priv->irq_lock);
	dev_priv->hotplug.stats[pin].state = HPD_ENABLED;
	spin_unlock_irq(&dev_priv->irq_lock);
}