/* i915_irq.c -- IRQ support for the I915 -*- linux-c -*-
 */
/*
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/circ_buf.h>
#include <linux/cpuidle.h>
#include <linux/slab.h>
#include <linux/sysrq.h>

#include <drm/drm_drv.h>
#include <drm/drm_irq.h>
#include <drm/i915_drm.h>

#include "i915_drv.h"
#include "i915_irq.h"
#include "i915_trace.h"
#include "intel_drv.h"
#include "intel_fifo_underrun.h"
#include "intel_hotplug.h"
#include "intel_lpe_audio.h"
#include "intel_psr.h"

/**
 * DOC: interrupt handling
 *
 * These functions provide the basic support for enabling and disabling the
 * interrupt handling support. There's a lot more functionality in i915_irq.c
 * and related files, but that will be described in separate chapters.
 */

static const u32 hpd_ilk[HPD_NUM_PINS] = {
        [HPD_PORT_A] = DE_DP_A_HOTPLUG,
};

static const u32 hpd_ivb[HPD_NUM_PINS] = {
        [HPD_PORT_A] = DE_DP_A_HOTPLUG_IVB,
};

static const u32 hpd_bdw[HPD_NUM_PINS] = {
        [HPD_PORT_A] = GEN8_PORT_DP_A_HOTPLUG,
};

static const u32 hpd_ibx[HPD_NUM_PINS] = {
        [HPD_CRT] = SDE_CRT_HOTPLUG,
        [HPD_SDVO_B] = SDE_SDVOB_HOTPLUG,
        [HPD_PORT_B] = SDE_PORTB_HOTPLUG,
        [HPD_PORT_C] = SDE_PORTC_HOTPLUG,
        [HPD_PORT_D] = SDE_PORTD_HOTPLUG
};

static const u32 hpd_cpt[HPD_NUM_PINS] = {
        [HPD_CRT] = SDE_CRT_HOTPLUG_CPT,
        [HPD_SDVO_B] = SDE_SDVOB_HOTPLUG_CPT,
        [HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT,
        [HPD_PORT_C] = SDE_PORTC_HOTPLUG_CPT,
        [HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT
};

static const u32 hpd_spt[HPD_NUM_PINS] = {
        [HPD_PORT_A] = SDE_PORTA_HOTPLUG_SPT,
        [HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT,
        [HPD_PORT_C] = SDE_PORTC_HOTPLUG_CPT,
        [HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT,
        [HPD_PORT_E] = SDE_PORTE_HOTPLUG_SPT
};

static const u32 hpd_mask_i915[HPD_NUM_PINS] = {
        [HPD_CRT] = CRT_HOTPLUG_INT_EN,
        [HPD_SDVO_B] = SDVOB_HOTPLUG_INT_EN,
        [HPD_SDVO_C] = SDVOC_HOTPLUG_INT_EN,
        [HPD_PORT_B] = PORTB_HOTPLUG_INT_EN,
        [HPD_PORT_C] = PORTC_HOTPLUG_INT_EN,
        [HPD_PORT_D] = PORTD_HOTPLUG_INT_EN
};

static const u32 hpd_status_g4x[HPD_NUM_PINS] = {
        [HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
        [HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_G4X,
        [HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_G4X,
        [HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
        [HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
        [HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
};

static const u32 hpd_status_i915[HPD_NUM_PINS] = {
        [HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
        [HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_I915,
        [HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_I915,
        [HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
        [HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
        [HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
};

/* BXT hpd list */
static const u32 hpd_bxt[HPD_NUM_PINS] = {
        [HPD_PORT_A] = BXT_DE_PORT_HP_DDIA,
        [HPD_PORT_B] = BXT_DE_PORT_HP_DDIB,
        [HPD_PORT_C] = BXT_DE_PORT_HP_DDIC
};

static const u32 hpd_gen11[HPD_NUM_PINS] = {
        [HPD_PORT_C] = GEN11_TC1_HOTPLUG | GEN11_TBT1_HOTPLUG,
        [HPD_PORT_D] = GEN11_TC2_HOTPLUG | GEN11_TBT2_HOTPLUG,
        [HPD_PORT_E] = GEN11_TC3_HOTPLUG | GEN11_TBT3_HOTPLUG,
        [HPD_PORT_F] = GEN11_TC4_HOTPLUG | GEN11_TBT4_HOTPLUG
};

static const u32 hpd_icp[HPD_NUM_PINS] = {
        [HPD_PORT_A] = SDE_DDIA_HOTPLUG_ICP,
        [HPD_PORT_B] = SDE_DDIB_HOTPLUG_ICP,
        [HPD_PORT_C] = SDE_TC1_HOTPLUG_ICP,
        [HPD_PORT_D] = SDE_TC2_HOTPLUG_ICP,
        [HPD_PORT_E] = SDE_TC3_HOTPLUG_ICP,
        [HPD_PORT_F] = SDE_TC4_HOTPLUG_ICP
};

static void gen3_irq_reset(struct intel_uncore *uncore, i915_reg_t imr,
                           i915_reg_t iir, i915_reg_t ier)
{
        intel_uncore_write(uncore, imr, 0xffffffff);
        intel_uncore_posting_read(uncore, imr);

        intel_uncore_write(uncore, ier, 0);

        /* IIR can theoretically queue up two events. Be paranoid. */
        intel_uncore_write(uncore, iir, 0xffffffff);
        intel_uncore_posting_read(uncore, iir);
        intel_uncore_write(uncore, iir, 0xffffffff);
        intel_uncore_posting_read(uncore, iir);
}

static void gen2_irq_reset(struct intel_uncore *uncore)
{
        intel_uncore_write16(uncore, GEN2_IMR, 0xffff);
        intel_uncore_posting_read16(uncore, GEN2_IMR);

        intel_uncore_write16(uncore, GEN2_IER, 0);

        /* IIR can theoretically queue up two events. Be paranoid. */
        intel_uncore_write16(uncore, GEN2_IIR, 0xffff);
        intel_uncore_posting_read16(uncore, GEN2_IIR);
        intel_uncore_write16(uncore, GEN2_IIR, 0xffff);
        intel_uncore_posting_read16(uncore, GEN2_IIR);
}

#define GEN8_IRQ_RESET_NDX(uncore, type, which) \
({ \
        unsigned int which_ = which; \
        gen3_irq_reset((uncore), GEN8_##type##_IMR(which_), \
                       GEN8_##type##_IIR(which_), GEN8_##type##_IER(which_)); \
})

#define GEN3_IRQ_RESET(uncore, type) \
        gen3_irq_reset((uncore), type##IMR, type##IIR, type##IER)
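/*
 * Illustrative expansion (not additional code): GEN3_IRQ_RESET pastes the
 * given prefix onto each register name, so e.g. GEN3_IRQ_RESET(uncore, DE)
 * becomes
 *
 *      gen3_irq_reset((uncore), DEIMR, DEIIR, DEIER);
 */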

#define GEN2_IRQ_RESET(uncore) \
        gen2_irq_reset(uncore)

/*
 * We should clear IMR at preinstall/uninstall, and just check at postinstall.
 */
static void gen3_assert_iir_is_zero(struct intel_uncore *uncore, i915_reg_t reg)
{
        u32 val = intel_uncore_read(uncore, reg);

        if (val == 0)
                return;

        WARN(1, "Interrupt register 0x%x is not zero: 0x%08x\n",
             i915_mmio_reg_offset(reg), val);
        intel_uncore_write(uncore, reg, 0xffffffff);
        intel_uncore_posting_read(uncore, reg);
        intel_uncore_write(uncore, reg, 0xffffffff);
        intel_uncore_posting_read(uncore, reg);
}

static void gen2_assert_iir_is_zero(struct intel_uncore *uncore)
{
        u16 val = intel_uncore_read16(uncore, GEN2_IIR);

        if (val == 0)
                return;

        WARN(1, "Interrupt register 0x%x is not zero: 0x%04x\n",
             i915_mmio_reg_offset(GEN2_IIR), val);
        intel_uncore_write16(uncore, GEN2_IIR, 0xffff);
        intel_uncore_posting_read16(uncore, GEN2_IIR);
        intel_uncore_write16(uncore, GEN2_IIR, 0xffff);
        intel_uncore_posting_read16(uncore, GEN2_IIR);
}

static void gen3_irq_init(struct intel_uncore *uncore,
                          i915_reg_t imr, u32 imr_val,
                          i915_reg_t ier, u32 ier_val,
                          i915_reg_t iir)
{
        gen3_assert_iir_is_zero(uncore, iir);

        intel_uncore_write(uncore, ier, ier_val);
        intel_uncore_write(uncore, imr, imr_val);
        intel_uncore_posting_read(uncore, imr);
}

static void gen2_irq_init(struct intel_uncore *uncore,
                          u32 imr_val, u32 ier_val)
{
        gen2_assert_iir_is_zero(uncore);

        intel_uncore_write16(uncore, GEN2_IER, ier_val);
        intel_uncore_write16(uncore, GEN2_IMR, imr_val);
        intel_uncore_posting_read16(uncore, GEN2_IMR);
}

#define GEN8_IRQ_INIT_NDX(uncore, type, which, imr_val, ier_val) \
({ \
        unsigned int which_ = which; \
        gen3_irq_init((uncore), \
                      GEN8_##type##_IMR(which_), imr_val, \
                      GEN8_##type##_IER(which_), ier_val, \
                      GEN8_##type##_IIR(which_)); \
})

#define GEN3_IRQ_INIT(uncore, type, imr_val, ier_val) \
        gen3_irq_init((uncore), \
                      type##IMR, imr_val, \
                      type##IER, ier_val, \
                      type##IIR)

#define GEN2_IRQ_INIT(uncore, imr_val, ier_val) \
        gen2_irq_init((uncore), imr_val, ier_val)

static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir);
static void gen9_guc_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir);

/* For display hotplug interrupt */
static inline void
i915_hotplug_interrupt_update_locked(struct drm_i915_private *dev_priv,
                                     u32 mask,
                                     u32 bits)
{
        u32 val;

        lockdep_assert_held(&dev_priv->irq_lock);
        WARN_ON(bits & ~mask);

        val = I915_READ(PORT_HOTPLUG_EN);
        val &= ~mask;
        val |= bits;
        I915_WRITE(PORT_HOTPLUG_EN, val);
}

/**
 * i915_hotplug_interrupt_update - update hotplug interrupt enable
 * @dev_priv: driver private
 * @mask: bits to update
 * @bits: bits to enable
 * NOTE: the HPD enable bits are modified both inside and outside
 * of an interrupt context. To keep the read-modify-write cycles from
 * interfering, these bits are protected by a spinlock. Since this
 * function is usually not called from a context where the lock is
 * held already, this function acquires the lock itself. A non-locking
 * version is also available.
 */
void i915_hotplug_interrupt_update(struct drm_i915_private *dev_priv,
                                   u32 mask,
                                   u32 bits)
{
        spin_lock_irq(&dev_priv->irq_lock);
        i915_hotplug_interrupt_update_locked(dev_priv, mask, bits);
        spin_unlock_irq(&dev_priv->irq_lock);
}
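/*
 * A minimal usage sketch (values are illustrative): unmasking the port B
 * hotplug interrupt while leaving all other enable bits untouched would be
 *
 *      i915_hotplug_interrupt_update(dev_priv,
 *                                    PORTB_HOTPLUG_INT_EN,
 *                                    PORTB_HOTPLUG_INT_EN);
 *
 * and passing 0 as @bits with the same @mask would disable it again.
 */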

static u32
gen11_gt_engine_identity(struct drm_i915_private * const i915,
                         const unsigned int bank, const unsigned int bit);

static bool gen11_reset_one_iir(struct drm_i915_private * const i915,
                                const unsigned int bank,
                                const unsigned int bit)
{
        void __iomem * const regs = i915->uncore.regs;
        u32 dw;

        lockdep_assert_held(&i915->irq_lock);

        dw = raw_reg_read(regs, GEN11_GT_INTR_DW(bank));
        if (dw & BIT(bit)) {
                /*
                 * According to the BSpec, DW_IIR bits cannot be cleared without
                 * first servicing the Selector & Shared IIR registers.
                 */
                gen11_gt_engine_identity(i915, bank, bit);

                /*
                 * We locked GT INT DW by reading it. If we want to (try
                 * to) recover from this successfully, we need to clear
                 * our bit, otherwise we are locking the register for
                 * everybody.
                 */
                raw_reg_write(regs, GEN11_GT_INTR_DW(bank), BIT(bit));

                return true;
        }

        return false;
}

/**
 * ilk_update_display_irq - update DEIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
void ilk_update_display_irq(struct drm_i915_private *dev_priv,
                            u32 interrupt_mask,
                            u32 enabled_irq_mask)
{
        u32 new_val;

        lockdep_assert_held(&dev_priv->irq_lock);

        WARN_ON(enabled_irq_mask & ~interrupt_mask);

        if (WARN_ON(!intel_irqs_enabled(dev_priv)))
                return;

        new_val = dev_priv->irq_mask;
        new_val &= ~interrupt_mask;
        new_val |= (~enabled_irq_mask & interrupt_mask);

        if (new_val != dev_priv->irq_mask) {
                dev_priv->irq_mask = new_val;
                I915_WRITE(DEIMR, dev_priv->irq_mask);
                POSTING_READ(DEIMR);
        }
}
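/*
 * Worked example of the mask arithmetic above (bit chosen purely for
 * illustration): with interrupt_mask == enabled_irq_mask == DE_GSE, new_val
 * first has the DE_GSE bit cleared and then (~DE_GSE & DE_GSE) == 0 OR'ed
 * back in, leaving the bit clear in DEIMR, i.e. the interrupt unmasked.
 * With enabled_irq_mask == 0 the bit is set instead, masking the interrupt.
 */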

/**
 * ilk_update_gt_irq - update GTIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
static void ilk_update_gt_irq(struct drm_i915_private *dev_priv,
                              u32 interrupt_mask,
                              u32 enabled_irq_mask)
{
        lockdep_assert_held(&dev_priv->irq_lock);

        WARN_ON(enabled_irq_mask & ~interrupt_mask);

        if (WARN_ON(!intel_irqs_enabled(dev_priv)))
                return;

        dev_priv->gt_irq_mask &= ~interrupt_mask;
        dev_priv->gt_irq_mask |= (~enabled_irq_mask & interrupt_mask);
        I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
}

void gen5_enable_gt_irq(struct drm_i915_private *dev_priv, u32 mask)
{
        ilk_update_gt_irq(dev_priv, mask, mask);
        POSTING_READ_FW(GTIMR);
}

void gen5_disable_gt_irq(struct drm_i915_private *dev_priv, u32 mask)
{
        ilk_update_gt_irq(dev_priv, mask, 0);
}

static i915_reg_t gen6_pm_iir(struct drm_i915_private *dev_priv)
{
        WARN_ON_ONCE(INTEL_GEN(dev_priv) >= 11);

        return INTEL_GEN(dev_priv) >= 8 ? GEN8_GT_IIR(2) : GEN6_PMIIR;
}

static void write_pm_imr(struct drm_i915_private *dev_priv)
{
        i915_reg_t reg;
        u32 mask = dev_priv->pm_imr;

        if (INTEL_GEN(dev_priv) >= 11) {
                reg = GEN11_GPM_WGBOXPERF_INTR_MASK;
                /* pm is in upper half */
                mask = mask << 16;
        } else if (INTEL_GEN(dev_priv) >= 8) {
                reg = GEN8_GT_IMR(2);
        } else {
                reg = GEN6_PMIMR;
        }

        I915_WRITE(reg, mask);
        POSTING_READ(reg);
}

static void write_pm_ier(struct drm_i915_private *dev_priv)
{
        i915_reg_t reg;
        u32 mask = dev_priv->pm_ier;

        if (INTEL_GEN(dev_priv) >= 11) {
                reg = GEN11_GPM_WGBOXPERF_INTR_ENABLE;
                /* pm is in upper half */
                mask = mask << 16;
        } else if (INTEL_GEN(dev_priv) >= 8) {
                reg = GEN8_GT_IER(2);
        } else {
                reg = GEN6_PMIER;
        }

        I915_WRITE(reg, mask);
}

/**
 * snb_update_pm_irq - update GEN6_PMIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
static void snb_update_pm_irq(struct drm_i915_private *dev_priv,
                              u32 interrupt_mask,
                              u32 enabled_irq_mask)
{
        u32 new_val;

        WARN_ON(enabled_irq_mask & ~interrupt_mask);

        lockdep_assert_held(&dev_priv->irq_lock);

        new_val = dev_priv->pm_imr;
        new_val &= ~interrupt_mask;
        new_val |= (~enabled_irq_mask & interrupt_mask);

        if (new_val != dev_priv->pm_imr) {
                dev_priv->pm_imr = new_val;
                write_pm_imr(dev_priv);
        }
}

void gen6_unmask_pm_irq(struct drm_i915_private *dev_priv, u32 mask)
{
        if (WARN_ON(!intel_irqs_enabled(dev_priv)))
                return;

        snb_update_pm_irq(dev_priv, mask, mask);
}

static void __gen6_mask_pm_irq(struct drm_i915_private *dev_priv, u32 mask)
{
        snb_update_pm_irq(dev_priv, mask, 0);
}

void gen6_mask_pm_irq(struct drm_i915_private *dev_priv, u32 mask)
{
        if (WARN_ON(!intel_irqs_enabled(dev_priv)))
                return;

        __gen6_mask_pm_irq(dev_priv, mask);
}

static void gen6_reset_pm_iir(struct drm_i915_private *dev_priv, u32 reset_mask)
{
        i915_reg_t reg = gen6_pm_iir(dev_priv);

        lockdep_assert_held(&dev_priv->irq_lock);

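        /*
         * As with gen3_irq_reset() above, IIR can theoretically queue up
         * two events, so write the reset mask twice to be paranoid.
         */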
        I915_WRITE(reg, reset_mask);
        I915_WRITE(reg, reset_mask);
        POSTING_READ(reg);
}

static void gen6_enable_pm_irq(struct drm_i915_private *dev_priv, u32 enable_mask)
{
        lockdep_assert_held(&dev_priv->irq_lock);

        dev_priv->pm_ier |= enable_mask;
        write_pm_ier(dev_priv);
        gen6_unmask_pm_irq(dev_priv, enable_mask);
        /* unmask_pm_irq provides an implicit barrier (POSTING_READ) */
}

static void gen6_disable_pm_irq(struct drm_i915_private *dev_priv, u32 disable_mask)
{
        lockdep_assert_held(&dev_priv->irq_lock);

        dev_priv->pm_ier &= ~disable_mask;
        __gen6_mask_pm_irq(dev_priv, disable_mask);
        write_pm_ier(dev_priv);
        /* though a barrier is missing here, we don't really need one */
}

void gen11_reset_rps_interrupts(struct drm_i915_private *dev_priv)
{
        spin_lock_irq(&dev_priv->irq_lock);

        while (gen11_reset_one_iir(dev_priv, 0, GEN11_GTPM))
                ;

        dev_priv->gt_pm.rps.pm_iir = 0;

        spin_unlock_irq(&dev_priv->irq_lock);
}

void gen6_reset_rps_interrupts(struct drm_i915_private *dev_priv)
{
        spin_lock_irq(&dev_priv->irq_lock);
        gen6_reset_pm_iir(dev_priv, GEN6_PM_RPS_EVENTS);
        dev_priv->gt_pm.rps.pm_iir = 0;
        spin_unlock_irq(&dev_priv->irq_lock);
}

void gen6_enable_rps_interrupts(struct drm_i915_private *dev_priv)
{
        struct intel_rps *rps = &dev_priv->gt_pm.rps;

        if (READ_ONCE(rps->interrupts_enabled))
                return;

        spin_lock_irq(&dev_priv->irq_lock);
        WARN_ON_ONCE(rps->pm_iir);

        if (INTEL_GEN(dev_priv) >= 11)
                WARN_ON_ONCE(gen11_reset_one_iir(dev_priv, 0, GEN11_GTPM));
        else
                WARN_ON_ONCE(I915_READ(gen6_pm_iir(dev_priv)) & dev_priv->pm_rps_events);

        rps->interrupts_enabled = true;
        gen6_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);

        spin_unlock_irq(&dev_priv->irq_lock);
}

void gen6_disable_rps_interrupts(struct drm_i915_private *dev_priv)
{
        struct intel_rps *rps = &dev_priv->gt_pm.rps;

        if (!READ_ONCE(rps->interrupts_enabled))
                return;

        spin_lock_irq(&dev_priv->irq_lock);
        rps->interrupts_enabled = false;

        I915_WRITE(GEN6_PMINTRMSK, gen6_sanitize_rps_pm_mask(dev_priv, ~0u));

        gen6_disable_pm_irq(dev_priv, GEN6_PM_RPS_EVENTS);

        spin_unlock_irq(&dev_priv->irq_lock);
        synchronize_irq(dev_priv->drm.irq);

        /* Now that we will not be generating any more work, flush any
         * outstanding tasks. As we are called on the RPS idle path,
         * we will reset the GPU to minimum frequencies, so the current
         * state of the worker can be discarded.
         */
        cancel_work_sync(&rps->work);
        if (INTEL_GEN(dev_priv) >= 11)
                gen11_reset_rps_interrupts(dev_priv);
        else
                gen6_reset_rps_interrupts(dev_priv);
}

void gen9_reset_guc_interrupts(struct drm_i915_private *dev_priv)
{
        assert_rpm_wakelock_held(dev_priv);

        spin_lock_irq(&dev_priv->irq_lock);
        gen6_reset_pm_iir(dev_priv, dev_priv->pm_guc_events);
        spin_unlock_irq(&dev_priv->irq_lock);
}

void gen9_enable_guc_interrupts(struct drm_i915_private *dev_priv)
{
        assert_rpm_wakelock_held(dev_priv);

        spin_lock_irq(&dev_priv->irq_lock);
        if (!dev_priv->guc.interrupts_enabled) {
                WARN_ON_ONCE(I915_READ(gen6_pm_iir(dev_priv)) &
                             dev_priv->pm_guc_events);
                dev_priv->guc.interrupts_enabled = true;
                gen6_enable_pm_irq(dev_priv, dev_priv->pm_guc_events);
        }
        spin_unlock_irq(&dev_priv->irq_lock);
}

void gen9_disable_guc_interrupts(struct drm_i915_private *dev_priv)
{
        assert_rpm_wakelock_held(dev_priv);

        spin_lock_irq(&dev_priv->irq_lock);
        dev_priv->guc.interrupts_enabled = false;

        gen6_disable_pm_irq(dev_priv, dev_priv->pm_guc_events);

        spin_unlock_irq(&dev_priv->irq_lock);
        synchronize_irq(dev_priv->drm.irq);

        gen9_reset_guc_interrupts(dev_priv);
}

/**
 * bdw_update_port_irq - update DE port interrupt
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
static void bdw_update_port_irq(struct drm_i915_private *dev_priv,
                                u32 interrupt_mask,
                                u32 enabled_irq_mask)
{
        u32 new_val;
        u32 old_val;

        lockdep_assert_held(&dev_priv->irq_lock);

        WARN_ON(enabled_irq_mask & ~interrupt_mask);

        if (WARN_ON(!intel_irqs_enabled(dev_priv)))
                return;

        old_val = I915_READ(GEN8_DE_PORT_IMR);

        new_val = old_val;
        new_val &= ~interrupt_mask;
        new_val |= (~enabled_irq_mask & interrupt_mask);

        if (new_val != old_val) {
                I915_WRITE(GEN8_DE_PORT_IMR, new_val);
                POSTING_READ(GEN8_DE_PORT_IMR);
        }
}

/**
 * bdw_update_pipe_irq - update DE pipe interrupt
 * @dev_priv: driver private
 * @pipe: pipe whose interrupt to update
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
void bdw_update_pipe_irq(struct drm_i915_private *dev_priv,
                         enum pipe pipe,
                         u32 interrupt_mask,
                         u32 enabled_irq_mask)
{
        u32 new_val;

        lockdep_assert_held(&dev_priv->irq_lock);

        WARN_ON(enabled_irq_mask & ~interrupt_mask);

        if (WARN_ON(!intel_irqs_enabled(dev_priv)))
                return;

        new_val = dev_priv->de_irq_mask[pipe];
        new_val &= ~interrupt_mask;
        new_val |= (~enabled_irq_mask & interrupt_mask);

        if (new_val != dev_priv->de_irq_mask[pipe]) {
                dev_priv->de_irq_mask[pipe] = new_val;
                I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]);
                POSTING_READ(GEN8_DE_PIPE_IMR(pipe));
        }
}

/**
 * ibx_display_interrupt_update - update SDEIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
void ibx_display_interrupt_update(struct drm_i915_private *dev_priv,
                                  u32 interrupt_mask,
                                  u32 enabled_irq_mask)
{
        u32 sdeimr = I915_READ(SDEIMR);
        sdeimr &= ~interrupt_mask;
        sdeimr |= (~enabled_irq_mask & interrupt_mask);

        WARN_ON(enabled_irq_mask & ~interrupt_mask);

        lockdep_assert_held(&dev_priv->irq_lock);

        if (WARN_ON(!intel_irqs_enabled(dev_priv)))
                return;

        I915_WRITE(SDEIMR, sdeimr);
        POSTING_READ(SDEIMR);
}

u32 i915_pipestat_enable_mask(struct drm_i915_private *dev_priv,
                              enum pipe pipe)
{
        u32 status_mask = dev_priv->pipestat_irq_mask[pipe];
        u32 enable_mask = status_mask << 16;
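        /*
         * In the PIPESTAT register each enable bit sits 16 bits above its
         * status bit, so shifting the status mask left by 16 yields the
         * matching enable bits.
         */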

        lockdep_assert_held(&dev_priv->irq_lock);

        if (INTEL_GEN(dev_priv) < 5)
                goto out;

        /*
         * On pipe A we don't support the PSR interrupt yet,
         * on pipe B and C the same bit MBZ.
         */
        if (WARN_ON_ONCE(status_mask & PIPE_A_PSR_STATUS_VLV))
                return 0;
        /*
         * On pipe B and C we don't support the PSR interrupt yet, on pipe
         * A the same bit is for perf counters which we don't use either.
         */
        if (WARN_ON_ONCE(status_mask & PIPE_B_PSR_STATUS_VLV))
                return 0;

        enable_mask &= ~(PIPE_FIFO_UNDERRUN_STATUS |
                         SPRITE0_FLIP_DONE_INT_EN_VLV |
                         SPRITE1_FLIP_DONE_INT_EN_VLV);
        if (status_mask & SPRITE0_FLIP_DONE_INT_STATUS_VLV)
                enable_mask |= SPRITE0_FLIP_DONE_INT_EN_VLV;
        if (status_mask & SPRITE1_FLIP_DONE_INT_STATUS_VLV)
                enable_mask |= SPRITE1_FLIP_DONE_INT_EN_VLV;

out:
        WARN_ONCE(enable_mask & ~PIPESTAT_INT_ENABLE_MASK ||
                  status_mask & ~PIPESTAT_INT_STATUS_MASK,
                  "pipe %c: enable_mask=0x%x, status_mask=0x%x\n",
                  pipe_name(pipe), enable_mask, status_mask);

        return enable_mask;
}

void i915_enable_pipestat(struct drm_i915_private *dev_priv,
                          enum pipe pipe, u32 status_mask)
{
        i915_reg_t reg = PIPESTAT(pipe);
        u32 enable_mask;

        WARN_ONCE(status_mask & ~PIPESTAT_INT_STATUS_MASK,
                  "pipe %c: status_mask=0x%x\n",
                  pipe_name(pipe), status_mask);

        lockdep_assert_held(&dev_priv->irq_lock);
        WARN_ON(!intel_irqs_enabled(dev_priv));

        if ((dev_priv->pipestat_irq_mask[pipe] & status_mask) == status_mask)
                return;

        dev_priv->pipestat_irq_mask[pipe] |= status_mask;
        enable_mask = i915_pipestat_enable_mask(dev_priv, pipe);

        I915_WRITE(reg, enable_mask | status_mask);
        POSTING_READ(reg);
}

void i915_disable_pipestat(struct drm_i915_private *dev_priv,
                           enum pipe pipe, u32 status_mask)
{
        i915_reg_t reg = PIPESTAT(pipe);
        u32 enable_mask;

        WARN_ONCE(status_mask & ~PIPESTAT_INT_STATUS_MASK,
                  "pipe %c: status_mask=0x%x\n",
                  pipe_name(pipe), status_mask);

        lockdep_assert_held(&dev_priv->irq_lock);
        WARN_ON(!intel_irqs_enabled(dev_priv));

        if ((dev_priv->pipestat_irq_mask[pipe] & status_mask) == 0)
                return;

        dev_priv->pipestat_irq_mask[pipe] &= ~status_mask;
        enable_mask = i915_pipestat_enable_mask(dev_priv, pipe);

        I915_WRITE(reg, enable_mask | status_mask);
        POSTING_READ(reg);
}

static bool i915_has_asle(struct drm_i915_private *dev_priv)
{
        if (!dev_priv->opregion.asle)
                return false;

        return IS_PINEVIEW(dev_priv) || IS_MOBILE(dev_priv);
}

/**
 * i915_enable_asle_pipestat - enable ASLE pipestat for OpRegion
 * @dev_priv: i915 device private
 */
static void i915_enable_asle_pipestat(struct drm_i915_private *dev_priv)
{
        if (!i915_has_asle(dev_priv))
                return;

        spin_lock_irq(&dev_priv->irq_lock);

        i915_enable_pipestat(dev_priv, PIPE_B, PIPE_LEGACY_BLC_EVENT_STATUS);
        if (INTEL_GEN(dev_priv) >= 4)
                i915_enable_pipestat(dev_priv, PIPE_A,
                                     PIPE_LEGACY_BLC_EVENT_STATUS);

        spin_unlock_irq(&dev_priv->irq_lock);
}

/*
 * This timing diagram depicts the video signal in and
 * around the vertical blanking period.
 *
 * Assumptions about the fictitious mode used in this example:
 *  vblank_start >= 3
 *  vsync_start = vblank_start + 1
 *  vsync_end = vblank_start + 2
 *  vtotal = vblank_start + 3
 *
 *           start of vblank:
 *           latch double buffered registers
 *           increment frame counter (ctg+)
 *           generate start of vblank interrupt (gen4+)
 *           |
 *           |          frame start:
 *           |          generate frame start interrupt (aka. vblank interrupt) (gmch)
 *           |          may be shifted forward 1-3 extra lines via PIPECONF
 *           |          |
 *           |          |  start of vsync:
 *           |          |  generate vsync interrupt
 *           |          |  |
 * ___xxxx___    ___xxxx___    ___xxxx___    ___xxxx___    ___xxxx___    ___xxxx
 *       .   \hs/   .      \hs/          \hs/          \hs/   .      \hs/
 * ----va---> <-----------------vb--------------------> <--------va-------------
 *       |          |       <----vs----->                     |
 * -vbs-----> <---vbs+1---> <---vbs+2---> <-----0-----> <-----1-----> <-----2--- (scanline counter gen2)
 * -vbs-2---> <---vbs-1---> <---vbs-----> <---vbs+1---> <---vbs+2---> <-----0--- (scanline counter gen3+)
 * -vbs-2---> <---vbs-2---> <---vbs-1---> <---vbs-----> <---vbs+1---> <---vbs+2- (scanline counter hsw+ hdmi)
 *       |          |                                         |
 *       last visible pixel                                   first visible pixel
 *                  |                                         increment frame counter (gen3/4)
 *                  pixel counter = vblank_start * htotal     pixel counter = 0 (gen3/4)
 *
 * x  = horizontal active
 * _  = horizontal blanking
 * hs = horizontal sync
 * va = vertical active
 * vb = vertical blanking
 * vs = vertical sync
 * vbs = vblank_start (number)
 *
 * Summary:
 * - most events happen at the start of horizontal sync
 * - frame start happens at the start of horizontal blank, 1-4 lines
 *   (depending on PIPECONF settings) after the start of vblank
 * - gen3/4 pixel and frame counter are synchronized with the start
 *   of horizontal active on the first line of vertical active
 */

/* Called from drm generic code, passed a 'crtc', which
 * we use as a pipe index
 */
static u32 i915_get_vblank_counter(struct drm_device *dev, unsigned int pipe)
{
        struct drm_i915_private *dev_priv = to_i915(dev);
        struct drm_vblank_crtc *vblank = &dev->vblank[pipe];
        const struct drm_display_mode *mode = &vblank->hwmode;
        i915_reg_t high_frame, low_frame;
        u32 high1, high2, low, pixel, vbl_start, hsync_start, htotal;
        unsigned long irqflags;

        /*
         * On i965gm TV output the frame counter only works up to
         * the point when we enable the TV encoder. After that the
         * frame counter ceases to work and reads zero. We need a
         * vblank wait before enabling the TV encoder and so we
         * have to enable vblank interrupts while the frame counter
         * is still in a working state. However the core vblank code
         * does not like us returning non-zero frame counter values
         * when we've told it that we don't have a working frame
         * counter. Thus we must stop non-zero values leaking out.
         */
        if (!vblank->max_vblank_count)
                return 0;

        htotal = mode->crtc_htotal;
        hsync_start = mode->crtc_hsync_start;
        vbl_start = mode->crtc_vblank_start;
        if (mode->flags & DRM_MODE_FLAG_INTERLACE)
                vbl_start = DIV_ROUND_UP(vbl_start, 2);

        /* Convert to pixel count */
        vbl_start *= htotal;

        /* Start of vblank event occurs at start of hsync */
        vbl_start -= htotal - hsync_start;

        high_frame = PIPEFRAME(pipe);
        low_frame = PIPEFRAMEPIXEL(pipe);

        spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

        /*
         * High & low register fields aren't synchronized, so make sure
         * we get a low value that's stable across two reads of the high
         * register.
         */
        do {
                high1 = I915_READ_FW(high_frame) & PIPE_FRAME_HIGH_MASK;
                low   = I915_READ_FW(low_frame);
                high2 = I915_READ_FW(high_frame) & PIPE_FRAME_HIGH_MASK;
        } while (high1 != high2);

        spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);

        high1 >>= PIPE_FRAME_HIGH_SHIFT;
        pixel = low & PIPE_PIXEL_MASK;
        low >>= PIPE_FRAME_LOW_SHIFT;

        /*
         * The frame counter increments at beginning of active.
         * Cook up a vblank counter by also checking the pixel
         * counter against vblank start.
         */
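        /*
         * Worked example (illustrative numbers): with high1 == 0x12,
         * low == 0x34 and the pixel counter at or past vbl_start, this
         * returns ((0x12 << 8) | 0x34) + 1 == 0x1235, crediting the vblank
         * that the hardware frame counter hasn't ticked over for yet.
         */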
        return (((high1 << 8) | low) + (pixel >= vbl_start)) & 0xffffff;
}

static u32 g4x_get_vblank_counter(struct drm_device *dev, unsigned int pipe)
{
        struct drm_i915_private *dev_priv = to_i915(dev);

        return I915_READ(PIPE_FRMCOUNT_G4X(pipe));
}

/*
 * On certain encoders on certain platforms, the pipe
 * scanline register will not work to get the scanline,
 * since the timings are driven from the PORT, or there
 * are issues with scanline register updates.
 * This function will use the framestamp and current
 * timestamp registers to calculate the scanline.
 */
static u32 __intel_get_crtc_scanline_from_timestamp(struct intel_crtc *crtc)
{
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
        struct drm_vblank_crtc *vblank =
                &crtc->base.dev->vblank[drm_crtc_index(&crtc->base)];
        const struct drm_display_mode *mode = &vblank->hwmode;
        u32 vblank_start = mode->crtc_vblank_start;
        u32 vtotal = mode->crtc_vtotal;
        u32 htotal = mode->crtc_htotal;
        u32 clock = mode->crtc_clock;
        u32 scanline, scan_prev_time, scan_curr_time, scan_post_time;
        /*
         * To avoid the race condition where we might cross into the
         * next vblank just between the PIPE_FRMTMSTMP and TIMESTAMP_CTR
         * reads, we make sure we read PIPE_FRMTMSTMP and TIMESTAMP_CTR
         * during the same frame.
         */
        do {
                /*
                 * This field provides read back of the display
                 * pipe frame time stamp. The time stamp value
                 * is sampled at every start of vertical blank.
                 */
                scan_prev_time = I915_READ_FW(PIPE_FRMTMSTMP(crtc->pipe));

                /*
                 * The TIMESTAMP_CTR register has the current
                 * time stamp value.
                 */
                scan_curr_time = I915_READ_FW(IVB_TIMESTAMP_CTR);

                scan_post_time = I915_READ_FW(PIPE_FRMTMSTMP(crtc->pipe));
        } while (scan_post_time != scan_prev_time);

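        /*
         * Assuming the timestamps tick in microseconds and crtc_clock is
         * in kHz, (delta * clock) / 1000 is the number of pixels scanned
         * out since the last start of vblank, and dividing by htotal
         * converts that into whole scanlines.
         */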
        scanline = div_u64(mul_u32_u32(scan_curr_time - scan_prev_time,
                                       clock), 1000 * htotal);
        scanline = min(scanline, vtotal - 1);
        scanline = (scanline + vblank_start) % vtotal;

        return scanline;
}

/* I915_READ_FW, only for fast reads of display block, no need for forcewake etc. */
static int __intel_get_crtc_scanline(struct intel_crtc *crtc)
{
        struct drm_device *dev = crtc->base.dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
        const struct drm_display_mode *mode;
        struct drm_vblank_crtc *vblank;
        enum pipe pipe = crtc->pipe;
        int position, vtotal;

        if (!crtc->active)
                return -1;

        vblank = &crtc->base.dev->vblank[drm_crtc_index(&crtc->base)];
        mode = &vblank->hwmode;

        if (mode->private_flags & I915_MODE_FLAG_GET_SCANLINE_FROM_TIMESTAMP)
                return __intel_get_crtc_scanline_from_timestamp(crtc);

        vtotal = mode->crtc_vtotal;
        if (mode->flags & DRM_MODE_FLAG_INTERLACE)
                vtotal /= 2;

        if (IS_GEN(dev_priv, 2))
                position = I915_READ_FW(PIPEDSL(pipe)) & DSL_LINEMASK_GEN2;
        else
                position = I915_READ_FW(PIPEDSL(pipe)) & DSL_LINEMASK_GEN3;

        /*
         * On HSW, the DSL reg (0x70000) appears to return 0 if we
         * read it just before the start of vblank.  So try it again
         * so we don't accidentally end up spanning a vblank frame
         * increment, causing the pipe_update_end() code to squawk at us.
         *
         * The nature of this problem means we can't simply check the ISR
         * bit and return the vblank start value; nor can we use the scanline
         * debug register in the transcoder as it appears to have the same
         * problem.  We may need to extend this to include other platforms,
         * but so far testing only shows the problem on HSW.
         */
        if (HAS_DDI(dev_priv) && !position) {
                int i, temp;

                for (i = 0; i < 100; i++) {
                        udelay(1);
                        temp = I915_READ_FW(PIPEDSL(pipe)) & DSL_LINEMASK_GEN3;
                        if (temp != position) {
                                position = temp;
                                break;
                        }
                }
        }

        /*
         * See update_scanline_offset() for the details on the
         * scanline_offset adjustment.
         */
        return (position + crtc->scanline_offset) % vtotal;
}

static bool i915_get_crtc_scanoutpos(struct drm_device *dev, unsigned int pipe,
                                     bool in_vblank_irq, int *vpos, int *hpos,
                                     ktime_t *stime, ktime_t *etime,
                                     const struct drm_display_mode *mode)
{
        struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_crtc *intel_crtc = intel_get_crtc_for_pipe(dev_priv,
                                                                pipe);
        int position;
        int vbl_start, vbl_end, hsync_start, htotal, vtotal;
        unsigned long irqflags;
        bool use_scanline_counter = INTEL_GEN(dev_priv) >= 5 ||
                IS_G4X(dev_priv) || IS_GEN(dev_priv, 2) ||
                mode->private_flags & I915_MODE_FLAG_USE_SCANLINE_COUNTER;

        if (WARN_ON(!mode->crtc_clock)) {
                DRM_DEBUG_DRIVER("trying to get scanoutpos for disabled "
                                 "pipe %c\n", pipe_name(pipe));
                return false;
        }

        htotal = mode->crtc_htotal;
        hsync_start = mode->crtc_hsync_start;
        vtotal = mode->crtc_vtotal;
        vbl_start = mode->crtc_vblank_start;
        vbl_end = mode->crtc_vblank_end;

        if (mode->flags & DRM_MODE_FLAG_INTERLACE) {
                vbl_start = DIV_ROUND_UP(vbl_start, 2);
                vbl_end /= 2;
                vtotal /= 2;
        }

        /*
         * Lock uncore.lock, as we will do multiple timing critical raw
         * register reads, potentially with preemption disabled, so the
         * following code must not block on uncore.lock.
         */
        spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

        /* preempt_disable_rt() should go right here in PREEMPT_RT patchset. */

        /* Get optional system timestamp before query. */
        if (stime)
                *stime = ktime_get();

        if (use_scanline_counter) {
                /* No obvious pixelcount register. Only query vertical
                 * scanout position from Display scan line register.
                 */
                position = __intel_get_crtc_scanline(intel_crtc);
        } else {
                /* Have access to pixelcount since start of frame.
                 * We can split this into vertical and horizontal
                 * scanout position.
                 */
                position = (I915_READ_FW(PIPEFRAMEPIXEL(pipe)) & PIPE_PIXEL_MASK) >> PIPE_PIXEL_SHIFT;

                /* convert to pixel counts */
                vbl_start *= htotal;
                vbl_end *= htotal;
                vtotal *= htotal;

                /*
                 * In interlaced modes, the pixel counter counts all pixels,
                 * so one field will have htotal more pixels. In order to avoid
                 * the reported position from jumping backwards when the pixel
                 * counter is beyond the length of the shorter field, just
                 * clamp the position to the length of the shorter field. This
                 * matches how the scanline counter based position works since
                 * the scanline counter doesn't count the two half lines.
                 */
                if (position >= vtotal)
                        position = vtotal - 1;

                /*
                 * Start of vblank interrupt is triggered at start of hsync,
                 * just prior to the first active line of vblank. However we
                 * consider lines to start at the leading edge of horizontal
                 * active. So, should we get here before we've crossed into
                 * the horizontal active of the first line in vblank, we would
                 * not set the DRM_SCANOUTPOS_INVBL flag. In order to fix that,
                 * always add htotal-hsync_start to the current pixel position.
                 */
                position = (position + htotal - hsync_start) % vtotal;
        }

        /* Get optional system timestamp after query. */
        if (etime)
                *etime = ktime_get();

        /* preempt_enable_rt() should go right here in PREEMPT_RT patchset. */

        spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);

        /*
         * While in vblank, position will be negative
         * counting up towards 0 at vbl_end. And outside
         * vblank, position will be positive counting
         * up since vbl_end.
         */
        if (position >= vbl_start)
                position -= vbl_end;
        else
                position += vtotal - vbl_end;

        if (use_scanline_counter) {
                *vpos = position;
                *hpos = 0;
        } else {
                *vpos = position / htotal;
                *hpos = position - (*vpos * htotal);
        }

        return true;
}

int intel_get_crtc_scanline(struct intel_crtc *crtc)
{
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
        unsigned long irqflags;
        int position;

        spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
        position = __intel_get_crtc_scanline(crtc);
        spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);

        return position;
}

static void ironlake_rps_change_irq_handler(struct drm_i915_private *dev_priv)
{
        u32 busy_up, busy_down, max_avg, min_avg;
        u8 new_delay;

        spin_lock(&mchdev_lock);

        I915_WRITE16(MEMINTRSTS, I915_READ(MEMINTRSTS));

        new_delay = dev_priv->ips.cur_delay;

        I915_WRITE16(MEMINTRSTS, MEMINT_EVAL_CHG);
        busy_up = I915_READ(RCPREVBSYTUPAVG);
        busy_down = I915_READ(RCPREVBSYTDNAVG);
        max_avg = I915_READ(RCBMAXAVG);
        min_avg = I915_READ(RCBMINAVG);

        /* Handle RCS change request from hw */
        if (busy_up > max_avg) {
                if (dev_priv->ips.cur_delay != dev_priv->ips.max_delay)
                        new_delay = dev_priv->ips.cur_delay - 1;
                if (new_delay < dev_priv->ips.max_delay)
                        new_delay = dev_priv->ips.max_delay;
        } else if (busy_down < min_avg) {
                if (dev_priv->ips.cur_delay != dev_priv->ips.min_delay)
                        new_delay = dev_priv->ips.cur_delay + 1;
                if (new_delay > dev_priv->ips.min_delay)
                        new_delay = dev_priv->ips.min_delay;
        }

        if (ironlake_set_drps(dev_priv, new_delay))
                dev_priv->ips.cur_delay = new_delay;

        spin_unlock(&mchdev_lock);
}

static void vlv_c0_read(struct drm_i915_private *dev_priv,
                        struct intel_rps_ei *ei)
{
        ei->ktime = ktime_get_raw();
        ei->render_c0 = I915_READ(VLV_RENDER_C0_COUNT);
        ei->media_c0 = I915_READ(VLV_MEDIA_C0_COUNT);
}

void gen6_rps_reset_ei(struct drm_i915_private *dev_priv)
{
        memset(&dev_priv->gt_pm.rps.ei, 0, sizeof(dev_priv->gt_pm.rps.ei));
}

static u32 vlv_wa_c0_ei(struct drm_i915_private *dev_priv, u32 pm_iir)
{
        struct intel_rps *rps = &dev_priv->gt_pm.rps;
        const struct intel_rps_ei *prev = &rps->ei;
        struct intel_rps_ei now;
        u32 events = 0;

        if ((pm_iir & GEN6_PM_RP_UP_EI_EXPIRED) == 0)
                return 0;

        vlv_c0_read(dev_priv, &now);

        if (prev->ktime) {
                u64 time, c0;
                u32 render, media;

                time = ktime_us_delta(now.ktime, prev->ktime);

                time *= dev_priv->czclk_freq;

                /* Workload can be split between render + media,
                 * e.g. SwapBuffers being blitted in X after being rendered in
                 * mesa. To account for this we need to combine both engines
                 * into our activity counter.
                 */
                render = now.render_c0 - prev->render_c0;
                media = now.media_c0 - prev->media_c0;
                c0 = max(render, media);
                c0 *= 1000 * 100 << 8; /* to usecs and scale to threshold% */
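                /*
                 * Presumably this scaling brings c0 into the same units as
                 * time * threshold%, so the comparisons below effectively
                 * ask whether the GPU was busy for more than up_threshold /
                 * down_threshold percent of the elapsed interval.
                 */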

                if (c0 > time * rps->power.up_threshold)
                        events = GEN6_PM_RP_UP_THRESHOLD;
                else if (c0 < time * rps->power.down_threshold)
                        events = GEN6_PM_RP_DOWN_THRESHOLD;
        }

        rps->ei = now;
        return events;
}

static void gen6_pm_rps_work(struct work_struct *work)
{
        struct drm_i915_private *dev_priv =
                container_of(work, struct drm_i915_private, gt_pm.rps.work);
        struct intel_rps *rps = &dev_priv->gt_pm.rps;
        bool client_boost = false;
        int new_delay, adj, min, max;
        u32 pm_iir = 0;

        spin_lock_irq(&dev_priv->irq_lock);
        if (rps->interrupts_enabled) {
                pm_iir = fetch_and_zero(&rps->pm_iir);
                client_boost = atomic_read(&rps->num_waiters);
        }
        spin_unlock_irq(&dev_priv->irq_lock);

        /* Make sure we didn't queue anything we're not going to process. */
        WARN_ON(pm_iir & ~dev_priv->pm_rps_events);
        if ((pm_iir & dev_priv->pm_rps_events) == 0 && !client_boost)
                goto out;

        mutex_lock(&rps->lock);

        pm_iir |= vlv_wa_c0_ei(dev_priv, pm_iir);

        adj = rps->last_adj;
        new_delay = rps->cur_freq;
        min = rps->min_freq_softlimit;
        max = rps->max_freq_softlimit;
        if (client_boost)
                max = rps->max_freq;
        if (client_boost && new_delay < rps->boost_freq) {
                new_delay = rps->boost_freq;
                adj = 0;
        } else if (pm_iir & GEN6_PM_RP_UP_THRESHOLD) {
                if (adj > 0)
                        adj *= 2;
                else /* CHV needs even encode values */
                        adj = IS_CHERRYVIEW(dev_priv) ? 2 : 1;

                if (new_delay >= rps->max_freq_softlimit)
                        adj = 0;
        } else if (client_boost) {
                adj = 0;
        } else if (pm_iir & GEN6_PM_RP_DOWN_TIMEOUT) {
                if (rps->cur_freq > rps->efficient_freq)
                        new_delay = rps->efficient_freq;
                else if (rps->cur_freq > rps->min_freq_softlimit)
                        new_delay = rps->min_freq_softlimit;
                adj = 0;
        } else if (pm_iir & GEN6_PM_RP_DOWN_THRESHOLD) {
                if (adj < 0)
                        adj *= 2;
                else /* CHV needs even encode values */
                        adj = IS_CHERRYVIEW(dev_priv) ? -2 : -1;

                if (new_delay <= rps->min_freq_softlimit)
                        adj = 0;
        } else { /* unknown event */
                adj = 0;
        }
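        /*
         * Note the adaptive step above: consecutive moves in the same
         * direction keep doubling adj, while boosts, limits and unknown
         * events reset it to 0, so sustained load ramps the frequency
         * quickly.
         */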

        rps->last_adj = adj;

        /*
         * Limit deboosting and boosting to keep ourselves at the extremes
         * when in the respective power modes (i.e. slowly decrease frequencies
         * while in the HIGH_POWER zone and slowly increase frequencies while
         * in the LOW_POWER zone). On idle, we will hit the timeout and drop
         * to the next level quickly, and conversely if busy we expect to
         * hit a waitboost and rapidly switch into max power.
         */
        if ((adj < 0 && rps->power.mode == HIGH_POWER) ||
            (adj > 0 && rps->power.mode == LOW_POWER))
                rps->last_adj = 0;

        /* sysfs frequency interfaces may have snuck in while servicing the
         * interrupt
         */
        new_delay += adj;
        new_delay = clamp_t(int, new_delay, min, max);

        if (intel_set_rps(dev_priv, new_delay)) {
                DRM_DEBUG_DRIVER("Failed to set new GPU frequency\n");
                rps->last_adj = 0;
        }

        mutex_unlock(&rps->lock);

out:
        /* Make sure not to corrupt PMIMR state used by ringbuffer on GEN6 */
        spin_lock_irq(&dev_priv->irq_lock);
        if (rps->interrupts_enabled)
                gen6_unmask_pm_irq(dev_priv, dev_priv->pm_rps_events);
        spin_unlock_irq(&dev_priv->irq_lock);
}

/**
 * ivybridge_parity_work - Workqueue called when a parity error interrupt
 * occurred.
 * @work: workqueue struct
 *
 * Doesn't actually do anything except notify userspace. As a consequence of
 * this event, userspace should try to remap the bad rows, since statistically
 * the same row is more likely to go bad again.
 */
static void ivybridge_parity_work(struct work_struct *work)
{
        struct drm_i915_private *dev_priv =
                container_of(work, typeof(*dev_priv), l3_parity.error_work);
        u32 error_status, row, bank, subbank;
        char *parity_event[6];
        u32 misccpctl;
        u8 slice = 0;

        /* We must turn off DOP level clock gating to access the L3 registers.
         * In order to prevent a get/put style interface, acquire struct mutex
         * any time we access those registers.
         */
        mutex_lock(&dev_priv->drm.struct_mutex);

        /* If we've screwed up tracking, just let the interrupt fire again */
        if (WARN_ON(!dev_priv->l3_parity.which_slice))
                goto out;

        misccpctl = I915_READ(GEN7_MISCCPCTL);
        I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE);
        POSTING_READ(GEN7_MISCCPCTL);

        while ((slice = ffs(dev_priv->l3_parity.which_slice)) != 0) {
                i915_reg_t reg;

                slice--;
                if (WARN_ON_ONCE(slice >= NUM_L3_SLICES(dev_priv)))
                        break;

                dev_priv->l3_parity.which_slice &= ~(1<<slice);

                reg = GEN7_L3CDERRST1(slice);

                error_status = I915_READ(reg);
                row = GEN7_PARITY_ERROR_ROW(error_status);
                bank = GEN7_PARITY_ERROR_BANK(error_status);
                subbank = GEN7_PARITY_ERROR_SUBBANK(error_status);

                I915_WRITE(reg, GEN7_PARITY_ERROR_VALID | GEN7_L3CDERRST1_ENABLE);
                POSTING_READ(reg);

                parity_event[0] = I915_L3_PARITY_UEVENT "=1";
                parity_event[1] = kasprintf(GFP_KERNEL, "ROW=%d", row);
                parity_event[2] = kasprintf(GFP_KERNEL, "BANK=%d", bank);
                parity_event[3] = kasprintf(GFP_KERNEL, "SUBBANK=%d", subbank);
                parity_event[4] = kasprintf(GFP_KERNEL, "SLICE=%d", slice);
                parity_event[5] = NULL;
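                /*
                 * The resulting uevent environment would look roughly like
                 * (numbers hypothetical):
                 *
                 *      L3_PARITY_ERROR=1 ROW=5 BANK=1 SUBBANK=0 SLICE=0
                 */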
1442
1443                 kobject_uevent_env(&dev_priv->drm.primary->kdev->kobj,
1444                                    KOBJ_CHANGE, parity_event);
1445
1446                 DRM_DEBUG("Parity error: Slice = %d, Row = %d, Bank = %d, Sub bank = %d.\n",
1447                           slice, row, bank, subbank);
1448
1449                 kfree(parity_event[4]);
1450                 kfree(parity_event[3]);
1451                 kfree(parity_event[2]);
1452                 kfree(parity_event[1]);
1453         }
1454
1455         I915_WRITE(GEN7_MISCCPCTL, misccpctl);
1456
1457 out:
1458         WARN_ON(dev_priv->l3_parity.which_slice);
1459         spin_lock_irq(&dev_priv->irq_lock);
1460         gen5_enable_gt_irq(dev_priv, GT_PARITY_ERROR(dev_priv));
1461         spin_unlock_irq(&dev_priv->irq_lock);
1462
1463         mutex_unlock(&dev_priv->drm.struct_mutex);
1464 }
1465
1466 static void ivybridge_parity_error_irq_handler(struct drm_i915_private *dev_priv,
1467                                                u32 iir)
1468 {
1469         if (!HAS_L3_DPF(dev_priv))
1470                 return;
1471
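        /*
         * Mask further parity interrupts until the work item has drained
         * the error status; ivybridge_parity_work() re-enables them once
         * it is done.
         */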
1472         spin_lock(&dev_priv->irq_lock);
1473         gen5_disable_gt_irq(dev_priv, GT_PARITY_ERROR(dev_priv));
1474         spin_unlock(&dev_priv->irq_lock);
1475
1476         iir &= GT_PARITY_ERROR(dev_priv);
1477         if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT_S1)
1478                 dev_priv->l3_parity.which_slice |= 1 << 1;
1479
1480         if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT)
1481                 dev_priv->l3_parity.which_slice |= 1 << 0;
1482
1483         queue_work(dev_priv->wq, &dev_priv->l3_parity.error_work);
1484 }
1485
1486 static void ilk_gt_irq_handler(struct drm_i915_private *dev_priv,
1487                                u32 gt_iir)
1488 {
1489         if (gt_iir & GT_RENDER_USER_INTERRUPT)
1490                 intel_engine_breadcrumbs_irq(dev_priv->engine[RCS0]);
1491         if (gt_iir & ILK_BSD_USER_INTERRUPT)
1492                 intel_engine_breadcrumbs_irq(dev_priv->engine[VCS0]);
1493 }
1494
1495 static void snb_gt_irq_handler(struct drm_i915_private *dev_priv,
1496                                u32 gt_iir)
1497 {
1498         if (gt_iir & GT_RENDER_USER_INTERRUPT)
1499                 intel_engine_breadcrumbs_irq(dev_priv->engine[RCS0]);
1500         if (gt_iir & GT_BSD_USER_INTERRUPT)
1501                 intel_engine_breadcrumbs_irq(dev_priv->engine[VCS0]);
1502         if (gt_iir & GT_BLT_USER_INTERRUPT)
1503                 intel_engine_breadcrumbs_irq(dev_priv->engine[BCS0]);
1504
1505         if (gt_iir & (GT_BLT_CS_ERROR_INTERRUPT |
1506                       GT_BSD_CS_ERROR_INTERRUPT |
1507                       GT_RENDER_CS_MASTER_ERROR_INTERRUPT))
1508                 DRM_DEBUG("Command parser error, gt_iir 0x%08x\n", gt_iir);
1509
1510         if (gt_iir & GT_PARITY_ERROR(dev_priv))
1511                 ivybridge_parity_error_irq_handler(dev_priv, gt_iir);
1512 }
1513
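/*
 * User interrupts signal request completion and wake any waiters through the
 * breadcrumbs machinery; context-switch events (and any breadcrumb processing
 * the engine defers) are handled in the execlists tasklet instead.
 */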
1514 static void
1515 gen8_cs_irq_handler(struct intel_engine_cs *engine, u32 iir)
1516 {
1517         bool tasklet = false;
1518
1519         if (iir & GT_CONTEXT_SWITCH_INTERRUPT)
1520                 tasklet = true;
1521
1522         if (iir & GT_RENDER_USER_INTERRUPT) {
1523                 intel_engine_breadcrumbs_irq(engine);
1524                 tasklet |= intel_engine_needs_breadcrumb_tasklet(engine);
1525         }
1526
1527         if (tasklet)
1528                 tasklet_hi_schedule(&engine->execlists.tasklet);
1529 }
1530
1531 static void gen8_gt_irq_ack(struct drm_i915_private *i915,
1532                             u32 master_ctl, u32 gt_iir[4])
1533 {
1534         void __iomem * const regs = i915->uncore.regs;
1535
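        /*
         * IIR bits are acked by writing them back. The raw accessors skip
         * the uncore lock and forcewake bookkeeping, keeping this hard-irq
         * path cheap.
         */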
1536 #define GEN8_GT_IRQS (GEN8_GT_RCS_IRQ | \
1537                       GEN8_GT_BCS_IRQ | \
1538                       GEN8_GT_VCS0_IRQ | \
1539                       GEN8_GT_VCS1_IRQ | \
1540                       GEN8_GT_VECS_IRQ | \
1541                       GEN8_GT_PM_IRQ | \
1542                       GEN8_GT_GUC_IRQ)
1543
1544         if (master_ctl & (GEN8_GT_RCS_IRQ | GEN8_GT_BCS_IRQ)) {
1545                 gt_iir[0] = raw_reg_read(regs, GEN8_GT_IIR(0));
1546                 if (likely(gt_iir[0]))
1547                         raw_reg_write(regs, GEN8_GT_IIR(0), gt_iir[0]);
1548         }
1549
1550         if (master_ctl & (GEN8_GT_VCS0_IRQ | GEN8_GT_VCS1_IRQ)) {
1551                 gt_iir[1] = raw_reg_read(regs, GEN8_GT_IIR(1));
1552                 if (likely(gt_iir[1]))
1553                         raw_reg_write(regs, GEN8_GT_IIR(1), gt_iir[1]);
1554         }
1555
1556         if (master_ctl & (GEN8_GT_PM_IRQ | GEN8_GT_GUC_IRQ)) {
1557                 gt_iir[2] = raw_reg_read(regs, GEN8_GT_IIR(2));
1558                 if (likely(gt_iir[2]))
1559                         raw_reg_write(regs, GEN8_GT_IIR(2), gt_iir[2]);
1560         }
1561
1562         if (master_ctl & GEN8_GT_VECS_IRQ) {
1563                 gt_iir[3] = raw_reg_read(regs, GEN8_GT_IIR(3));
1564                 if (likely(gt_iir[3]))
1565                         raw_reg_write(regs, GEN8_GT_IIR(3), gt_iir[3]);
1566         }
1567 }
1568
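/*
 * Each GT IIR dword packs the status bits for two engines; shift the saved
 * dword so that gen8_cs_irq_handler() always sees its engine's bits in the
 * low bits.
 */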
1569 static void gen8_gt_irq_handler(struct drm_i915_private *i915,
1570                                 u32 master_ctl, u32 gt_iir[4])
1571 {
1572         if (master_ctl & (GEN8_GT_RCS_IRQ | GEN8_GT_BCS_IRQ)) {
1573                 gen8_cs_irq_handler(i915->engine[RCS0],
1574                                     gt_iir[0] >> GEN8_RCS_IRQ_SHIFT);
1575                 gen8_cs_irq_handler(i915->engine[BCS0],
1576                                     gt_iir[0] >> GEN8_BCS_IRQ_SHIFT);
1577         }
1578
1579         if (master_ctl & (GEN8_GT_VCS0_IRQ | GEN8_GT_VCS1_IRQ)) {
1580                 gen8_cs_irq_handler(i915->engine[VCS0],
1581                                     gt_iir[1] >> GEN8_VCS0_IRQ_SHIFT);
1582                 gen8_cs_irq_handler(i915->engine[VCS1],
1583                                     gt_iir[1] >> GEN8_VCS1_IRQ_SHIFT);
1584         }
1585
1586         if (master_ctl & GEN8_GT_VECS_IRQ) {
1587                 gen8_cs_irq_handler(i915->engine[VECS0],
1588                                     gt_iir[3] >> GEN8_VECS_IRQ_SHIFT);
1589         }
1590
1591         if (master_ctl & (GEN8_GT_PM_IRQ | GEN8_GT_GUC_IRQ)) {
1592                 gen6_rps_irq_handler(i915, gt_iir[2]);
1593                 gen9_guc_irq_handler(i915, gt_iir[2]);
1594         }
1595 }
1596
1597 static bool gen11_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
1598 {
1599         switch (pin) {
1600         case HPD_PORT_C:
1601                 return val & GEN11_HOTPLUG_CTL_LONG_DETECT(PORT_TC1);
1602         case HPD_PORT_D:
1603                 return val & GEN11_HOTPLUG_CTL_LONG_DETECT(PORT_TC2);
1604         case HPD_PORT_E:
1605                 return val & GEN11_HOTPLUG_CTL_LONG_DETECT(PORT_TC3);
1606         case HPD_PORT_F:
1607                 return val & GEN11_HOTPLUG_CTL_LONG_DETECT(PORT_TC4);
1608         default:
1609                 return false;
1610         }
1611 }
1612
1613 static bool bxt_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
1614 {
1615         switch (pin) {
1616         case HPD_PORT_A:
1617                 return val & PORTA_HOTPLUG_LONG_DETECT;
1618         case HPD_PORT_B:
1619                 return val & PORTB_HOTPLUG_LONG_DETECT;
1620         case HPD_PORT_C:
1621                 return val & PORTC_HOTPLUG_LONG_DETECT;
1622         default:
1623                 return false;
1624         }
1625 }
1626
1627 static bool icp_ddi_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
1628 {
1629         switch (pin) {
1630         case HPD_PORT_A:
1631                 return val & ICP_DDIA_HPD_LONG_DETECT;
1632         case HPD_PORT_B:
1633                 return val & ICP_DDIB_HPD_LONG_DETECT;
1634         default:
1635                 return false;
1636         }
1637 }
1638
1639 static bool icp_tc_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
1640 {
1641         switch (pin) {
1642         case HPD_PORT_C:
1643                 return val & ICP_TC_HPD_LONG_DETECT(PORT_TC1);
1644         case HPD_PORT_D:
1645                 return val & ICP_TC_HPD_LONG_DETECT(PORT_TC2);
1646         case HPD_PORT_E:
1647                 return val & ICP_TC_HPD_LONG_DETECT(PORT_TC3);
1648         case HPD_PORT_F:
1649                 return val & ICP_TC_HPD_LONG_DETECT(PORT_TC4);
1650         default:
1651                 return false;
1652         }
1653 }
1654
1655 static bool spt_port_hotplug2_long_detect(enum hpd_pin pin, u32 val)
1656 {
1657         switch (pin) {
1658         case HPD_PORT_E:
1659                 return val & PORTE_HOTPLUG_LONG_DETECT;
1660         default:
1661                 return false;
1662         }
1663 }
1664
1665 static bool spt_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
1666 {
1667         switch (pin) {
1668         case HPD_PORT_A:
1669                 return val & PORTA_HOTPLUG_LONG_DETECT;
1670         case HPD_PORT_B:
1671                 return val & PORTB_HOTPLUG_LONG_DETECT;
1672         case HPD_PORT_C:
1673                 return val & PORTC_HOTPLUG_LONG_DETECT;
1674         case HPD_PORT_D:
1675                 return val & PORTD_HOTPLUG_LONG_DETECT;
1676         default:
1677                 return false;
1678         }
1679 }
1680
1681 static bool ilk_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
1682 {
1683         switch (pin) {
1684         case HPD_PORT_A:
1685                 return val & DIGITAL_PORTA_HOTPLUG_LONG_DETECT;
1686         default:
1687                 return false;
1688         }
1689 }
1690
1691 static bool pch_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
1692 {
1693         switch (pin) {
1694         case HPD_PORT_B:
1695                 return val & PORTB_HOTPLUG_LONG_DETECT;
1696         case HPD_PORT_C:
1697                 return val & PORTC_HOTPLUG_LONG_DETECT;
1698         case HPD_PORT_D:
1699                 return val & PORTD_HOTPLUG_LONG_DETECT;
1700         default:
1701                 return false;
1702         }
1703 }
1704
1705 static bool i9xx_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
1706 {
1707         switch (pin) {
1708         case HPD_PORT_B:
1709                 return val & PORTB_HOTPLUG_INT_LONG_PULSE;
1710         case HPD_PORT_C:
1711                 return val & PORTC_HOTPLUG_INT_LONG_PULSE;
1712         case HPD_PORT_D:
1713                 return val & PORTD_HOTPLUG_INT_LONG_PULSE;
1714         default:
1715                 return false;
1716         }
1717 }
1718
1719 /*
1720  * Get a bit mask of pins that have triggered, and which ones may be long.
1721  * This can be called multiple times with the same masks to accumulate
1722  * hotplug detection results from several registers.
1723  *
1724  * Note that the caller is expected to zero out the masks initially.
1725  */
1726 static void intel_get_hpd_pins(struct drm_i915_private *dev_priv,
1727                                u32 *pin_mask, u32 *long_mask,
1728                                u32 hotplug_trigger, u32 dig_hotplug_reg,
1729                                const u32 hpd[HPD_NUM_PINS],
1730                                bool long_pulse_detect(enum hpd_pin pin, u32 val))
1731 {
1732         enum hpd_pin pin;
1733
1734         for_each_hpd_pin(pin) {
1735                 if ((hpd[pin] & hotplug_trigger) == 0)
1736                         continue;
1737
1738                 *pin_mask |= BIT(pin);
1739
1740                 if (long_pulse_detect(pin, dig_hotplug_reg))
1741                         *long_mask |= BIT(pin);
1742         }
1743
1744         DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x, dig 0x%08x, pins 0x%08x, long 0x%08x\n",
1745                          hotplug_trigger, dig_hotplug_reg, *pin_mask, *long_mask);
1747 }
1748
1749 static void gmbus_irq_handler(struct drm_i915_private *dev_priv)
1750 {
1751         wake_up_all(&dev_priv->gmbus_wait_queue);
1752 }
1753
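/*
 * DP AUX transfers sleep on the same wait queue as GMBUS transfers, so AUX
 * completion also wakes gmbus_wait_queue.
 */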
1754 static void dp_aux_irq_handler(struct drm_i915_private *dev_priv)
1755 {
1756         wake_up_all(&dev_priv->gmbus_wait_queue);
1757 }
1758
1759 #if defined(CONFIG_DEBUG_FS)
1760 static void display_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
1761                                          enum pipe pipe,
1762                                          u32 crc0, u32 crc1,
1763                                          u32 crc2, u32 crc3,
1764                                          u32 crc4)
1765 {
1766         struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe];
1767         struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
1768         u32 crcs[5] = { crc0, crc1, crc2, crc3, crc4 };
1769
1770         trace_intel_pipe_crc(crtc, crcs);
1771
1772         spin_lock(&pipe_crc->lock);
1773         /*
1774          * For some not yet identified reason, the first CRC is
1775          * bonkers. So let's just wait for the next vblank and read
1776          * out the buggy result.
1777          *
1778          * On GEN8+ sometimes the second CRC is bonkers as well, so
1779          * don't trust that one either.
1780          */
1781         if (pipe_crc->skipped <= 0 ||
1782             (INTEL_GEN(dev_priv) >= 8 && pipe_crc->skipped == 1)) {
1783                 pipe_crc->skipped++;
1784                 spin_unlock(&pipe_crc->lock);
1785                 return;
1786         }
1787         spin_unlock(&pipe_crc->lock);
1788
1789         drm_crtc_add_crc_entry(&crtc->base, true,
1790                                 drm_crtc_accurate_vblank_count(&crtc->base),
1791                                 crcs);
1792 }
1793 #else
1794 static inline void
1795 display_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
1796                              enum pipe pipe,
1797                              u32 crc0, u32 crc1,
1798                              u32 crc2, u32 crc3,
1799                              u32 crc4) {}
1800 #endif
1801
1803 static void hsw_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
1804                                      enum pipe pipe)
1805 {
1806         display_pipe_crc_irq_handler(dev_priv, pipe,
1807                                      I915_READ(PIPE_CRC_RES_1_IVB(pipe)),
1808                                      0, 0, 0, 0);
1809 }
1810
1811 static void ivb_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
1812                                      enum pipe pipe)
1813 {
1814         display_pipe_crc_irq_handler(dev_priv, pipe,
1815                                      I915_READ(PIPE_CRC_RES_1_IVB(pipe)),
1816                                      I915_READ(PIPE_CRC_RES_2_IVB(pipe)),
1817                                      I915_READ(PIPE_CRC_RES_3_IVB(pipe)),
1818                                      I915_READ(PIPE_CRC_RES_4_IVB(pipe)),
1819                                      I915_READ(PIPE_CRC_RES_5_IVB(pipe)));
1820 }
1821
1822 static void i9xx_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
1823                                       enum pipe pipe)
1824 {
1825         u32 res1, res2;
1826
1827         if (INTEL_GEN(dev_priv) >= 3)
1828                 res1 = I915_READ(PIPE_CRC_RES_RES1_I915(pipe));
1829         else
1830                 res1 = 0;
1831
1832         if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv))
1833                 res2 = I915_READ(PIPE_CRC_RES_RES2_G4X(pipe));
1834         else
1835                 res2 = 0;
1836
1837         display_pipe_crc_irq_handler(dev_priv, pipe,
1838                                      I915_READ(PIPE_CRC_RES_RED(pipe)),
1839                                      I915_READ(PIPE_CRC_RES_GREEN(pipe)),
1840                                      I915_READ(PIPE_CRC_RES_BLUE(pipe)),
1841                                      res1, res2);
1842 }
1843
1844 /* The RPS events need forcewake, so we add them to a work queue and mask their
1845  * IMR bits until the work is done. Other interrupts can be processed without
1846  * the work queue. */
1847 static void gen11_rps_irq_handler(struct drm_i915_private *i915, u32 pm_iir)
1848 {
1849         struct intel_rps *rps = &i915->gt_pm.rps;
1850         const u32 events = i915->pm_rps_events & pm_iir;
1851
1852         lockdep_assert_held(&i915->irq_lock);
1853
1854         if (unlikely(!events))
1855                 return;
1856
1857         gen6_mask_pm_irq(i915, events);
1858
1859         if (!rps->interrupts_enabled)
1860                 return;
1861
1862         rps->pm_iir |= events;
1863         schedule_work(&rps->work);
1864 }
1865
1866 static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir)
1867 {
1868         struct intel_rps *rps = &dev_priv->gt_pm.rps;
1869
1870         if (pm_iir & dev_priv->pm_rps_events) {
1871                 spin_lock(&dev_priv->irq_lock);
1872                 gen6_mask_pm_irq(dev_priv, pm_iir & dev_priv->pm_rps_events);
1873                 if (rps->interrupts_enabled) {
1874                         rps->pm_iir |= pm_iir & dev_priv->pm_rps_events;
1875                         schedule_work(&rps->work);
1876                 }
1877                 spin_unlock(&dev_priv->irq_lock);
1878         }
1879
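        /*
         * On gen8+ the VEBOX user interrupt arrives via the GT IIRs and is
         * handled by gen8_cs_irq_handler(); only gen6/7 deliver it through
         * the PM IIR bits below.
         */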
1880         if (INTEL_GEN(dev_priv) >= 8)
1881                 return;
1882
1883         if (pm_iir & PM_VEBOX_USER_INTERRUPT)
1884                 intel_engine_breadcrumbs_irq(dev_priv->engine[VECS0]);
1885
1886         if (pm_iir & PM_VEBOX_CS_ERROR_INTERRUPT)
1887                 DRM_DEBUG("Command parser error, pm_iir 0x%08x\n", pm_iir);
1888 }
1889
1890 static void gen9_guc_irq_handler(struct drm_i915_private *dev_priv, u32 gt_iir)
1891 {
1892         if (gt_iir & GEN9_GUC_TO_HOST_INT_EVENT)
1893                 intel_guc_to_host_event_handler(&dev_priv->guc);
1894 }
1895
1896 static void i9xx_pipestat_irq_reset(struct drm_i915_private *dev_priv)
1897 {
1898         enum pipe pipe;
1899
1900         for_each_pipe(dev_priv, pipe) {
1901                 I915_WRITE(PIPESTAT(pipe),
1902                            PIPESTAT_INT_STATUS_MASK |
1903                            PIPE_FIFO_UNDERRUN_STATUS);
1904
1905                 dev_priv->pipestat_irq_mask[pipe] = 0;
1906         }
1907 }
1908
1909 static void i9xx_pipestat_irq_ack(struct drm_i915_private *dev_priv,
1910                                   u32 iir, u32 pipe_stats[I915_MAX_PIPES])
1911 {
1912         int pipe;
1913
1914         spin_lock(&dev_priv->irq_lock);
1915
1916         if (!dev_priv->display_irqs_enabled) {
1917                 spin_unlock(&dev_priv->irq_lock);
1918                 return;
1919         }
1920
1921         for_each_pipe(dev_priv, pipe) {
1922                 i915_reg_t reg;
1923                 u32 status_mask, enable_mask, iir_bit = 0;
1924
1925                 /*
1926                  * PIPESTAT bits get signalled even when the interrupt is
1927                  * disabled with the mask bits, and some of the status bits do
1928                  * not generate interrupts at all (like the underrun bit). Hence
1929                  * we need to be careful that we only handle what we want to
1930                  * handle.
1931                  */
1932
1933                 /* FIFO underruns are filtered in the underrun handler. */
1934                 status_mask = PIPE_FIFO_UNDERRUN_STATUS;
1935
1936                 switch (pipe) {
1937                 case PIPE_A:
1938                         iir_bit = I915_DISPLAY_PIPE_A_EVENT_INTERRUPT;
1939                         break;
1940                 case PIPE_B:
1941                         iir_bit = I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
1942                         break;
1943                 case PIPE_C:
1944                         iir_bit = I915_DISPLAY_PIPE_C_EVENT_INTERRUPT;
1945                         break;
1946                 }
1947                 if (iir & iir_bit)
1948                         status_mask |= dev_priv->pipestat_irq_mask[pipe];
1949
1950                 if (!status_mask)
1951                         continue;
1952
1953                 reg = PIPESTAT(pipe);
1954                 pipe_stats[pipe] = I915_READ(reg) & status_mask;
1955                 enable_mask = i915_pipestat_enable_mask(dev_priv, pipe);
1956
1957                 /*
1958                  * Clear the PIPE*STAT regs before the IIR
1959                  *
1960                  * Toggle the enable bits to make sure we get an
1961                  * edge in the ISR pipe event bit if we don't clear
1962                  * all the enabled status bits. Otherwise the edge
1963                  * triggered IIR on i965/g4x wouldn't notice that
1964                  * an interrupt is still pending.
1965                  */
1966                 if (pipe_stats[pipe]) {
1967                         I915_WRITE(reg, pipe_stats[pipe]);
1968                         I915_WRITE(reg, enable_mask);
1969                 }
1970         }
1971         spin_unlock(&dev_priv->irq_lock);
1972 }
1973
1974 static void i8xx_pipestat_irq_handler(struct drm_i915_private *dev_priv,
1975                                       u16 iir, u32 pipe_stats[I915_MAX_PIPES])
1976 {
1977         enum pipe pipe;
1978
1979         for_each_pipe(dev_priv, pipe) {
1980                 if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS)
1981                         drm_handle_vblank(&dev_priv->drm, pipe);
1982
1983                 if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
1984                         i9xx_pipe_crc_irq_handler(dev_priv, pipe);
1985
1986                 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
1987                         intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
1988         }
1989 }
1990
1991 static void i915_pipestat_irq_handler(struct drm_i915_private *dev_priv,
1992                                       u32 iir, u32 pipe_stats[I915_MAX_PIPES])
1993 {
1994         bool blc_event = false;
1995         enum pipe pipe;
1996
1997         for_each_pipe(dev_priv, pipe) {
1998                 if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS)
1999                         drm_handle_vblank(&dev_priv->drm, pipe);
2000
2001                 if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
2002                         blc_event = true;
2003
2004                 if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
2005                         i9xx_pipe_crc_irq_handler(dev_priv, pipe);
2006
2007                 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
2008                         intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
2009         }
2010
2011         if (blc_event || (iir & I915_ASLE_INTERRUPT))
2012                 intel_opregion_asle_intr(dev_priv);
2013 }
2014
2015 static void i965_pipestat_irq_handler(struct drm_i915_private *dev_priv,
2016                                       u32 iir, u32 pipe_stats[I915_MAX_PIPES])
2017 {
2018         bool blc_event = false;
2019         enum pipe pipe;
2020
2021         for_each_pipe(dev_priv, pipe) {
2022                 if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS)
2023                         drm_handle_vblank(&dev_priv->drm, pipe);
2024
2025                 if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
2026                         blc_event = true;
2027
2028                 if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
2029                         i9xx_pipe_crc_irq_handler(dev_priv, pipe);
2030
2031                 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
2032                         intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
2033         }
2034
2035         if (blc_event || (iir & I915_ASLE_INTERRUPT))
2036                 intel_opregion_asle_intr(dev_priv);
2037
2038         if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
2039                 gmbus_irq_handler(dev_priv);
2040 }
2041
2042 static void valleyview_pipestat_irq_handler(struct drm_i915_private *dev_priv,
2043                                             u32 pipe_stats[I915_MAX_PIPES])
2044 {
2045         enum pipe pipe;
2046
2047         for_each_pipe(dev_priv, pipe) {
2048                 if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS)
2049                         drm_handle_vblank(&dev_priv->drm, pipe);
2050
2051                 if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
2052                         i9xx_pipe_crc_irq_handler(dev_priv, pipe);
2053
2054                 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
2055                         intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
2056         }
2057
2058         if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
2059                 gmbus_irq_handler(dev_priv);
2060 }
2061
2062 static u32 i9xx_hpd_irq_ack(struct drm_i915_private *dev_priv)
2063 {
2064         u32 hotplug_status = 0, hotplug_status_mask;
2065         int i;
2066
2067         if (IS_G4X(dev_priv) ||
2068             IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
2069                 hotplug_status_mask = HOTPLUG_INT_STATUS_G4X |
2070                         DP_AUX_CHANNEL_MASK_INT_STATUS_G4X;
2071         else
2072                 hotplug_status_mask = HOTPLUG_INT_STATUS_I915;
2073
2074         /*
2075          * We absolutely have to clear all the pending interrupt
2076          * bits in PORT_HOTPLUG_STAT. Otherwise the ISR port
2077          * interrupt bit won't have an edge, and the i965/g4x
2078          * edge triggered IIR will not notice that an interrupt
2079          * is still pending. We can't use PORT_HOTPLUG_EN to
2080          * guarantee the edge as the act of toggling the enable
2081          * bits can itself generate a new hotplug interrupt :(
2082          */
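        /*
         * Clearing the status bits can itself latch a fresh hotplug event,
         * so re-read until the register reads back zero, giving up after
         * ten attempts.
         */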
2083         for (i = 0; i < 10; i++) {
2084                 u32 tmp = I915_READ(PORT_HOTPLUG_STAT) & hotplug_status_mask;
2085
2086                 if (tmp == 0)
2087                         return hotplug_status;
2088
2089                 hotplug_status |= tmp;
2090                 I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
2091         }
2092
2093         WARN_ONCE(1,
2094                   "PORT_HOTPLUG_STAT did not clear (0x%08x)\n",
2095                   I915_READ(PORT_HOTPLUG_STAT));
2096
2097         return hotplug_status;
2098 }
2099
2100 static void i9xx_hpd_irq_handler(struct drm_i915_private *dev_priv,
2101                                  u32 hotplug_status)
2102 {
2103         u32 pin_mask = 0, long_mask = 0;
2104
2105         if (IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
2106             IS_CHERRYVIEW(dev_priv)) {
2107                 u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_G4X;
2108
2109                 if (hotplug_trigger) {
2110                         intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
2111                                            hotplug_trigger, hotplug_trigger,
2112                                            hpd_status_g4x,
2113                                            i9xx_port_hotplug_long_detect);
2114
2115                         intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
2116                 }
2117
2118                 if (hotplug_status & DP_AUX_CHANNEL_MASK_INT_STATUS_G4X)
2119                         dp_aux_irq_handler(dev_priv);
2120         } else {
2121                 u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915;
2122
2123                 if (hotplug_trigger) {
2124                         intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
2125                                            hotplug_trigger, hotplug_trigger,
2126                                            hpd_status_i915,
2127                                            i9xx_port_hotplug_long_detect);
2128                         intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
2129                 }
2130         }
2131 }
2132
2133 static irqreturn_t valleyview_irq_handler(int irq, void *arg)
2134 {
2135         struct drm_device *dev = arg;
2136         struct drm_i915_private *dev_priv = to_i915(dev);
2137         irqreturn_t ret = IRQ_NONE;
2138
2139         if (!intel_irqs_enabled(dev_priv))
2140                 return IRQ_NONE;
2141
2142         /* IRQs are synced during runtime_suspend, we don't require a wakeref */
2143         disable_rpm_wakeref_asserts(dev_priv);
2144
2145         do {
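                /*
                 * Single pass: the do/while (0) form just provides a break
                 * target for bailing out once all the IIRs read back zero.
                 */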
2146                 u32 iir, gt_iir, pm_iir;
2147                 u32 pipe_stats[I915_MAX_PIPES] = {};
2148                 u32 hotplug_status = 0;
2149                 u32 ier = 0;
2150
2151                 gt_iir = I915_READ(GTIIR);
2152                 pm_iir = I915_READ(GEN6_PMIIR);
2153                 iir = I915_READ(VLV_IIR);
2154
2155                 if (gt_iir == 0 && pm_iir == 0 && iir == 0)
2156                         break;
2157
2158                 ret = IRQ_HANDLED;
2159
2160                 /*
2161                  * Theory on interrupt generation, based on empirical evidence:
2162                  *
2163                  * x = ((VLV_IIR & VLV_IER) ||
2164                  *      (((GT_IIR & GT_IER) || (GEN6_PMIIR & GEN6_PMIER)) &&
2165                  *       (VLV_MASTER_IER & MASTER_INTERRUPT_ENABLE)));
2166                  *
2167                  * A CPU interrupt will only be raised when 'x' has a 0->1 edge.
2168                  * Hence we clear MASTER_INTERRUPT_ENABLE and VLV_IER to
2169                  * guarantee the CPU interrupt will be raised again even if we
2170                  * don't end up clearing all the VLV_IIR, GT_IIR, GEN6_PMIIR
2171                  * bits this time around.
2172                  */
2173                 I915_WRITE(VLV_MASTER_IER, 0);
2174                 ier = I915_READ(VLV_IER);
2175                 I915_WRITE(VLV_IER, 0);
2176
2177                 if (gt_iir)
2178                         I915_WRITE(GTIIR, gt_iir);
2179                 if (pm_iir)
2180                         I915_WRITE(GEN6_PMIIR, pm_iir);
2181
2182                 if (iir & I915_DISPLAY_PORT_INTERRUPT)
2183                         hotplug_status = i9xx_hpd_irq_ack(dev_priv);
2184
2185                 /* Call regardless, as some status bits might not be
2186                  * signalled in iir */
2187                 i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats);
2188
2189                 if (iir & (I915_LPE_PIPE_A_INTERRUPT |
2190                            I915_LPE_PIPE_B_INTERRUPT))
2191                         intel_lpe_audio_irq_handler(dev_priv);
2192
2193                 /*
2194                  * VLV_IIR is single buffered, and reflects the level
2195                  * from PIPESTAT/PORT_HOTPLUG_STAT, hence clear it last.
2196                  */
2197                 if (iir)
2198                         I915_WRITE(VLV_IIR, iir);
2199
2200                 I915_WRITE(VLV_IER, ier);
2201                 I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE);
2202
2203                 if (gt_iir)
2204                         snb_gt_irq_handler(dev_priv, gt_iir);
2205                 if (pm_iir)
2206                         gen6_rps_irq_handler(dev_priv, pm_iir);
2207
2208                 if (hotplug_status)
2209                         i9xx_hpd_irq_handler(dev_priv, hotplug_status);
2210
2211                 valleyview_pipestat_irq_handler(dev_priv, pipe_stats);
2212         } while (0);
2213
2214         enable_rpm_wakeref_asserts(dev_priv);
2215
2216         return ret;
2217 }
2218
2219 static irqreturn_t cherryview_irq_handler(int irq, void *arg)
2220 {
2221         struct drm_device *dev = arg;
2222         struct drm_i915_private *dev_priv = to_i915(dev);
2223         irqreturn_t ret = IRQ_NONE;
2224
2225         if (!intel_irqs_enabled(dev_priv))
2226                 return IRQ_NONE;
2227
2228         /* IRQs are synced during runtime_suspend, we don't require a wakeref */
2229         disable_rpm_wakeref_asserts(dev_priv);
2230
2231         do {
2232                 u32 master_ctl, iir;
2233                 u32 pipe_stats[I915_MAX_PIPES] = {};
2234                 u32 hotplug_status = 0;
2235                 u32 gt_iir[4];
2236                 u32 ier = 0;
2237
2238                 master_ctl = I915_READ(GEN8_MASTER_IRQ) & ~GEN8_MASTER_IRQ_CONTROL;
2239                 iir = I915_READ(VLV_IIR);
2240
2241                 if (master_ctl == 0 && iir == 0)
2242                         break;
2243
2244                 ret = IRQ_HANDLED;
2245
2246                 /*
2247                  * Theory on interrupt generation, based on empirical evidence:
2248                  *
2249                  * x = ((VLV_IIR & VLV_IER) ||
2250                  *      ((GEN8_MASTER_IRQ & ~GEN8_MASTER_IRQ_CONTROL) &&
2251                  *       (GEN8_MASTER_IRQ & GEN8_MASTER_IRQ_CONTROL)));
2252                  *
2253                  * A CPU interrupt will only be raised when 'x' has a 0->1 edge.
2254                  * Hence we clear GEN8_MASTER_IRQ_CONTROL and VLV_IER to
2255                  * guarantee the CPU interrupt will be raised again even if we
2256                  * don't end up clearing all the VLV_IIR and GEN8_MASTER_IRQ_CONTROL
2257                  * bits this time around.
2258                  */
2259                 I915_WRITE(GEN8_MASTER_IRQ, 0);
2260                 ier = I915_READ(VLV_IER);
2261                 I915_WRITE(VLV_IER, 0);
2262
2263                 gen8_gt_irq_ack(dev_priv, master_ctl, gt_iir);
2264
2265                 if (iir & I915_DISPLAY_PORT_INTERRUPT)
2266                         hotplug_status = i9xx_hpd_irq_ack(dev_priv);
2267
2268                 /* Call regardless, as some status bits might not be
2269                  * signalled in iir */
2270                 i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats);
2271
2272                 if (iir & (I915_LPE_PIPE_A_INTERRUPT |
2273                            I915_LPE_PIPE_B_INTERRUPT |
2274                            I915_LPE_PIPE_C_INTERRUPT))
2275                         intel_lpe_audio_irq_handler(dev_priv);
2276
2277                 /*
2278                  * VLV_IIR is single buffered, and reflects the level
2279                  * from PIPESTAT/PORT_HOTPLUG_STAT, hence clear it last.
2280                  */
2281                 if (iir)
2282                         I915_WRITE(VLV_IIR, iir);
2283
2284                 I915_WRITE(VLV_IER, ier);
2285                 I915_WRITE(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
2286
2287                 gen8_gt_irq_handler(dev_priv, master_ctl, gt_iir);
2288
2289                 if (hotplug_status)
2290                         i9xx_hpd_irq_handler(dev_priv, hotplug_status);
2291
2292                 valleyview_pipestat_irq_handler(dev_priv, pipe_stats);
2293         } while (0);
2294
2295         enable_rpm_wakeref_asserts(dev_priv);
2296
2297         return ret;
2298 }
2299
2300 static void ibx_hpd_irq_handler(struct drm_i915_private *dev_priv,
2301                                 u32 hotplug_trigger,
2302                                 const u32 hpd[HPD_NUM_PINS])
2303 {
2304         u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0;
2305
2306         /*
2307          * Somehow the PCH doesn't seem to really ack the interrupt to the CPU
2308          * unless we touch the hotplug register, even if hotplug_trigger is
2309          * zero. Not acking leads to "The master control interrupt lied (SDE)!"
2310          * errors.
2311          */
2312         dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG);
2313         if (!hotplug_trigger) {
2314                 u32 mask = PORTA_HOTPLUG_STATUS_MASK |
2315                         PORTD_HOTPLUG_STATUS_MASK |
2316                         PORTC_HOTPLUG_STATUS_MASK |
2317                         PORTB_HOTPLUG_STATUS_MASK;
2318                 dig_hotplug_reg &= ~mask;
2319         }
2320
2321         I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg);
2322         if (!hotplug_trigger)
2323                 return;
2324
2325         intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, hotplug_trigger,
2326                            dig_hotplug_reg, hpd,
2327                            pch_port_hotplug_long_detect);
2328
2329         intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
2330 }
2331
2332 static void ibx_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
2333 {
2334         int pipe;
2335         u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK;
2336
2337         ibx_hpd_irq_handler(dev_priv, hotplug_trigger, hpd_ibx);
2338
2339         if (pch_iir & SDE_AUDIO_POWER_MASK) {
2340                 int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK) >>
2341                                SDE_AUDIO_POWER_SHIFT);
2342                 DRM_DEBUG_DRIVER("PCH audio power change on port %c\n",
2343                                  port_name(port));
2344         }
2345
2346         if (pch_iir & SDE_AUX_MASK)
2347                 dp_aux_irq_handler(dev_priv);
2348
2349         if (pch_iir & SDE_GMBUS)
2350                 gmbus_irq_handler(dev_priv);
2351
2352         if (pch_iir & SDE_AUDIO_HDCP_MASK)
2353                 DRM_DEBUG_DRIVER("PCH HDCP audio interrupt\n");
2354
2355         if (pch_iir & SDE_AUDIO_TRANS_MASK)
2356                 DRM_DEBUG_DRIVER("PCH transcoder audio interrupt\n");
2357
2358         if (pch_iir & SDE_POISON)
2359                 DRM_ERROR("PCH poison interrupt\n");
2360
2361         if (pch_iir & SDE_FDI_MASK)
2362                 for_each_pipe(dev_priv, pipe)
2363                         DRM_DEBUG_DRIVER("  pipe %c FDI IIR: 0x%08x\n",
2364                                          pipe_name(pipe),
2365                                          I915_READ(FDI_RX_IIR(pipe)));
2366
2367         if (pch_iir & (SDE_TRANSB_CRC_DONE | SDE_TRANSA_CRC_DONE))
2368                 DRM_DEBUG_DRIVER("PCH transcoder CRC done interrupt\n");
2369
2370         if (pch_iir & (SDE_TRANSB_CRC_ERR | SDE_TRANSA_CRC_ERR))
2371                 DRM_DEBUG_DRIVER("PCH transcoder CRC error interrupt\n");
2372
2373         if (pch_iir & SDE_TRANSA_FIFO_UNDER)
2374                 intel_pch_fifo_underrun_irq_handler(dev_priv, PIPE_A);
2375
2376         if (pch_iir & SDE_TRANSB_FIFO_UNDER)
2377                 intel_pch_fifo_underrun_irq_handler(dev_priv, PIPE_B);
2378 }
2379
2380 static void ivb_err_int_handler(struct drm_i915_private *dev_priv)
2381 {
2382         u32 err_int = I915_READ(GEN7_ERR_INT);
2383         enum pipe pipe;
2384
2385         if (err_int & ERR_INT_POISON)
2386                 DRM_ERROR("Poison interrupt\n");
2387
2388         for_each_pipe(dev_priv, pipe) {
2389                 if (err_int & ERR_INT_FIFO_UNDERRUN(pipe))
2390                         intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
2391
2392                 if (err_int & ERR_INT_PIPE_CRC_DONE(pipe)) {
2393                         if (IS_IVYBRIDGE(dev_priv))
2394                                 ivb_pipe_crc_irq_handler(dev_priv, pipe);
2395                         else
2396                                 hsw_pipe_crc_irq_handler(dev_priv, pipe);
2397                 }
2398         }
2399
2400         I915_WRITE(GEN7_ERR_INT, err_int);
2401 }
2402
2403 static void cpt_serr_int_handler(struct drm_i915_private *dev_priv)
2404 {
2405         u32 serr_int = I915_READ(SERR_INT);
2406         enum pipe pipe;
2407
2408         if (serr_int & SERR_INT_POISON)
2409                 DRM_ERROR("PCH poison interrupt\n");
2410
2411         for_each_pipe(dev_priv, pipe)
2412                 if (serr_int & SERR_INT_TRANS_FIFO_UNDERRUN(pipe))
2413                         intel_pch_fifo_underrun_irq_handler(dev_priv, pipe);
2414
2415         I915_WRITE(SERR_INT, serr_int);
2416 }
2417
2418 static void cpt_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
2419 {
2420         int pipe;
2421         u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_CPT;
2422
2423         ibx_hpd_irq_handler(dev_priv, hotplug_trigger, hpd_cpt);
2424
2425         if (pch_iir & SDE_AUDIO_POWER_MASK_CPT) {
2426                 int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK_CPT) >>
2427                                SDE_AUDIO_POWER_SHIFT_CPT);
2428                 DRM_DEBUG_DRIVER("PCH audio power change on port %c\n",
2429                                  port_name(port));
2430         }
2431
2432         if (pch_iir & SDE_AUX_MASK_CPT)
2433                 dp_aux_irq_handler(dev_priv);
2434
2435         if (pch_iir & SDE_GMBUS_CPT)
2436                 gmbus_irq_handler(dev_priv);
2437
2438         if (pch_iir & SDE_AUDIO_CP_REQ_CPT)
2439                 DRM_DEBUG_DRIVER("Audio CP request interrupt\n");
2440
2441         if (pch_iir & SDE_AUDIO_CP_CHG_CPT)
2442                 DRM_DEBUG_DRIVER("Audio CP change interrupt\n");
2443
2444         if (pch_iir & SDE_FDI_MASK_CPT)
2445                 for_each_pipe(dev_priv, pipe)
2446                         DRM_DEBUG_DRIVER("  pipe %c FDI IIR: 0x%08x\n",
2447                                          pipe_name(pipe),
2448                                          I915_READ(FDI_RX_IIR(pipe)));
2449
2450         if (pch_iir & SDE_ERROR_CPT)
2451                 cpt_serr_int_handler(dev_priv);
2452 }
2453
2454 static void icp_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
2455 {
2456         u32 ddi_hotplug_trigger = pch_iir & SDE_DDI_MASK_ICP;
2457         u32 tc_hotplug_trigger = pch_iir & SDE_TC_MASK_ICP;
2458         u32 pin_mask = 0, long_mask = 0;
2459
2460         if (ddi_hotplug_trigger) {
2461                 u32 dig_hotplug_reg;
2462
2463                 dig_hotplug_reg = I915_READ(SHOTPLUG_CTL_DDI);
2464                 I915_WRITE(SHOTPLUG_CTL_DDI, dig_hotplug_reg);
2465
2466                 intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
2467                                    ddi_hotplug_trigger,
2468                                    dig_hotplug_reg, hpd_icp,
2469                                    icp_ddi_port_hotplug_long_detect);
2470         }
2471
2472         if (tc_hotplug_trigger) {
2473                 u32 dig_hotplug_reg;
2474
2475                 dig_hotplug_reg = I915_READ(SHOTPLUG_CTL_TC);
2476                 I915_WRITE(SHOTPLUG_CTL_TC, dig_hotplug_reg);
2477
2478                 intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
2479                                    tc_hotplug_trigger,
2480                                    dig_hotplug_reg, hpd_icp,
2481                                    icp_tc_port_hotplug_long_detect);
2482         }
2483
2484         if (pin_mask)
2485                 intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
2486
2487         if (pch_iir & SDE_GMBUS_ICP)
2488                 gmbus_irq_handler(dev_priv);
2489 }
2490
2491 static void spt_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
2492 {
2493         u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_SPT &
2494                 ~SDE_PORTE_HOTPLUG_SPT;
2495         u32 hotplug2_trigger = pch_iir & SDE_PORTE_HOTPLUG_SPT;
2496         u32 pin_mask = 0, long_mask = 0;
2497
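        /*
         * On SPT the port E status lives in PCH_PORT_HOTPLUG2 rather than
         * PCH_PORT_HOTPLUG, hence the separate trigger and long-pulse
         * callback below.
         */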
2498         if (hotplug_trigger) {
2499                 u32 dig_hotplug_reg;
2500
2501                 dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG);
2502                 I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg);
2503
2504                 intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
2505                                    hotplug_trigger, dig_hotplug_reg, hpd_spt,
2506                                    spt_port_hotplug_long_detect);
2507         }
2508
2509         if (hotplug2_trigger) {
2510                 u32 dig_hotplug_reg;
2511
2512                 dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG2);
2513                 I915_WRITE(PCH_PORT_HOTPLUG2, dig_hotplug_reg);
2514
2515                 intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
2516                                    hotplug2_trigger, dig_hotplug_reg, hpd_spt,
2517                                    spt_port_hotplug2_long_detect);
2518         }
2519
2520         if (pin_mask)
2521                 intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
2522
2523         if (pch_iir & SDE_GMBUS_CPT)
2524                 gmbus_irq_handler(dev_priv);
2525 }
2526
2527 static void ilk_hpd_irq_handler(struct drm_i915_private *dev_priv,
2528                                 u32 hotplug_trigger,
2529                                 const u32 hpd[HPD_NUM_PINS])
2530 {
2531         u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0;
2532
2533         dig_hotplug_reg = I915_READ(DIGITAL_PORT_HOTPLUG_CNTRL);
2534         I915_WRITE(DIGITAL_PORT_HOTPLUG_CNTRL, dig_hotplug_reg);
2535
2536         intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, hotplug_trigger,
2537                            dig_hotplug_reg, hpd,
2538                            ilk_port_hotplug_long_detect);
2539
2540         intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
2541 }
2542
2543 static void ilk_display_irq_handler(struct drm_i915_private *dev_priv,
2544                                     u32 de_iir)
2545 {
2546         enum pipe pipe;
2547         u32 hotplug_trigger = de_iir & DE_DP_A_HOTPLUG;
2548
2549         if (hotplug_trigger)
2550                 ilk_hpd_irq_handler(dev_priv, hotplug_trigger, hpd_ilk);
2551
2552         if (de_iir & DE_AUX_CHANNEL_A)
2553                 dp_aux_irq_handler(dev_priv);
2554
2555         if (de_iir & DE_GSE)
2556                 intel_opregion_asle_intr(dev_priv);
2557
2558         if (de_iir & DE_POISON)
2559                 DRM_ERROR("Poison interrupt\n");
2560
2561         for_each_pipe(dev_priv, pipe) {
2562                 if (de_iir & DE_PIPE_VBLANK(pipe))
2563                         drm_handle_vblank(&dev_priv->drm, pipe);
2564
2565                 if (de_iir & DE_PIPE_FIFO_UNDERRUN(pipe))
2566                         intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
2567
2568                 if (de_iir & DE_PIPE_CRC_DONE(pipe))
2569                         i9xx_pipe_crc_irq_handler(dev_priv, pipe);
2570         }
2571
2572         /* check event from PCH */
2573         if (de_iir & DE_PCH_EVENT) {
2574                 u32 pch_iir = I915_READ(SDEIIR);
2575
2576                 if (HAS_PCH_CPT(dev_priv))
2577                         cpt_irq_handler(dev_priv, pch_iir);
2578                 else
2579                         ibx_irq_handler(dev_priv, pch_iir);
2580
2581                 /* should clear PCH hotplug event before clearing CPU irq */
2582                 I915_WRITE(SDEIIR, pch_iir);
2583         }
2584
2585         if (IS_GEN(dev_priv, 5) && de_iir & DE_PCU_EVENT)
2586                 ironlake_rps_change_irq_handler(dev_priv);
2587 }
2588
2589 static void ivb_display_irq_handler(struct drm_i915_private *dev_priv,
2590                                     u32 de_iir)
2591 {
2592         enum pipe pipe;
2593         u32 hotplug_trigger = de_iir & DE_DP_A_HOTPLUG_IVB;
2594
2595         if (hotplug_trigger)
2596                 ilk_hpd_irq_handler(dev_priv, hotplug_trigger, hpd_ivb);
2597
2598         if (de_iir & DE_ERR_INT_IVB)
2599                 ivb_err_int_handler(dev_priv);
2600
2601         if (de_iir & DE_EDP_PSR_INT_HSW) {
2602                 u32 psr_iir = I915_READ(EDP_PSR_IIR);
2603
2604                 intel_psr_irq_handler(dev_priv, psr_iir);
2605                 I915_WRITE(EDP_PSR_IIR, psr_iir);
2606         }
2607
2608         if (de_iir & DE_AUX_CHANNEL_A_IVB)
2609                 dp_aux_irq_handler(dev_priv);
2610
2611         if (de_iir & DE_GSE_IVB)
2612                 intel_opregion_asle_intr(dev_priv);
2613
2614         for_each_pipe(dev_priv, pipe) {
2615                 if (de_iir & (DE_PIPE_VBLANK_IVB(pipe)))
2616                         drm_handle_vblank(&dev_priv->drm, pipe);
2617         }
2618
2619         /* check event from PCH */
2620         if (!HAS_PCH_NOP(dev_priv) && (de_iir & DE_PCH_EVENT_IVB)) {
2621                 u32 pch_iir = I915_READ(SDEIIR);
2622
2623                 cpt_irq_handler(dev_priv, pch_iir);
2624
2625                 /* clear PCH hotplug event before clearing CPU irq */
2626                 I915_WRITE(SDEIIR, pch_iir);
2627         }
2628 }
2629
2630 /*
2631  * To handle irqs with the minimum potential races with fresh interrupts, we:
2632  * 1 - Disable Master Interrupt Control.
2633  * 2 - Find the source(s) of the interrupt.
2634  * 3 - Clear the Interrupt Identity bits (IIR).
2635  * 4 - Process the interrupt(s) that had bits set in the IIRs.
2636  * 5 - Re-enable Master Interrupt Control.
2637  */
2638 static irqreturn_t ironlake_irq_handler(int irq, void *arg)
2639 {
2640         struct drm_device *dev = arg;
2641         struct drm_i915_private *dev_priv = to_i915(dev);
2642         u32 de_iir, gt_iir, de_ier, sde_ier = 0;
2643         irqreturn_t ret = IRQ_NONE;
2644
2645         if (!intel_irqs_enabled(dev_priv))
2646                 return IRQ_NONE;
2647
2648         /* IRQs are synced during runtime_suspend, we don't require a wakeref */
2649         disable_rpm_wakeref_asserts(dev_priv);
2650
2651         /* disable master interrupt before clearing iir  */
2652         de_ier = I915_READ(DEIER);
2653         I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);
2654
2655         /* Disable south interrupts. We'll only write to SDEIIR once, so further
2656  * interrupts will be stored on its back queue, and then we'll be
2657          * able to process them after we restore SDEIER (as soon as we restore
2658          * it, we'll get an interrupt if SDEIIR still has something to process
2659          * due to its back queue). */
2660         if (!HAS_PCH_NOP(dev_priv)) {
2661                 sde_ier = I915_READ(SDEIER);
2662                 I915_WRITE(SDEIER, 0);
2663         }
2664
2665         /* Find, clear, then process each source of interrupt */
2666
2667         gt_iir = I915_READ(GTIIR);
2668         if (gt_iir) {
2669                 I915_WRITE(GTIIR, gt_iir);
2670                 ret = IRQ_HANDLED;
2671                 if (INTEL_GEN(dev_priv) >= 6)
2672                         snb_gt_irq_handler(dev_priv, gt_iir);
2673                 else
2674                         ilk_gt_irq_handler(dev_priv, gt_iir);
2675         }
2676
2677         de_iir = I915_READ(DEIIR);
2678         if (de_iir) {
2679                 I915_WRITE(DEIIR, de_iir);
2680                 ret = IRQ_HANDLED;
2681                 if (INTEL_GEN(dev_priv) >= 7)
2682                         ivb_display_irq_handler(dev_priv, de_iir);
2683                 else
2684                         ilk_display_irq_handler(dev_priv, de_iir);
2685         }
2686
2687         if (INTEL_GEN(dev_priv) >= 6) {
2688                 u32 pm_iir = I915_READ(GEN6_PMIIR);
2689                 if (pm_iir) {
2690                         I915_WRITE(GEN6_PMIIR, pm_iir);
2691                         ret = IRQ_HANDLED;
2692                         gen6_rps_irq_handler(dev_priv, pm_iir);
2693                 }
2694         }
2695
2696         I915_WRITE(DEIER, de_ier);
2697         if (!HAS_PCH_NOP(dev_priv))
2698                 I915_WRITE(SDEIER, sde_ier);
2699
2700         /* IRQs are synced during runtime_suspend, we don't require a wakeref */
2701         enable_rpm_wakeref_asserts(dev_priv);
2702
2703         return ret;
2704 }
2705
2706 static void bxt_hpd_irq_handler(struct drm_i915_private *dev_priv,
2707                                 u32 hotplug_trigger,
2708                                 const u32 hpd[HPD_NUM_PINS])
2709 {
2710         u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0;
2711
2712         dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG);
2713         I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg);
2714
2715         intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, hotplug_trigger,
2716                            dig_hotplug_reg, hpd,
2717                            bxt_port_hotplug_long_detect);
2718
2719         intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
2720 }
2721
2722 static void gen11_hpd_irq_handler(struct drm_i915_private *dev_priv, u32 iir)
2723 {
2724         u32 pin_mask = 0, long_mask = 0;
2725         u32 trigger_tc = iir & GEN11_DE_TC_HOTPLUG_MASK;
2726         u32 trigger_tbt = iir & GEN11_DE_TBT_HOTPLUG_MASK;
2727
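        /*
         * Type-C and Thunderbolt events share the hpd_gen11 pin mapping and
         * long-pulse detection; only the status/control register differs.
         */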
2728         if (trigger_tc) {
2729                 u32 dig_hotplug_reg;
2730
2731                 dig_hotplug_reg = I915_READ(GEN11_TC_HOTPLUG_CTL);
2732                 I915_WRITE(GEN11_TC_HOTPLUG_CTL, dig_hotplug_reg);
2733
2734                 intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, trigger_tc,
2735                                    dig_hotplug_reg, hpd_gen11,
2736                                    gen11_port_hotplug_long_detect);
2737         }
2738
2739         if (trigger_tbt) {
2740                 u32 dig_hotplug_reg;
2741
2742                 dig_hotplug_reg = I915_READ(GEN11_TBT_HOTPLUG_CTL);
2743                 I915_WRITE(GEN11_TBT_HOTPLUG_CTL, dig_hotplug_reg);
2744
2745                 intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, trigger_tbt,
2746                                    dig_hotplug_reg, hpd_gen11,
2747                                    gen11_port_hotplug_long_detect);
2748         }
2749
2750         if (pin_mask)
2751                 intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
2752         else
2753                 DRM_ERROR("Unexpected DE HPD interrupt 0x%08x\n", iir);
2754 }
2755
2756 static u32 gen8_de_port_aux_mask(struct drm_i915_private *dev_priv)
2757 {
2758         u32 mask = GEN8_AUX_CHANNEL_A;
2759
2760         if (INTEL_GEN(dev_priv) >= 9)
2761                 mask |= GEN9_AUX_CHANNEL_B |
2762                         GEN9_AUX_CHANNEL_C |
2763                         GEN9_AUX_CHANNEL_D;
2764
2765         if (IS_CNL_WITH_PORT_F(dev_priv))
2766                 mask |= CNL_AUX_CHANNEL_F;
2767
2768         if (INTEL_GEN(dev_priv) >= 11)
2769                 mask |= ICL_AUX_CHANNEL_E |
2770                         CNL_AUX_CHANNEL_F;
2771
2772         return mask;
2773 }
2774
2775 static irqreturn_t
2776 gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl)
2777 {
2778         irqreturn_t ret = IRQ_NONE;
2779         u32 iir;
2780         enum pipe pipe;
2781
2782         if (master_ctl & GEN8_DE_MISC_IRQ) {
2783                 iir = I915_READ(GEN8_DE_MISC_IIR);
2784                 if (iir) {
2785                         bool found = false;
2786
2787                         I915_WRITE(GEN8_DE_MISC_IIR, iir);
2788                         ret = IRQ_HANDLED;
2789
2790                         if (iir & GEN8_DE_MISC_GSE) {
2791                                 intel_opregion_asle_intr(dev_priv);
2792                                 found = true;
2793                         }
2794
2795                         if (iir & GEN8_DE_EDP_PSR) {
2796                                 u32 psr_iir = I915_READ(EDP_PSR_IIR);
2797
2798                                 intel_psr_irq_handler(dev_priv, psr_iir);
2799                                 I915_WRITE(EDP_PSR_IIR, psr_iir);
2800                                 found = true;
2801                         }
2802
2803                         if (!found)
2804                                 DRM_ERROR("Unexpected DE Misc interrupt\n");
2805                 } else
2807                         DRM_ERROR("The master control interrupt lied (DE MISC)!\n");
2808         }
2809
2810         if (INTEL_GEN(dev_priv) >= 11 && (master_ctl & GEN11_DE_HPD_IRQ)) {
2811                 iir = I915_READ(GEN11_DE_HPD_IIR);
2812                 if (iir) {
2813                         I915_WRITE(GEN11_DE_HPD_IIR, iir);
2814                         ret = IRQ_HANDLED;
2815                         gen11_hpd_irq_handler(dev_priv, iir);
2816                 } else {
2817                         DRM_ERROR("The master control interrupt lied (DE HPD)!\n");
2818                 }
2819         }
2820
2821         if (master_ctl & GEN8_DE_PORT_IRQ) {
2822                 iir = I915_READ(GEN8_DE_PORT_IIR);
2823                 if (iir) {
2824                         u32 tmp_mask;
2825                         bool found = false;
2826
2827                         I915_WRITE(GEN8_DE_PORT_IIR, iir);
2828                         ret = IRQ_HANDLED;
2829
2830                         if (iir & gen8_de_port_aux_mask(dev_priv)) {
2831                                 dp_aux_irq_handler(dev_priv);
2832                                 found = true;
2833                         }
2834
2835                         if (IS_GEN9_LP(dev_priv)) {
2836                                 tmp_mask = iir & BXT_DE_PORT_HOTPLUG_MASK;
2837                                 if (tmp_mask) {
2838                                         bxt_hpd_irq_handler(dev_priv, tmp_mask,
2839                                                             hpd_bxt);
2840                                         found = true;
2841                                 }
2842                         } else if (IS_BROADWELL(dev_priv)) {
2843                                 tmp_mask = iir & GEN8_PORT_DP_A_HOTPLUG;
2844                                 if (tmp_mask) {
2845                                         ilk_hpd_irq_handler(dev_priv,
2846                                                             tmp_mask, hpd_bdw);
2847                                         found = true;
2848                                 }
2849                         }
2850
2851                         if (IS_GEN9_LP(dev_priv) && (iir & BXT_DE_PORT_GMBUS)) {
2852                                 gmbus_irq_handler(dev_priv);
2853                                 found = true;
2854                         }
2855
2856                         if (!found)
2857                                 DRM_ERROR("Unexpected DE Port interrupt\n");
2858                 } else {
2859                         DRM_ERROR("The master control interrupt lied (DE PORT)!\n");
2860                 }
2861         }
2862
2863         for_each_pipe(dev_priv, pipe) {
2864                 u32 fault_errors;
2865
2866                 if (!(master_ctl & GEN8_DE_PIPE_IRQ(pipe)))
2867                         continue;
2868
2869                 iir = I915_READ(GEN8_DE_PIPE_IIR(pipe));
2870                 if (!iir) {
2871                         DRM_ERROR("The master control interrupt lied (DE PIPE)!\n");
2872                         continue;
2873                 }
2874
2875                 ret = IRQ_HANDLED;
2876                 I915_WRITE(GEN8_DE_PIPE_IIR(pipe), iir);
2877
2878                 if (iir & GEN8_PIPE_VBLANK)
2879                         drm_handle_vblank(&dev_priv->drm, pipe);
2880
2881                 if (iir & GEN8_PIPE_CDCLK_CRC_DONE)
2882                         hsw_pipe_crc_irq_handler(dev_priv, pipe);
2883
2884                 if (iir & GEN8_PIPE_FIFO_UNDERRUN)
2885                         intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
2886
2887                 fault_errors = iir;
2888                 if (INTEL_GEN(dev_priv) >= 9)
2889                         fault_errors &= GEN9_DE_PIPE_IRQ_FAULT_ERRORS;
2890                 else
2891                         fault_errors &= GEN8_DE_PIPE_IRQ_FAULT_ERRORS;
2892
2893                 if (fault_errors)
2894                         DRM_ERROR("Fault errors on pipe %c: 0x%08x\n",
2895                                   pipe_name(pipe),
2896                                   fault_errors);
2897         }
2898
2899         if (HAS_PCH_SPLIT(dev_priv) && !HAS_PCH_NOP(dev_priv) &&
2900             master_ctl & GEN8_DE_PCH_IRQ) {
2901                 /*
2902                  * FIXME(BDW): Assume for now that the new interrupt handling
2903                  * scheme also closed the SDE interrupt handling race we've seen
2904                  * on older pch-split platforms. But this needs testing.
2905                  */
2906                 iir = I915_READ(SDEIIR);
2907                 if (iir) {
2908                         I915_WRITE(SDEIIR, iir);
2909                         ret = IRQ_HANDLED;
2910
2911                         if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
2912                                 icp_irq_handler(dev_priv, iir);
2913                         else if (INTEL_PCH_TYPE(dev_priv) >= PCH_SPT)
2914                                 spt_irq_handler(dev_priv, iir);
2915                         else
2916                                 cpt_irq_handler(dev_priv, iir);
2917                 } else {
2918                         /*
2919                          * Like on previous PCH there seems to be something
2920                          * fishy going on with forwarding PCH interrupts.
2921                          */
2922                         DRM_DEBUG_DRIVER("The master control interrupt lied (SDE)!\n");
2923                 }
2924         }
2925
2926         return ret;
2927 }
2928
2929 static inline u32 gen8_master_intr_disable(void __iomem * const regs)
2930 {
2931         raw_reg_write(regs, GEN8_MASTER_IRQ, 0);
2932
2933         /*
2934          * Now with master disabled, get a sample of level indications
2935          * for this interrupt. Indications will be cleared on related acks.
2936          * New indications can and will light up during processing,
2937          * and will generate new interrupt after enabling master.
2938          */
2939         return raw_reg_read(regs, GEN8_MASTER_IRQ);
2940 }
2941
2942 static inline void gen8_master_intr_enable(void __iomem * const regs)
2943 {
2944         raw_reg_write(regs, GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
2945 }
2946
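/*
 * The gen8+ top halves below all follow the same bracket (an
 * illustrative sketch of the flow, not a separate entry point):
 *
 *	master_ctl = genX_master_intr_disable(regs);
 *	if (!master_ctl) { genX_master_intr_enable(regs); return IRQ_NONE; }
 *	... ack, then handle each source reported in master_ctl ...
 *	genX_master_intr_enable(regs);
 *
 * Disabling the master first yields a stable sample of the level
 * indications; anything that lights up in the meantime simply fires a
 * new interrupt once the master is re-enabled.
 */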
2947 static irqreturn_t gen8_irq_handler(int irq, void *arg)
2948 {
2949         struct drm_i915_private *dev_priv = to_i915(arg);
2950         void __iomem * const regs = dev_priv->uncore.regs;
2951         u32 master_ctl;
2952         u32 gt_iir[4];
2953
2954         if (!intel_irqs_enabled(dev_priv))
2955                 return IRQ_NONE;
2956
2957         master_ctl = gen8_master_intr_disable(regs);
2958         if (!master_ctl) {
2959                 gen8_master_intr_enable(regs);
2960                 return IRQ_NONE;
2961         }
2962
2963         /* Find, clear, then process each source of interrupt */
2964         gen8_gt_irq_ack(dev_priv, master_ctl, gt_iir);
2965
2966         /* IRQs are synced during runtime_suspend, we don't require a wakeref */
2967         if (master_ctl & ~GEN8_GT_IRQS) {
2968                 disable_rpm_wakeref_asserts(dev_priv);
2969                 gen8_de_irq_handler(dev_priv, master_ctl);
2970                 enable_rpm_wakeref_asserts(dev_priv);
2971         }
2972
2973         gen8_master_intr_enable(regs);
2974
2975         gen8_gt_irq_handler(dev_priv, master_ctl, gt_iir);
2976
2977         return IRQ_HANDLED;
2978 }
2979
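/*
 * Gen11 reports GT interrupts indirectly: select the bank/bit of
 * interest via the IIR selector register, spin until the identity
 * register flags DATA_VALID, then ack by writing DATA_VALID back.
 */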
2980 static u32
2981 gen11_gt_engine_identity(struct drm_i915_private * const i915,
2982                          const unsigned int bank, const unsigned int bit)
2983 {
2984         void __iomem * const regs = i915->uncore.regs;
2985         u32 timeout_ts;
2986         u32 ident;
2987
2988         lockdep_assert_held(&i915->irq_lock);
2989
2990         raw_reg_write(regs, GEN11_IIR_REG_SELECTOR(bank), BIT(bit));
2991
2992         /*
2993          * NB: Specs do not specify how long to spin wait, so we use
2994          * ~100us as an educated guess (local_clock() ns >> 10 ~= us).
2995          */
2996         timeout_ts = (local_clock() >> 10) + 100;
2997         do {
2998                 ident = raw_reg_read(regs, GEN11_INTR_IDENTITY_REG(bank));
2999         } while (!(ident & GEN11_INTR_DATA_VALID) &&
3000                  !time_after32(local_clock() >> 10, timeout_ts));
3001
3002         if (unlikely(!(ident & GEN11_INTR_DATA_VALID))) {
3003                 DRM_ERROR("INTR_IDENTITY_REG%u:%u 0x%08x not valid!\n",
3004                           bank, bit, ident);
3005                 return 0;
3006         }
3007
3008         raw_reg_write(regs, GEN11_INTR_IDENTITY_REG(bank),
3009                       GEN11_INTR_DATA_VALID);
3010
3011         return ident;
3012 }
3013
3014 static void
3015 gen11_other_irq_handler(struct drm_i915_private * const i915,
3016                         const u8 instance, const u16 iir)
3017 {
3018         if (instance == OTHER_GTPM_INSTANCE)
3019                 return gen11_rps_irq_handler(i915, iir);
3020
3021         WARN_ONCE(1, "unhandled other interrupt instance=0x%x, iir=0x%x\n",
3022                   instance, iir);
3023 }
3024
3025 static void
3026 gen11_engine_irq_handler(struct drm_i915_private * const i915,
3027                          const u8 class, const u8 instance, const u16 iir)
3028 {
3029         struct intel_engine_cs *engine;
3030
3031         if (instance <= MAX_ENGINE_INSTANCE)
3032                 engine = i915->engine_class[class][instance];
3033         else
3034                 engine = NULL;
3035
3036         if (likely(engine))
3037                 return gen8_cs_irq_handler(engine, iir);
3038
3039         WARN_ONCE(1, "unhandled engine interrupt class=0x%x, instance=0x%x\n",
3040                   class, instance);
3041 }
3042
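/*
 * The identity dword encodes which engine raised the interrupt and why:
 * an engine class, an instance within that class, and the 16 engine
 * interrupt bits, decoded with the GEN11_INTR_ENGINE_* helpers below.
 */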
3043 static void
3044 gen11_gt_identity_handler(struct drm_i915_private * const i915,
3045                           const u32 identity)
3046 {
3047         const u8 class = GEN11_INTR_ENGINE_CLASS(identity);
3048         const u8 instance = GEN11_INTR_ENGINE_INSTANCE(identity);
3049         const u16 intr = GEN11_INTR_ENGINE_INTR(identity);
3050
3051         if (unlikely(!intr))
3052                 return;
3053
3054         if (class <= COPY_ENGINE_CLASS)
3055                 return gen11_engine_irq_handler(i915, class, instance, intr);
3056
3057         if (class == OTHER_CLASS)
3058                 return gen11_other_irq_handler(i915, instance, intr);
3059
3060         WARN_ONCE(1, "unknown interrupt class=0x%x, instance=0x%x, intr=0x%x\n",
3061                   class, instance, intr);
3062 }
3063
3064 static void
3065 gen11_gt_bank_handler(struct drm_i915_private * const i915,
3066                       const unsigned int bank)
3067 {
3068         void __iomem * const regs = i915->uncore.regs;
3069         unsigned long intr_dw;
3070         unsigned int bit;
3071
3072         lockdep_assert_held(&i915->irq_lock);
3073
3074         intr_dw = raw_reg_read(regs, GEN11_GT_INTR_DW(bank));
3075
3076         for_each_set_bit(bit, &intr_dw, 32) {
3077                 const u32 ident = gen11_gt_engine_identity(i915, bank, bit);
3078
3079                 gen11_gt_identity_handler(i915, ident);
3080         }
3081
3082         /* Clear only after the shared IIR has been serviced for each engine */
3083         raw_reg_write(regs, GEN11_GT_INTR_DW(bank), intr_dw);
3084 }
3085
3086 static void
3087 gen11_gt_irq_handler(struct drm_i915_private * const i915,
3088                      const u32 master_ctl)
3089 {
3090         unsigned int bank;
3091
3092         spin_lock(&i915->irq_lock);
3093
3094         for (bank = 0; bank < 2; bank++) {
3095                 if (master_ctl & GEN11_GT_DW_IRQ(bank))
3096                         gen11_gt_bank_handler(i915, bank);
3097         }
3098
3099         spin_unlock(&i915->irq_lock);
3100 }
3101
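/*
 * GU MISC is acked inside the masked window but handled only after the
 * master interrupt has been re-enabled; see the ordering in
 * gen11_irq_handler().
 */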
3102 static u32
3103 gen11_gu_misc_irq_ack(struct drm_i915_private *dev_priv, const u32 master_ctl)
3104 {
3105         void __iomem * const regs = dev_priv->uncore.regs;
3106         u32 iir;
3107
3108         if (!(master_ctl & GEN11_GU_MISC_IRQ))
3109                 return 0;
3110
3111         iir = raw_reg_read(regs, GEN11_GU_MISC_IIR);
3112         if (likely(iir))
3113                 raw_reg_write(regs, GEN11_GU_MISC_IIR, iir);
3114
3115         return iir;
3116 }
3117
3118 static void
3119 gen11_gu_misc_irq_handler(struct drm_i915_private *dev_priv, const u32 iir)
3120 {
3121         if (iir & GEN11_GU_MISC_GSE)
3122                 intel_opregion_asle_intr(dev_priv);
3123 }
3124
3125 static inline u32 gen11_master_intr_disable(void __iomem * const regs)
3126 {
3127         raw_reg_write(regs, GEN11_GFX_MSTR_IRQ, 0);
3128
3129         /*
3130          * Now with master disabled, get a sample of level indications
3131          * for this interrupt. Indications will be cleared on related acks.
3132          * New indications can and will light up during processing,
3133          * and will generate new interrupt after enabling master.
3134          */
3135         return raw_reg_read(regs, GEN11_GFX_MSTR_IRQ);
3136 }
3137
3138 static inline void gen11_master_intr_enable(void __iomem * const regs)
3139 {
3140         raw_reg_write(regs, GEN11_GFX_MSTR_IRQ, GEN11_MASTER_IRQ);
3141 }
3142
3143 static irqreturn_t gen11_irq_handler(int irq, void *arg)
3144 {
3145         struct drm_i915_private * const i915 = to_i915(arg);
3146         void __iomem * const regs = i915->uncore.regs;
3147         u32 master_ctl;
3148         u32 gu_misc_iir;
3149
3150         if (!intel_irqs_enabled(i915))
3151                 return IRQ_NONE;
3152
3153         master_ctl = gen11_master_intr_disable(regs);
3154         if (!master_ctl) {
3155                 gen11_master_intr_enable(regs);
3156                 return IRQ_NONE;
3157         }
3158
3159         /* Find, clear, then process each source of interrupt. */
3160         gen11_gt_irq_handler(i915, master_ctl);
3161
3162         /* IRQs are synced during runtime_suspend, we don't require a wakeref */
3163         if (master_ctl & GEN11_DISPLAY_IRQ) {
3164                 const u32 disp_ctl = raw_reg_read(regs, GEN11_DISPLAY_INT_CTL);
3165
3166                 disable_rpm_wakeref_asserts(i915);
3167                 /*
3168                  * GEN11_DISPLAY_INT_CTL has same format as GEN8_MASTER_IRQ
3169                  * for the display related bits.
3170                  */
3171                 gen8_de_irq_handler(i915, disp_ctl);
3172                 enable_rpm_wakeref_asserts(i915);
3173         }
3174
3175         gu_misc_iir = gen11_gu_misc_irq_ack(i915, master_ctl);
3176
3177         gen11_master_intr_enable(regs);
3178
3179         gen11_gu_misc_irq_handler(i915, gu_misc_iir);
3180
3181         return IRQ_HANDLED;
3182 }
3183
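/*
 * The enable/disable pairs below are installed as the drm_driver vblank
 * hooks for the matching platforms (see intel_irq_init()).
 */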
3184 /* Called from drm generic code, passed 'crtc' which
3185  * we use as a pipe index
3186  */
3187 static int i8xx_enable_vblank(struct drm_device *dev, unsigned int pipe)
3188 {
3189         struct drm_i915_private *dev_priv = to_i915(dev);
3190         unsigned long irqflags;
3191
3192         spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
3193         i915_enable_pipestat(dev_priv, pipe, PIPE_VBLANK_INTERRUPT_STATUS);
3194         spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
3195
3196         return 0;
3197 }
3198
3199 static int i945gm_enable_vblank(struct drm_device *dev, unsigned int pipe)
3200 {
3201         struct drm_i915_private *dev_priv = to_i915(dev);
3202
3203         if (dev_priv->i945gm_vblank.enabled++ == 0)
3204                 schedule_work(&dev_priv->i945gm_vblank.work);
3205
3206         return i8xx_enable_vblank(dev, pipe);
3207 }
3208
3209 static int i965_enable_vblank(struct drm_device *dev, unsigned int pipe)
3210 {
3211         struct drm_i915_private *dev_priv = to_i915(dev);
3212         unsigned long irqflags;
3213
3214         spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
3215         i915_enable_pipestat(dev_priv, pipe,
3216                              PIPE_START_VBLANK_INTERRUPT_STATUS);
3217         spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
3218
3219         return 0;
3220 }
3221
3222 static int ironlake_enable_vblank(struct drm_device *dev, unsigned int pipe)
3223 {
3224         struct drm_i915_private *dev_priv = to_i915(dev);
3225         unsigned long irqflags;
3226         u32 bit = INTEL_GEN(dev_priv) >= 7 ?
3227                 DE_PIPE_VBLANK_IVB(pipe) : DE_PIPE_VBLANK(pipe);
3228
3229         spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
3230         ilk_enable_display_irq(dev_priv, bit);
3231         spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
3232
3233         /* Even though there is no DMC, frame counter can get stuck when
3234          * PSR is active as no frames are generated.
3235          */
3236         if (HAS_PSR(dev_priv))
3237                 drm_vblank_restore(dev, pipe);
3238
3239         return 0;
3240 }
3241
3242 static int gen8_enable_vblank(struct drm_device *dev, unsigned int pipe)
3243 {
3244         struct drm_i915_private *dev_priv = to_i915(dev);
3245         unsigned long irqflags;
3246
3247         spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
3248         bdw_enable_pipe_irq(dev_priv, pipe, GEN8_PIPE_VBLANK);
3249         spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
3250
3251         /* Even if there is no DMC, frame counter can get stuck when
3252          * PSR is active as no frames are generated, so check only for PSR.
3253          */
3254         if (HAS_PSR(dev_priv))
3255                 drm_vblank_restore(dev, pipe);
3256
3257         return 0;
3258 }
3259
3260 /* Called from drm generic code, passed 'crtc' which
3261  * we use as a pipe index
3262  */
3263 static void i8xx_disable_vblank(struct drm_device *dev, unsigned int pipe)
3264 {
3265         struct drm_i915_private *dev_priv = to_i915(dev);
3266         unsigned long irqflags;
3267
3268         spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
3269         i915_disable_pipestat(dev_priv, pipe, PIPE_VBLANK_INTERRUPT_STATUS);
3270         spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
3271 }
3272
3273 static void i945gm_disable_vblank(struct drm_device *dev, unsigned int pipe)
3274 {
3275         struct drm_i915_private *dev_priv = to_i915(dev);
3276
3277         i8xx_disable_vblank(dev, pipe);
3278
3279         if (--dev_priv->i945gm_vblank.enabled == 0)
3280                 schedule_work(&dev_priv->i945gm_vblank.work);
3281 }
3282
3283 static void i965_disable_vblank(struct drm_device *dev, unsigned int pipe)
3284 {
3285         struct drm_i915_private *dev_priv = to_i915(dev);
3286         unsigned long irqflags;
3287
3288         spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
3289         i915_disable_pipestat(dev_priv, pipe,
3290                               PIPE_START_VBLANK_INTERRUPT_STATUS);
3291         spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
3292 }
3293
3294 static void ironlake_disable_vblank(struct drm_device *dev, unsigned int pipe)
3295 {
3296         struct drm_i915_private *dev_priv = to_i915(dev);
3297         unsigned long irqflags;
3298         u32 bit = INTEL_GEN(dev_priv) >= 7 ?
3299                 DE_PIPE_VBLANK_IVB(pipe) : DE_PIPE_VBLANK(pipe);
3300
3301         spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
3302         ilk_disable_display_irq(dev_priv, bit);
3303         spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
3304 }
3305
3306 static void gen8_disable_vblank(struct drm_device *dev, unsigned int pipe)
3307 {
3308         struct drm_i915_private *dev_priv = to_i915(dev);
3309         unsigned long irqflags;
3310
3311         spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
3312         bdw_disable_pipe_irq(dev_priv, pipe, GEN8_PIPE_VBLANK);
3313         spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
3314 }
3315
3316 static void i945gm_vblank_work_func(struct work_struct *work)
3317 {
3318         struct drm_i915_private *dev_priv =
3319                 container_of(work, struct drm_i915_private, i945gm_vblank.work);
3320
3321         /*
3322          * Vblank interrupts fail to wake up the device from C3,
3323          * hence we want to prevent C3 usage while vblank interrupts
3324          * are enabled.
3325          */
3326         pm_qos_update_request(&dev_priv->i945gm_vblank.pm_qos,
3327                               READ_ONCE(dev_priv->i945gm_vblank.enabled) ?
3328                               dev_priv->i945gm_vblank.c3_disable_latency :
3329                               PM_QOS_DEFAULT_VALUE);
3330 }
3331
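/*
 * Return one less than the exit latency of the named cpuidle state, so
 * that a PM QoS request with that value keeps the CPU out of the state;
 * returns 0 if the state (or the cpuidle driver) is unknown.
 */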
3332 static int cstate_disable_latency(const char *name)
3333 {
3334         const struct cpuidle_driver *drv;
3335         int i;
3336
3337         drv = cpuidle_get_driver();
3338         if (!drv)
3339                 return 0;
3340
3341         for (i = 0; i < drv->state_count; i++) {
3342                 const struct cpuidle_state *state = &drv->states[i];
3343
3344                 if (!strcmp(state->name, name))
3345                         return state->exit_latency ?
3346                                 state->exit_latency - 1 : 0;
3347         }
3348
3349         return 0;
3350 }
3351
3352 static void i945gm_vblank_work_init(struct drm_i915_private *dev_priv)
3353 {
3354         INIT_WORK(&dev_priv->i945gm_vblank.work,
3355                   i945gm_vblank_work_func);
3356
3357         dev_priv->i945gm_vblank.c3_disable_latency =
3358                 cstate_disable_latency("C3");
3359         pm_qos_add_request(&dev_priv->i945gm_vblank.pm_qos,
3360                            PM_QOS_CPU_DMA_LATENCY,
3361                            PM_QOS_DEFAULT_VALUE);
3362 }
3363
3364 static void i945gm_vblank_work_fini(struct drm_i915_private *dev_priv)
3365 {
3366         cancel_work_sync(&dev_priv->i945gm_vblank.work);
3367         pm_qos_remove_request(&dev_priv->i945gm_vblank.pm_qos);
3368 }
3369
3370 static void ibx_irq_reset(struct drm_i915_private *dev_priv)
3371 {
3372         struct intel_uncore *uncore = &dev_priv->uncore;
3373
3374         if (HAS_PCH_NOP(dev_priv))
3375                 return;
3376
3377         GEN3_IRQ_RESET(uncore, SDE);
3378
3379         if (HAS_PCH_CPT(dev_priv) || HAS_PCH_LPT(dev_priv))
3380                 I915_WRITE(SERR_INT, 0xffffffff);
3381 }
3382
3383 /*
3384  * SDEIER is also touched by the interrupt handler to work around missed PCH
3385  * interrupts. Hence we can't update it after the interrupt handler is enabled -
3386  * instead we unconditionally enable all PCH interrupt sources here, but then
3387  * only unmask them as needed with SDEIMR.
3388  *
3389  * This function needs to be called before interrupts are enabled.
3390  */
3391 static void ibx_irq_pre_postinstall(struct drm_device *dev)
3392 {
3393         struct drm_i915_private *dev_priv = to_i915(dev);
3394
3395         if (HAS_PCH_NOP(dev_priv))
3396                 return;
3397
3398         WARN_ON(I915_READ(SDEIER) != 0);
3399         I915_WRITE(SDEIER, 0xffffffff);
3400         POSTING_READ(SDEIER);
3401 }
3402
3403 static void gen5_gt_irq_reset(struct drm_i915_private *dev_priv)
3404 {
3405         struct intel_uncore *uncore = &dev_priv->uncore;
3406
3407         GEN3_IRQ_RESET(uncore, GT);
3408         if (INTEL_GEN(dev_priv) >= 6)
3409                 GEN3_IRQ_RESET(uncore, GEN6_PM);
3410 }
3411
3412 static void vlv_display_irq_reset(struct drm_i915_private *dev_priv)
3413 {
3414         struct intel_uncore *uncore = &dev_priv->uncore;
3415
3416         if (IS_CHERRYVIEW(dev_priv))
3417                 I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK_CHV);
3418         else
3419                 I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK);
3420
3421         i915_hotplug_interrupt_update_locked(dev_priv, 0xffffffff, 0);
3422         I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
3423
3424         i9xx_pipestat_irq_reset(dev_priv);
3425
3426         GEN3_IRQ_RESET(uncore, VLV_);
3427         dev_priv->irq_mask = ~0u;
3428 }
3429
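/*
 * Counterpart to vlv_display_irq_reset(); called with irq_lock held.
 * The WARN_ON below relies on the reset having parked irq_mask at ~0u.
 */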
3430 static void vlv_display_irq_postinstall(struct drm_i915_private *dev_priv)
3431 {
3432         struct intel_uncore *uncore = &dev_priv->uncore;
3433
3434         u32 pipestat_mask;
3435         u32 enable_mask;
3436         enum pipe pipe;
3437
3438         pipestat_mask = PIPE_CRC_DONE_INTERRUPT_STATUS;
3439
3440         i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
3441         for_each_pipe(dev_priv, pipe)
3442                 i915_enable_pipestat(dev_priv, pipe, pipestat_mask);
3443
3444         enable_mask = I915_DISPLAY_PORT_INTERRUPT |
3445                 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
3446                 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
3447                 I915_LPE_PIPE_A_INTERRUPT |
3448                 I915_LPE_PIPE_B_INTERRUPT;
3449
3450         if (IS_CHERRYVIEW(dev_priv))
3451                 enable_mask |= I915_DISPLAY_PIPE_C_EVENT_INTERRUPT |
3452                         I915_LPE_PIPE_C_INTERRUPT;
3453
3454         WARN_ON(dev_priv->irq_mask != ~0u);
3455
3456         dev_priv->irq_mask = ~enable_mask;
3457
3458         GEN3_IRQ_INIT(uncore, VLV_, dev_priv->irq_mask, enable_mask);
3459 }
3460
3461 /* drm_dma.h hooks */
3463 static void ironlake_irq_reset(struct drm_device *dev)
3464 {
3465         struct drm_i915_private *dev_priv = to_i915(dev);
3466         struct intel_uncore *uncore = &dev_priv->uncore;
3467
3468         GEN3_IRQ_RESET(uncore, DE);
3469         if (IS_GEN(dev_priv, 7))
3470                 I915_WRITE(GEN7_ERR_INT, 0xffffffff);
3471
3472         if (IS_HASWELL(dev_priv)) {
3473                 I915_WRITE(EDP_PSR_IMR, 0xffffffff);
3474                 I915_WRITE(EDP_PSR_IIR, 0xffffffff);
3475         }
3476
3477         gen5_gt_irq_reset(dev_priv);
3478
3479         ibx_irq_reset(dev_priv);
3480 }
3481
3482 static void valleyview_irq_reset(struct drm_device *dev)
3483 {
3484         struct drm_i915_private *dev_priv = to_i915(dev);
3485
3486         I915_WRITE(VLV_MASTER_IER, 0);
3487         POSTING_READ(VLV_MASTER_IER);
3488
3489         gen5_gt_irq_reset(dev_priv);
3490
3491         spin_lock_irq(&dev_priv->irq_lock);
3492         if (dev_priv->display_irqs_enabled)
3493                 vlv_display_irq_reset(dev_priv);
3494         spin_unlock_irq(&dev_priv->irq_lock);
3495 }
3496
3497 static void gen8_gt_irq_reset(struct drm_i915_private *dev_priv)
3498 {
3499         struct intel_uncore *uncore = &dev_priv->uncore;
3500
3501         GEN8_IRQ_RESET_NDX(uncore, GT, 0);
3502         GEN8_IRQ_RESET_NDX(uncore, GT, 1);
3503         GEN8_IRQ_RESET_NDX(uncore, GT, 2);
3504         GEN8_IRQ_RESET_NDX(uncore, GT, 3);
3505 }
3506
3507 static void gen8_irq_reset(struct drm_device *dev)
3508 {
3509         struct drm_i915_private *dev_priv = to_i915(dev);
3510         struct intel_uncore *uncore = &dev_priv->uncore;
3511         int pipe;
3512
3513         gen8_master_intr_disable(dev_priv->uncore.regs);
3514
3515         gen8_gt_irq_reset(dev_priv);
3516
3517         I915_WRITE(EDP_PSR_IMR, 0xffffffff);
3518         I915_WRITE(EDP_PSR_IIR, 0xffffffff);
3519
3520         for_each_pipe(dev_priv, pipe)
3521                 if (intel_display_power_is_enabled(dev_priv,
3522                                                    POWER_DOMAIN_PIPE(pipe)))
3523                         GEN8_IRQ_RESET_NDX(uncore, DE_PIPE, pipe);
3524
3525         GEN3_IRQ_RESET(uncore, GEN8_DE_PORT_);
3526         GEN3_IRQ_RESET(uncore, GEN8_DE_MISC_);
3527         GEN3_IRQ_RESET(uncore, GEN8_PCU_);
3528
3529         if (HAS_PCH_SPLIT(dev_priv))
3530                 ibx_irq_reset(dev_priv);
3531 }
3532
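/*
 * Reset mirrors gen11_gt_irq_postinstall(): zero the per-class enable
 * registers and write all-ones to the per-engine mask registers.
 */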
3533 static void gen11_gt_irq_reset(struct drm_i915_private *dev_priv)
3534 {
3535         /* Disable RCS, BCS, VCS and VECS class engine interrupts. */
3536         I915_WRITE(GEN11_RENDER_COPY_INTR_ENABLE, 0);
3537         I915_WRITE(GEN11_VCS_VECS_INTR_ENABLE,    0);
3538
3539         /* Restore irq masks on RCS, BCS, VCS and VECS engines. */
3540         I915_WRITE(GEN11_RCS0_RSVD_INTR_MASK,   ~0);
3541         I915_WRITE(GEN11_BCS_RSVD_INTR_MASK,    ~0);
3542         I915_WRITE(GEN11_VCS0_VCS1_INTR_MASK,   ~0);
3543         I915_WRITE(GEN11_VCS2_VCS3_INTR_MASK,   ~0);
3544         I915_WRITE(GEN11_VECS0_VECS1_INTR_MASK, ~0);
3545
3546         I915_WRITE(GEN11_GPM_WGBOXPERF_INTR_ENABLE, 0);
3547         I915_WRITE(GEN11_GPM_WGBOXPERF_INTR_MASK,  ~0);
3548 }
3549
3550 static void gen11_irq_reset(struct drm_device *dev)
3551 {
3552         struct drm_i915_private *dev_priv = to_i915(dev);
3553         struct intel_uncore *uncore = &dev_priv->uncore;
3554         int pipe;
3555
3556         gen11_master_intr_disable(dev_priv->uncore.regs);
3557
3558         gen11_gt_irq_reset(dev_priv);
3559
3560         I915_WRITE(GEN11_DISPLAY_INT_CTL, 0);
3561
3562         I915_WRITE(EDP_PSR_IMR, 0xffffffff);
3563         I915_WRITE(EDP_PSR_IIR, 0xffffffff);
3564
3565         for_each_pipe(dev_priv, pipe)
3566                 if (intel_display_power_is_enabled(dev_priv,
3567                                                    POWER_DOMAIN_PIPE(pipe)))
3568                         GEN8_IRQ_RESET_NDX(uncore, DE_PIPE, pipe);
3569
3570         GEN3_IRQ_RESET(uncore, GEN8_DE_PORT_);
3571         GEN3_IRQ_RESET(uncore, GEN8_DE_MISC_);
3572         GEN3_IRQ_RESET(uncore, GEN11_DE_HPD_);
3573         GEN3_IRQ_RESET(uncore, GEN11_GU_MISC_);
3574         GEN3_IRQ_RESET(uncore, GEN8_PCU_);
3575
3576         if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
3577                 GEN3_IRQ_RESET(uncore, SDE);
3578 }
3579
3580 void gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv,
3581                                      u8 pipe_mask)
3582 {
3583         struct intel_uncore *uncore = &dev_priv->uncore;
3584
3585         u32 extra_ier = GEN8_PIPE_VBLANK | GEN8_PIPE_FIFO_UNDERRUN;
3586         enum pipe pipe;
3587
3588         spin_lock_irq(&dev_priv->irq_lock);
3589
3590         if (!intel_irqs_enabled(dev_priv)) {
3591                 spin_unlock_irq(&dev_priv->irq_lock);
3592                 return;
3593         }
3594
3595         for_each_pipe_masked(dev_priv, pipe, pipe_mask)
3596                 GEN8_IRQ_INIT_NDX(uncore, DE_PIPE, pipe,
3597                                   dev_priv->de_irq_mask[pipe],
3598                                   ~dev_priv->de_irq_mask[pipe] | extra_ier);
3599
3600         spin_unlock_irq(&dev_priv->irq_lock);
3601 }
3602
3603 void gen8_irq_power_well_pre_disable(struct drm_i915_private *dev_priv,
3604                                      u8 pipe_mask)
3605 {
3606         struct intel_uncore *uncore = &dev_priv->uncore;
3607         enum pipe pipe;
3608
3609         spin_lock_irq(&dev_priv->irq_lock);
3610
3611         if (!intel_irqs_enabled(dev_priv)) {
3612                 spin_unlock_irq(&dev_priv->irq_lock);
3613                 return;
3614         }
3615
3616         for_each_pipe_masked(dev_priv, pipe, pipe_mask)
3617                 GEN8_IRQ_RESET_NDX(uncore, DE_PIPE, pipe);
3618
3619         spin_unlock_irq(&dev_priv->irq_lock);
3620
3621         /* make sure we're done processing display irqs */
3622         synchronize_irq(dev_priv->drm.irq);
3623 }
3624
3625 static void cherryview_irq_reset(struct drm_device *dev)
3626 {
3627         struct drm_i915_private *dev_priv = to_i915(dev);
3628         struct intel_uncore *uncore = &dev_priv->uncore;
3629
3630         I915_WRITE(GEN8_MASTER_IRQ, 0);
3631         POSTING_READ(GEN8_MASTER_IRQ);
3632
3633         gen8_gt_irq_reset(dev_priv);
3634
3635         GEN3_IRQ_RESET(uncore, GEN8_PCU_);
3636
3637         spin_lock_irq(&dev_priv->irq_lock);
3638         if (dev_priv->display_irqs_enabled)
3639                 vlv_display_irq_reset(dev_priv);
3640         spin_unlock_irq(&dev_priv->irq_lock);
3641 }
3642
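/*
 * Translate a per-pin HPD bit table into the subset of bits whose pins
 * currently have an encoder in the HPD_ENABLED state.
 */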
3643 static u32 intel_hpd_enabled_irqs(struct drm_i915_private *dev_priv,
3644                                   const u32 hpd[HPD_NUM_PINS])
3645 {
3646         struct intel_encoder *encoder;
3647         u32 enabled_irqs = 0;
3648
3649         for_each_intel_encoder(&dev_priv->drm, encoder)
3650                 if (dev_priv->hotplug.stats[encoder->hpd_pin].state == HPD_ENABLED)
3651                         enabled_irqs |= hpd[encoder->hpd_pin];
3652
3653         return enabled_irqs;
3654 }
3655
3656 static void ibx_hpd_detection_setup(struct drm_i915_private *dev_priv)
3657 {
3658         u32 hotplug;
3659
3660         /*
3661          * Enable digital hotplug on the PCH, and configure the DP short pulse
3662          * duration to 2ms (which is the minimum in the Display Port spec).
3663          * The pulse duration bits are reserved on LPT+.
3664          */
3665         hotplug = I915_READ(PCH_PORT_HOTPLUG);
3666         hotplug &= ~(PORTB_PULSE_DURATION_MASK |
3667                      PORTC_PULSE_DURATION_MASK |
3668                      PORTD_PULSE_DURATION_MASK);
3669         hotplug |= PORTB_HOTPLUG_ENABLE | PORTB_PULSE_DURATION_2ms;
3670         hotplug |= PORTC_HOTPLUG_ENABLE | PORTC_PULSE_DURATION_2ms;
3671         hotplug |= PORTD_HOTPLUG_ENABLE | PORTD_PULSE_DURATION_2ms;
3672         /*
3673          * When CPU and PCH are on the same package, port A
3674          * HPD must be enabled in both north and south.
3675          */
3676         if (HAS_PCH_LPT_LP(dev_priv))
3677                 hotplug |= PORTA_HOTPLUG_ENABLE;
3678         I915_WRITE(PCH_PORT_HOTPLUG, hotplug);
3679 }
3680
3681 static void ibx_hpd_irq_setup(struct drm_i915_private *dev_priv)
3682 {
3683         u32 hotplug_irqs, enabled_irqs;
3684
3685         if (HAS_PCH_IBX(dev_priv)) {
3686                 hotplug_irqs = SDE_HOTPLUG_MASK;
3687                 enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_ibx);
3688         } else {
3689                 hotplug_irqs = SDE_HOTPLUG_MASK_CPT;
3690                 enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_cpt);
3691         }
3692
3693         ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);
3694
3695         ibx_hpd_detection_setup(dev_priv);
3696 }
3697
3698 static void icp_hpd_detection_setup(struct drm_i915_private *dev_priv)
3699 {
3700         u32 hotplug;
3701
3702         hotplug = I915_READ(SHOTPLUG_CTL_DDI);
3703         hotplug |= ICP_DDIA_HPD_ENABLE |
3704                    ICP_DDIB_HPD_ENABLE;
3705         I915_WRITE(SHOTPLUG_CTL_DDI, hotplug);
3706
3707         hotplug = I915_READ(SHOTPLUG_CTL_TC);
3708         hotplug |= ICP_TC_HPD_ENABLE(PORT_TC1) |
3709                    ICP_TC_HPD_ENABLE(PORT_TC2) |
3710                    ICP_TC_HPD_ENABLE(PORT_TC3) |
3711                    ICP_TC_HPD_ENABLE(PORT_TC4);
3712         I915_WRITE(SHOTPLUG_CTL_TC, hotplug);
3713 }
3714
3715 static void icp_hpd_irq_setup(struct drm_i915_private *dev_priv)
3716 {
3717         u32 hotplug_irqs, enabled_irqs;
3718
3719         hotplug_irqs = SDE_DDI_MASK_ICP | SDE_TC_MASK_ICP;
3720         enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_icp);
3721
3722         ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);
3723
3724         icp_hpd_detection_setup(dev_priv);
3725 }
3726
3727 static void gen11_hpd_detection_setup(struct drm_i915_private *dev_priv)
3728 {
3729         u32 hotplug;
3730
3731         hotplug = I915_READ(GEN11_TC_HOTPLUG_CTL);
3732         hotplug |= GEN11_HOTPLUG_CTL_ENABLE(PORT_TC1) |
3733                    GEN11_HOTPLUG_CTL_ENABLE(PORT_TC2) |
3734                    GEN11_HOTPLUG_CTL_ENABLE(PORT_TC3) |
3735                    GEN11_HOTPLUG_CTL_ENABLE(PORT_TC4);
3736         I915_WRITE(GEN11_TC_HOTPLUG_CTL, hotplug);
3737
3738         hotplug = I915_READ(GEN11_TBT_HOTPLUG_CTL);
3739         hotplug |= GEN11_HOTPLUG_CTL_ENABLE(PORT_TC1) |
3740                    GEN11_HOTPLUG_CTL_ENABLE(PORT_TC2) |
3741                    GEN11_HOTPLUG_CTL_ENABLE(PORT_TC3) |
3742                    GEN11_HOTPLUG_CTL_ENABLE(PORT_TC4);
3743         I915_WRITE(GEN11_TBT_HOTPLUG_CTL, hotplug);
3744 }
3745
3746 static void gen11_hpd_irq_setup(struct drm_i915_private *dev_priv)
3747 {
3748         u32 hotplug_irqs, enabled_irqs;
3749         u32 val;
3750
3751         enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_gen11);
3752         hotplug_irqs = GEN11_DE_TC_HOTPLUG_MASK | GEN11_DE_TBT_HOTPLUG_MASK;
3753
3754         val = I915_READ(GEN11_DE_HPD_IMR);
3755         val &= ~hotplug_irqs;
3756         I915_WRITE(GEN11_DE_HPD_IMR, val);
3757         POSTING_READ(GEN11_DE_HPD_IMR);
3758
3759         gen11_hpd_detection_setup(dev_priv);
3760
3761         if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
3762                 icp_hpd_irq_setup(dev_priv);
3763 }
3764
3765 static void spt_hpd_detection_setup(struct drm_i915_private *dev_priv)
3766 {
3767         u32 val, hotplug;
3768
3769         /* Display WA #1179 WaHardHangonHotPlug: cnp */
3770         if (HAS_PCH_CNP(dev_priv)) {
3771                 val = I915_READ(SOUTH_CHICKEN1);
3772                 val &= ~CHASSIS_CLK_REQ_DURATION_MASK;
3773                 val |= CHASSIS_CLK_REQ_DURATION(0xf);
3774                 I915_WRITE(SOUTH_CHICKEN1, val);
3775         }
3776
3777         /* Enable digital hotplug on the PCH */
3778         hotplug = I915_READ(PCH_PORT_HOTPLUG);
3779         hotplug |= PORTA_HOTPLUG_ENABLE |
3780                    PORTB_HOTPLUG_ENABLE |
3781                    PORTC_HOTPLUG_ENABLE |
3782                    PORTD_HOTPLUG_ENABLE;
3783         I915_WRITE(PCH_PORT_HOTPLUG, hotplug);
3784
3785         hotplug = I915_READ(PCH_PORT_HOTPLUG2);
3786         hotplug |= PORTE_HOTPLUG_ENABLE;
3787         I915_WRITE(PCH_PORT_HOTPLUG2, hotplug);
3788 }
3789
3790 static void spt_hpd_irq_setup(struct drm_i915_private *dev_priv)
3791 {
3792         u32 hotplug_irqs, enabled_irqs;
3793
3794         hotplug_irqs = SDE_HOTPLUG_MASK_SPT;
3795         enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_spt);
3796
3797         ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);
3798
3799         spt_hpd_detection_setup(dev_priv);
3800 }
3801
3802 static void ilk_hpd_detection_setup(struct drm_i915_private *dev_priv)
3803 {
3804         u32 hotplug;
3805
3806         /*
3807          * Enable digital hotplug on the CPU, and configure the DP short pulse
3808          * duration to 2ms (which is the minimum in the Display Port spec).
3809          * The pulse duration bits are reserved on HSW+.
3810          */
3811         hotplug = I915_READ(DIGITAL_PORT_HOTPLUG_CNTRL);
3812         hotplug &= ~DIGITAL_PORTA_PULSE_DURATION_MASK;
3813         hotplug |= DIGITAL_PORTA_HOTPLUG_ENABLE |
3814                    DIGITAL_PORTA_PULSE_DURATION_2ms;
3815         I915_WRITE(DIGITAL_PORT_HOTPLUG_CNTRL, hotplug);
3816 }
3817
3818 static void ilk_hpd_irq_setup(struct drm_i915_private *dev_priv)
3819 {
3820         u32 hotplug_irqs, enabled_irqs;
3821
3822         if (INTEL_GEN(dev_priv) >= 8) {
3823                 hotplug_irqs = GEN8_PORT_DP_A_HOTPLUG;
3824                 enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_bdw);
3825
3826                 bdw_update_port_irq(dev_priv, hotplug_irqs, enabled_irqs);
3827         } else if (INTEL_GEN(dev_priv) >= 7) {
3828                 hotplug_irqs = DE_DP_A_HOTPLUG_IVB;
3829                 enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_ivb);
3830
3831                 ilk_update_display_irq(dev_priv, hotplug_irqs, enabled_irqs);
3832         } else {
3833                 hotplug_irqs = DE_DP_A_HOTPLUG;
3834                 enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_ilk);
3835
3836                 ilk_update_display_irq(dev_priv, hotplug_irqs, enabled_irqs);
3837         }
3838
3839         ilk_hpd_detection_setup(dev_priv);
3840
3841         ibx_hpd_irq_setup(dev_priv);
3842 }
3843
3844 static void __bxt_hpd_detection_setup(struct drm_i915_private *dev_priv,
3845                                       u32 enabled_irqs)
3846 {
3847         u32 hotplug;
3848
3849         hotplug = I915_READ(PCH_PORT_HOTPLUG);
3850         hotplug |= PORTA_HOTPLUG_ENABLE |
3851                    PORTB_HOTPLUG_ENABLE |
3852                    PORTC_HOTPLUG_ENABLE;
3853
3854         DRM_DEBUG_KMS("Invert bit setting: hp_ctl:%x hp_port:%x\n",
3855                       hotplug, enabled_irqs);
3856         hotplug &= ~BXT_DDI_HPD_INVERT_MASK;
3857
3858         /*
3859          * For BXT the invert bit has to be set based on the AOB design
3860          * of the HPD detection logic; update it from the VBT fields.
3861          */
3862         if ((enabled_irqs & BXT_DE_PORT_HP_DDIA) &&
3863             intel_bios_is_port_hpd_inverted(dev_priv, PORT_A))
3864                 hotplug |= BXT_DDIA_HPD_INVERT;
3865         if ((enabled_irqs & BXT_DE_PORT_HP_DDIB) &&
3866             intel_bios_is_port_hpd_inverted(dev_priv, PORT_B))
3867                 hotplug |= BXT_DDIB_HPD_INVERT;
3868         if ((enabled_irqs & BXT_DE_PORT_HP_DDIC) &&
3869             intel_bios_is_port_hpd_inverted(dev_priv, PORT_C))
3870                 hotplug |= BXT_DDIC_HPD_INVERT;
3871
3872         I915_WRITE(PCH_PORT_HOTPLUG, hotplug);
3873 }
3874
3875 static void bxt_hpd_detection_setup(struct drm_i915_private *dev_priv)
3876 {
3877         __bxt_hpd_detection_setup(dev_priv, BXT_DE_PORT_HOTPLUG_MASK);
3878 }
3879
3880 static void bxt_hpd_irq_setup(struct drm_i915_private *dev_priv)
3881 {
3882         u32 hotplug_irqs, enabled_irqs;
3883
3884         enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_bxt);
3885         hotplug_irqs = BXT_DE_PORT_HOTPLUG_MASK;
3886
3887         bdw_update_port_irq(dev_priv, hotplug_irqs, enabled_irqs);
3888
3889         __bxt_hpd_detection_setup(dev_priv, enabled_irqs);
3890 }
3891
3892 static void ibx_irq_postinstall(struct drm_device *dev)
3893 {
3894         struct drm_i915_private *dev_priv = to_i915(dev);
3895         u32 mask;
3896
3897         if (HAS_PCH_NOP(dev_priv))
3898                 return;
3899
3900         if (HAS_PCH_IBX(dev_priv))
3901                 mask = SDE_GMBUS | SDE_AUX_MASK | SDE_POISON;
3902         else if (HAS_PCH_CPT(dev_priv) || HAS_PCH_LPT(dev_priv))
3903                 mask = SDE_GMBUS_CPT | SDE_AUX_MASK_CPT;
3904         else
3905                 mask = SDE_GMBUS_CPT;
3906
3907         gen3_assert_iir_is_zero(&dev_priv->uncore, SDEIIR);
3908         I915_WRITE(SDEIMR, ~mask);
3909
3910         if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv) ||
3911             HAS_PCH_LPT(dev_priv))
3912                 ibx_hpd_detection_setup(dev_priv);
3913         else
3914                 spt_hpd_detection_setup(dev_priv);
3915 }
3916
3917 static void gen5_gt_irq_postinstall(struct drm_device *dev)
3918 {
3919         struct drm_i915_private *dev_priv = to_i915(dev);
3920         struct intel_uncore *uncore = &dev_priv->uncore;
3921         u32 pm_irqs, gt_irqs;
3922
3923         pm_irqs = gt_irqs = 0;
3924
3925         dev_priv->gt_irq_mask = ~0;
3926         if (HAS_L3_DPF(dev_priv)) {
3927                 /* L3 parity interrupt is always unmasked. */
3928                 dev_priv->gt_irq_mask = ~GT_PARITY_ERROR(dev_priv);
3929                 gt_irqs |= GT_PARITY_ERROR(dev_priv);
3930         }
3931
3932         gt_irqs |= GT_RENDER_USER_INTERRUPT;
3933         if (IS_GEN(dev_priv, 5)) {
3934                 gt_irqs |= ILK_BSD_USER_INTERRUPT;
3935         } else {
3936                 gt_irqs |= GT_BLT_USER_INTERRUPT | GT_BSD_USER_INTERRUPT;
3937         }
3938
3939         GEN3_IRQ_INIT(uncore, GT, dev_priv->gt_irq_mask, gt_irqs);
3940
3941         if (INTEL_GEN(dev_priv) >= 6) {
3942                 /*
3943                  * RPS interrupts will get enabled/disabled on demand when RPS
3944                  * itself is enabled/disabled.
3945                  */
3946                 if (HAS_ENGINE(dev_priv, VECS0)) {
3947                         pm_irqs |= PM_VEBOX_USER_INTERRUPT;
3948                         dev_priv->pm_ier |= PM_VEBOX_USER_INTERRUPT;
3949                 }
3950
3951                 dev_priv->pm_imr = 0xffffffff;
3952                 GEN3_IRQ_INIT(uncore, GEN6_PM, dev_priv->pm_imr, pm_irqs);
3953         }
3954 }
3955
3956 static int ironlake_irq_postinstall(struct drm_device *dev)
3957 {
3958         struct drm_i915_private *dev_priv = to_i915(dev);
3959         struct intel_uncore *uncore = &dev_priv->uncore;
3960         u32 display_mask, extra_mask;
3961
3962         if (INTEL_GEN(dev_priv) >= 7) {
3963                 display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE_IVB |
3964                                 DE_PCH_EVENT_IVB | DE_AUX_CHANNEL_A_IVB);
3965                 extra_mask = (DE_PIPEC_VBLANK_IVB | DE_PIPEB_VBLANK_IVB |
3966                               DE_PIPEA_VBLANK_IVB | DE_ERR_INT_IVB |
3967                               DE_DP_A_HOTPLUG_IVB);
3968         } else {
3969                 display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT |
3970                                 DE_AUX_CHANNEL_A | DE_PIPEB_CRC_DONE |
3971                                 DE_PIPEA_CRC_DONE | DE_POISON);
3972                 extra_mask = (DE_PIPEA_VBLANK | DE_PIPEB_VBLANK | DE_PCU_EVENT |
3973                               DE_PIPEB_FIFO_UNDERRUN | DE_PIPEA_FIFO_UNDERRUN |
3974                               DE_DP_A_HOTPLUG);
3975         }
3976
3977         if (IS_HASWELL(dev_priv)) {
3978                 gen3_assert_iir_is_zero(uncore, EDP_PSR_IIR);
3979                 intel_psr_irq_control(dev_priv, dev_priv->psr.debug);
3980                 display_mask |= DE_EDP_PSR_INT_HSW;
3981         }
3982
3983         dev_priv->irq_mask = ~display_mask;
3984
3985         ibx_irq_pre_postinstall(dev);
3986
3987         GEN3_IRQ_INIT(uncore, DE, dev_priv->irq_mask,
3988                       display_mask | extra_mask);
3989
3990         gen5_gt_irq_postinstall(dev);
3991
3992         ilk_hpd_detection_setup(dev_priv);
3993
3994         ibx_irq_postinstall(dev);
3995
3996         if (IS_IRONLAKE_M(dev_priv)) {
3997                 /* Enable PCU event interrupts
3998                  *
3999                  * spinlocking not required here for correctness since interrupt
4000                  * setup is guaranteed to run in single-threaded context. But we
4001                  * need it to make the assert_spin_locked happy. */
4002                 spin_lock_irq(&dev_priv->irq_lock);
4003                 ilk_enable_display_irq(dev_priv, DE_PCU_EVENT);
4004                 spin_unlock_irq(&dev_priv->irq_lock);
4005         }
4006
4007         return 0;
4008 }
4009
4010 void valleyview_enable_display_irqs(struct drm_i915_private *dev_priv)
4011 {
4012         lockdep_assert_held(&dev_priv->irq_lock);
4013
4014         if (dev_priv->display_irqs_enabled)
4015                 return;
4016
4017         dev_priv->display_irqs_enabled = true;
4018
4019         if (intel_irqs_enabled(dev_priv)) {
4020                 vlv_display_irq_reset(dev_priv);
4021                 vlv_display_irq_postinstall(dev_priv);
4022         }
4023 }
4024
4025 void valleyview_disable_display_irqs(struct drm_i915_private *dev_priv)
4026 {
4027         lockdep_assert_held(&dev_priv->irq_lock);
4028
4029         if (!dev_priv->display_irqs_enabled)
4030                 return;
4031
4032         dev_priv->display_irqs_enabled = false;
4033
4034         if (intel_irqs_enabled(dev_priv))
4035                 vlv_display_irq_reset(dev_priv);
4036 }
4037
4039 static int valleyview_irq_postinstall(struct drm_device *dev)
4040 {
4041         struct drm_i915_private *dev_priv = to_i915(dev);
4042
4043         gen5_gt_irq_postinstall(dev);
4044
4045         spin_lock_irq(&dev_priv->irq_lock);
4046         if (dev_priv->display_irqs_enabled)
4047                 vlv_display_irq_postinstall(dev_priv);
4048         spin_unlock_irq(&dev_priv->irq_lock);
4049
4050         I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE);
4051         POSTING_READ(VLV_MASTER_IER);
4052
4053         return 0;
4054 }
4055
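/*
 * gt_interrupts[] below is indexed by GT IIR bank: 0 for RCS/BCS, 1 for
 * VCS0/VCS1, 2 left empty for the on-demand PM/RPS bits, and 3 for VECS.
 */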
4056 static void gen8_gt_irq_postinstall(struct drm_i915_private *dev_priv)
4057 {
4058         struct intel_uncore *uncore = &dev_priv->uncore;
4059
4060         /* These are interrupts we'll toggle with the ring mask register */
4061         u32 gt_interrupts[] = {
4062                 (GT_RENDER_USER_INTERRUPT << GEN8_RCS_IRQ_SHIFT |
4063                  GT_CONTEXT_SWITCH_INTERRUPT << GEN8_RCS_IRQ_SHIFT |
4064                  GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT |
4065                  GT_CONTEXT_SWITCH_INTERRUPT << GEN8_BCS_IRQ_SHIFT),
4066
4067                 (GT_RENDER_USER_INTERRUPT << GEN8_VCS0_IRQ_SHIFT |
4068                  GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS0_IRQ_SHIFT |
4069                  GT_RENDER_USER_INTERRUPT << GEN8_VCS1_IRQ_SHIFT |
4070                  GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS1_IRQ_SHIFT),
4071
4072                 0,
4073
4074                 (GT_RENDER_USER_INTERRUPT << GEN8_VECS_IRQ_SHIFT |
4075                  GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VECS_IRQ_SHIFT)
4076         };
4077
4078         dev_priv->pm_ier = 0x0;
4079         dev_priv->pm_imr = ~dev_priv->pm_ier;
4080         GEN8_IRQ_INIT_NDX(uncore, GT, 0, ~gt_interrupts[0], gt_interrupts[0]);
4081         GEN8_IRQ_INIT_NDX(uncore, GT, 1, ~gt_interrupts[1], gt_interrupts[1]);
4082         /*
4083          * RPS interrupts will get enabled/disabled on demand when RPS itself
4084          * is enabled/disabled. The same will be the case for GuC interrupts.
4085          */
4086         GEN8_IRQ_INIT_NDX(uncore, GT, 2, dev_priv->pm_imr, dev_priv->pm_ier);
4087         GEN8_IRQ_INIT_NDX(uncore, GT, 3, ~gt_interrupts[3], gt_interrupts[3]);
4088 }
4089
4090 static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv)
4091 {
4092         struct intel_uncore *uncore = &dev_priv->uncore;
4093
4094         u32 de_pipe_masked = GEN8_PIPE_CDCLK_CRC_DONE;
4095         u32 de_pipe_enables;
4096         u32 de_port_masked = GEN8_AUX_CHANNEL_A;
4097         u32 de_port_enables;
4098         u32 de_misc_masked = GEN8_DE_EDP_PSR;
4099         enum pipe pipe;
4100
4101         if (INTEL_GEN(dev_priv) <= 10)
4102                 de_misc_masked |= GEN8_DE_MISC_GSE;
4103
4104         if (INTEL_GEN(dev_priv) >= 9) {
4105                 de_pipe_masked |= GEN9_DE_PIPE_IRQ_FAULT_ERRORS;
4106                 de_port_masked |= GEN9_AUX_CHANNEL_B | GEN9_AUX_CHANNEL_C |
4107                                   GEN9_AUX_CHANNEL_D;
4108                 if (IS_GEN9_LP(dev_priv))
4109                         de_port_masked |= BXT_DE_PORT_GMBUS;
4110         } else {
4111                 de_pipe_masked |= GEN8_DE_PIPE_IRQ_FAULT_ERRORS;
4112         }
4113
4114         if (INTEL_GEN(dev_priv) >= 11)
4115                 de_port_masked |= ICL_AUX_CHANNEL_E;
4116
4117         if (IS_CNL_WITH_PORT_F(dev_priv) || INTEL_GEN(dev_priv) >= 11)
4118                 de_port_masked |= CNL_AUX_CHANNEL_F;
4119
4120         de_pipe_enables = de_pipe_masked | GEN8_PIPE_VBLANK |
4121                                            GEN8_PIPE_FIFO_UNDERRUN;
4122
4123         de_port_enables = de_port_masked;
4124         if (IS_GEN9_LP(dev_priv))
4125                 de_port_enables |= BXT_DE_PORT_HOTPLUG_MASK;
4126         else if (IS_BROADWELL(dev_priv))
4127                 de_port_enables |= GEN8_PORT_DP_A_HOTPLUG;
4128
4129         gen3_assert_iir_is_zero(uncore, EDP_PSR_IIR);
4130         intel_psr_irq_control(dev_priv, dev_priv->psr.debug);
4131
4132         for_each_pipe(dev_priv, pipe) {
4133                 dev_priv->de_irq_mask[pipe] = ~de_pipe_masked;
4134
4135                 if (intel_display_power_is_enabled(dev_priv,
4136                                 POWER_DOMAIN_PIPE(pipe)))
4137                         GEN8_IRQ_INIT_NDX(uncore, DE_PIPE, pipe,
4138                                           dev_priv->de_irq_mask[pipe],
4139                                           de_pipe_enables);
4140         }
4141
4142         GEN3_IRQ_INIT(uncore, GEN8_DE_PORT_, ~de_port_masked, de_port_enables);
4143         GEN3_IRQ_INIT(uncore, GEN8_DE_MISC_, ~de_misc_masked, de_misc_masked);
4144
4145         if (INTEL_GEN(dev_priv) >= 11) {
4146                 u32 de_hpd_masked = 0;
4147                 u32 de_hpd_enables = GEN11_DE_TC_HOTPLUG_MASK |
4148                                      GEN11_DE_TBT_HOTPLUG_MASK;
4149
4150                 GEN3_IRQ_INIT(uncore, GEN11_DE_HPD_, ~de_hpd_masked,
4151                               de_hpd_enables);
4152                 gen11_hpd_detection_setup(dev_priv);
4153         } else if (IS_GEN9_LP(dev_priv)) {
4154                 bxt_hpd_detection_setup(dev_priv);
4155         } else if (IS_BROADWELL(dev_priv)) {
4156                 ilk_hpd_detection_setup(dev_priv);
4157         }
4158 }
4159
4160 static int gen8_irq_postinstall(struct drm_device *dev)
4161 {
4162         struct drm_i915_private *dev_priv = to_i915(dev);
4163
4164         if (HAS_PCH_SPLIT(dev_priv))
4165                 ibx_irq_pre_postinstall(dev);
4166
4167         gen8_gt_irq_postinstall(dev_priv);
4168         gen8_de_irq_postinstall(dev_priv);
4169
4170         if (HAS_PCH_SPLIT(dev_priv))
4171                 ibx_irq_postinstall(dev);
4172
4173         gen8_master_intr_enable(dev_priv->uncore.regs);
4174
4175         return 0;
4176 }
4177
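/*
 * Each gen11 engine enable/mask register serves two engines, one per
 * 16-bit half, hence the "irqs << 16 | irqs" replication below
 * (illustrative layout):
 *
 *	31 ............ 16 15 ............. 0
 *	[ engine 1 bits  ] [ engine 0 bits  ]
 *
 * The BUILD_BUG_ON guards against irqs spilling into the high half.
 */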
4178 static void gen11_gt_irq_postinstall(struct drm_i915_private *dev_priv)
4179 {
4180         const u32 irqs = GT_RENDER_USER_INTERRUPT | GT_CONTEXT_SWITCH_INTERRUPT;
4181
4182         BUILD_BUG_ON(irqs & 0xffff0000);
4183
4184         /* Enable RCS, BCS, VCS and VECS class interrupts. */
4185         I915_WRITE(GEN11_RENDER_COPY_INTR_ENABLE, irqs << 16 | irqs);
4186         I915_WRITE(GEN11_VCS_VECS_INTR_ENABLE,    irqs << 16 | irqs);
4187
4188         /* Unmask irqs on RCS, BCS, VCS and VECS engines. */
4189         I915_WRITE(GEN11_RCS0_RSVD_INTR_MASK,   ~(irqs << 16));
4190         I915_WRITE(GEN11_BCS_RSVD_INTR_MASK,    ~(irqs << 16));
4191         I915_WRITE(GEN11_VCS0_VCS1_INTR_MASK,   ~(irqs | irqs << 16));
4192         I915_WRITE(GEN11_VCS2_VCS3_INTR_MASK,   ~(irqs | irqs << 16));
4193         I915_WRITE(GEN11_VECS0_VECS1_INTR_MASK, ~(irqs | irqs << 16));
4194
4195         /*
4196          * RPS interrupts will get enabled/disabled on demand when RPS itself
4197          * is enabled/disabled.
4198          */
4199         dev_priv->pm_ier = 0x0;
4200         dev_priv->pm_imr = ~dev_priv->pm_ier;
4201         I915_WRITE(GEN11_GPM_WGBOXPERF_INTR_ENABLE, 0);
4202         I915_WRITE(GEN11_GPM_WGBOXPERF_INTR_MASK,  ~0);
4203 }
4204
4205 static void icp_irq_postinstall(struct drm_device *dev)
4206 {
4207         struct drm_i915_private *dev_priv = to_i915(dev);
4208         u32 mask = SDE_GMBUS_ICP;
4209
4210         WARN_ON(I915_READ(SDEIER) != 0);
4211         I915_WRITE(SDEIER, 0xffffffff);
4212         POSTING_READ(SDEIER);
4213
4214         gen3_assert_iir_is_zero(&dev_priv->uncore, SDEIIR);
4215         I915_WRITE(SDEIMR, ~mask);
4216
4217         icp_hpd_detection_setup(dev_priv);
4218 }
4219
4220 static int gen11_irq_postinstall(struct drm_device *dev)
4221 {
4222         struct drm_i915_private *dev_priv = to_i915(dev);
4223         struct intel_uncore *uncore = &dev_priv->uncore;
4224         u32 gu_misc_masked = GEN11_GU_MISC_GSE;
4225
4226         if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
4227                 icp_irq_postinstall(dev);
4228
4229         gen11_gt_irq_postinstall(dev_priv);
4230         gen8_de_irq_postinstall(dev_priv);
4231
4232         GEN3_IRQ_INIT(uncore, GEN11_GU_MISC_, ~gu_misc_masked, gu_misc_masked);
4233
4234         I915_WRITE(GEN11_DISPLAY_INT_CTL, GEN11_DISPLAY_IRQ_ENABLE);
4235
4236         gen11_master_intr_enable(dev_priv->uncore.regs);
4237         POSTING_READ(GEN11_GFX_MSTR_IRQ);
4238
4239         return 0;
4240 }
4241
4242 static int cherryview_irq_postinstall(struct drm_device *dev)
4243 {
4244         struct drm_i915_private *dev_priv = to_i915(dev);
4245
4246         gen8_gt_irq_postinstall(dev_priv);
4247
4248         spin_lock_irq(&dev_priv->irq_lock);
4249         if (dev_priv->display_irqs_enabled)
4250                 vlv_display_irq_postinstall(dev_priv);
4251         spin_unlock_irq(&dev_priv->irq_lock);
4252
4253         I915_WRITE(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
4254         POSTING_READ(GEN8_MASTER_IRQ);
4255
4256         return 0;
4257 }
4258
4259 static void i8xx_irq_reset(struct drm_device *dev)
4260 {
4261         struct drm_i915_private *dev_priv = to_i915(dev);
4262         struct intel_uncore *uncore = &dev_priv->uncore;
4263
4264         i9xx_pipestat_irq_reset(dev_priv);
4265
4266         GEN2_IRQ_RESET(uncore);
4267 }
4268
4269 static int i8xx_irq_postinstall(struct drm_device *dev)
4270 {
4271         struct drm_i915_private *dev_priv = to_i915(dev);
4272         struct intel_uncore *uncore = &dev_priv->uncore;
4273         u16 enable_mask;
4274
4275         I915_WRITE16(EMR, ~(I915_ERROR_PAGE_TABLE |
4276                             I915_ERROR_MEMORY_REFRESH));
4277
4278         /* Unmask the interrupts that we always want on. */
4279         dev_priv->irq_mask =
4280                 ~(I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
4281                   I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
4282                   I915_MASTER_ERROR_INTERRUPT);
4283
4284         enable_mask =
4285                 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
4286                 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
4287                 I915_MASTER_ERROR_INTERRUPT |
4288                 I915_USER_INTERRUPT;
4289
4290         GEN2_IRQ_INIT(uncore, dev_priv->irq_mask, enable_mask);
4291
4292         /* Interrupt setup is already guaranteed to be single-threaded; this is
4293          * just to make the assert_spin_locked check happy. */
4294         spin_lock_irq(&dev_priv->irq_lock);
4295         i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
4296         i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
4297         spin_unlock_irq(&dev_priv->irq_lock);
4298
4299         return 0;
4300 }
4301
4302 static void i8xx_error_irq_ack(struct drm_i915_private *dev_priv,
4303                                u16 *eir, u16 *eir_stuck)
4304 {
4305         u16 emr;
4306
4307         *eir = I915_READ16(EIR);
4308
4309         if (*eir)
4310                 I915_WRITE16(EIR, *eir);
4311
4312         *eir_stuck = I915_READ16(EIR);
4313         if (*eir_stuck == 0)
4314                 return;
4315
4316         /*
4317          * Toggle all EMR bits to make sure we get an edge
4318          * in the ISR master error bit if we don't clear
4319          * all the EIR bits. Otherwise the edge triggered
4320          * IIR on i965/g4x wouldn't notice that an interrupt
4321          * is still pending. Also some EIR bits can't be
4322          * cleared except by handling the underlying error
4323          * (or by a GPU reset) so we mask any bit that
4324          * remains set.
4325          */
4326         emr = I915_READ16(EMR);
4327         I915_WRITE16(EMR, 0xffff);
4328         I915_WRITE16(EMR, emr | *eir_stuck);
4329 }
4330
4331 static void i8xx_error_irq_handler(struct drm_i915_private *dev_priv,
4332                                    u16 eir, u16 eir_stuck)
4333 {
4334         DRM_DEBUG("Master Error: EIR 0x%04x\n", eir);
4335
4336         if (eir_stuck)
4337                 DRM_DEBUG_DRIVER("EIR stuck: 0x%04x, masked\n", eir_stuck);
4338 }
4339
4340 static void i9xx_error_irq_ack(struct drm_i915_private *dev_priv,
4341                                u32 *eir, u32 *eir_stuck)
4342 {
4343         u32 emr;
4344
4345         *eir = I915_READ(EIR);
4346
4347         I915_WRITE(EIR, *eir);
4348
4349         *eir_stuck = I915_READ(EIR);
4350         if (*eir_stuck == 0)
4351                 return;
4352
4353         /*
4354          * Toggle all EMR bits to make sure we get an edge
4355          * in the ISR master error bit if we don't clear
4356          * all the EIR bits. Otherwise the edge triggered
4357          * IIR on i965/g4x wouldn't notice that an interrupt
4358          * is still pending. Also some EIR bits can't be
4359          * cleared except by handling the underlying error
4360          * (or by a GPU reset) so we mask any bit that
4361          * remains set.
4362          */
4363         emr = I915_READ(EMR);
4364         I915_WRITE(EMR, 0xffffffff);
4365         I915_WRITE(EMR, emr | *eir_stuck);
4366 }
4367
4368 static void i9xx_error_irq_handler(struct drm_i915_private *dev_priv,
4369                                    u32 eir, u32 eir_stuck)
4370 {
4371         DRM_DEBUG("Master Error: EIR 0x%08x\n", eir);
4372
4373         if (eir_stuck)
4374                 DRM_DEBUG_DRIVER("EIR stuck: 0x%08x, masked\n", eir_stuck);
4375 }
4376
4377 static irqreturn_t i8xx_irq_handler(int irq, void *arg)
4378 {
4379         struct drm_device *dev = arg;
4380         struct drm_i915_private *dev_priv = to_i915(dev);
4381         irqreturn_t ret = IRQ_NONE;
4382
4383         if (!intel_irqs_enabled(dev_priv))
4384                 return IRQ_NONE;
4385
4386         /* IRQs are synced during runtime_suspend; we don't require a wakeref */
4387         disable_rpm_wakeref_asserts(dev_priv);
4388
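        /*
         * Single pass: the do/while (0) never loops; it only exists so
         * that "break" can bail out early once IIR reads back zero.
         */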
4389         do {
4390                 u32 pipe_stats[I915_MAX_PIPES] = {};
4391                 u16 eir = 0, eir_stuck = 0;
4392                 u16 iir;
4393
4394                 iir = I915_READ16(GEN2_IIR);
4395                 if (iir == 0)
4396                         break;
4397
4398                 ret = IRQ_HANDLED;
4399
4400                 /* Call regardless, as some status bits might not be
4401                  * signalled in iir */
4402                 i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats);
4403
4404                 if (iir & I915_MASTER_ERROR_INTERRUPT)
4405                         i8xx_error_irq_ack(dev_priv, &eir, &eir_stuck);
4406
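                /* Ack IIR before handling the events: anything that
                 * re-asserts while we process it then produces a fresh
                 * edge and retriggers the interrupt instead of being
                 * lost. */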
4407                 I915_WRITE16(GEN2_IIR, iir);
4408
4409                 if (iir & I915_USER_INTERRUPT)
4410                         intel_engine_breadcrumbs_irq(dev_priv->engine[RCS0]);
4411
4412                 if (iir & I915_MASTER_ERROR_INTERRUPT)
4413                         i8xx_error_irq_handler(dev_priv, eir, eir_stuck);
4414
4415                 i8xx_pipestat_irq_handler(dev_priv, iir, pipe_stats);
4416         } while (0);
4417
4418         enable_rpm_wakeref_asserts(dev_priv);
4419
4420         return ret;
4421 }
4422
4423 static void i915_irq_reset(struct drm_device *dev)
4424 {
4425         struct drm_i915_private *dev_priv = to_i915(dev);
4426         struct intel_uncore *uncore = &dev_priv->uncore;
4427
4428         if (I915_HAS_HOTPLUG(dev_priv)) {
4429                 i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
4430                 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
4431         }
4432
4433         i9xx_pipestat_irq_reset(dev_priv);
4434
4435         GEN3_IRQ_RESET(uncore, GEN2_);
4436 }
4437
4438 static int i915_irq_postinstall(struct drm_device *dev)
4439 {
4440         struct drm_i915_private *dev_priv = to_i915(dev);
4441         struct intel_uncore *uncore = &dev_priv->uncore;
4442         u32 enable_mask;
4443
4444         I915_WRITE(EMR, ~(I915_ERROR_PAGE_TABLE |
4445                           I915_ERROR_MEMORY_REFRESH));
4446
4447         /* Unmask the interrupts that we always want on. */
4448         dev_priv->irq_mask =
4449                 ~(I915_ASLE_INTERRUPT |
4450                   I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
4451                   I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
4452                   I915_MASTER_ERROR_INTERRUPT);
4453
4454         enable_mask =
4455                 I915_ASLE_INTERRUPT |
4456                 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
4457                 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
4458                 I915_MASTER_ERROR_INTERRUPT |
4459                 I915_USER_INTERRUPT;
4460
4461         if (I915_HAS_HOTPLUG(dev_priv)) {
4462                 /* Enable in IER... */
4463                 enable_mask |= I915_DISPLAY_PORT_INTERRUPT;
4464                 /* and unmask in IMR */
4465                 dev_priv->irq_mask &= ~I915_DISPLAY_PORT_INTERRUPT;
4466         }
4467
4468         GEN3_IRQ_INIT(uncore, GEN2_, dev_priv->irq_mask, enable_mask);
4469
4470         /* Interrupt setup is already guaranteed to be single-threaded; this is
4471          * just to make the assert_spin_locked check happy. */
4472         spin_lock_irq(&dev_priv->irq_lock);
4473         i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
4474         i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
4475         spin_unlock_irq(&dev_priv->irq_lock);
4476
4477         i915_enable_asle_pipestat(dev_priv);
4478
4479         return 0;
4480 }
4481
4482 static irqreturn_t i915_irq_handler(int irq, void *arg)
4483 {
4484         struct drm_device *dev = arg;
4485         struct drm_i915_private *dev_priv = to_i915(dev);
4486         irqreturn_t ret = IRQ_NONE;
4487
4488         if (!intel_irqs_enabled(dev_priv))
4489                 return IRQ_NONE;
4490
4491         /* IRQs are synced during runtime_suspend; we don't require a wakeref */
4492         disable_rpm_wakeref_asserts(dev_priv);
4493
4494         do {
4495                 u32 pipe_stats[I915_MAX_PIPES] = {};
4496                 u32 eir = 0, eir_stuck = 0;
4497                 u32 hotplug_status = 0;
4498                 u32 iir;
4499
4500                 iir = I915_READ(GEN2_IIR);
4501                 if (iir == 0)
4502                         break;
4503
4504                 ret = IRQ_HANDLED;
4505
4506                 if (I915_HAS_HOTPLUG(dev_priv) &&
4507                     iir & I915_DISPLAY_PORT_INTERRUPT)
4508                         hotplug_status = i9xx_hpd_irq_ack(dev_priv);
4509
4510                 /* Call regardless, as some status bits might not be
4511                  * signalled in iir */
4512                 i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats);
4513
4514                 if (iir & I915_MASTER_ERROR_INTERRUPT)
4515                         i9xx_error_irq_ack(dev_priv, &eir, &eir_stuck);
4516
4517                 I915_WRITE(GEN2_IIR, iir);
4518
4519                 if (iir & I915_USER_INTERRUPT)
4520                         intel_engine_breadcrumbs_irq(dev_priv->engine[RCS0]);
4521
4522                 if (iir & I915_MASTER_ERROR_INTERRUPT)
4523                         i9xx_error_irq_handler(dev_priv, eir, eir_stuck);
4524
4525                 if (hotplug_status)
4526                         i9xx_hpd_irq_handler(dev_priv, hotplug_status);
4527
4528                 i915_pipestat_irq_handler(dev_priv, iir, pipe_stats);
4529         } while (0);
4530
4531         enable_rpm_wakeref_asserts(dev_priv);
4532
4533         return ret;
4534 }
4535
4536 static void i965_irq_reset(struct drm_device *dev)
4537 {
4538         struct drm_i915_private *dev_priv = to_i915(dev);
4539         struct intel_uncore *uncore = &dev_priv->uncore;
4540
4541         i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
4542         I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
4543
4544         i9xx_pipestat_irq_reset(dev_priv);
4545
4546         GEN3_IRQ_RESET(uncore, GEN2_);
4547 }
4548
4549 static int i965_irq_postinstall(struct drm_device *dev)
4550 {
4551         struct drm_i915_private *dev_priv = to_i915(dev);
4552         struct intel_uncore *uncore = &dev_priv->uncore;
4553         u32 enable_mask;
4554         u32 error_mask;
4555
4556         /*
4557          * Enable some error detection; note that the instruction error
4558          * mask bit is reserved, so we leave it masked.
4559          */
4560         if (IS_G4X(dev_priv)) {
4561                 error_mask = ~(GM45_ERROR_PAGE_TABLE |
4562                                GM45_ERROR_MEM_PRIV |
4563                                GM45_ERROR_CP_PRIV |
4564                                I915_ERROR_MEMORY_REFRESH);
4565         } else {
4566                 error_mask = ~(I915_ERROR_PAGE_TABLE |
4567                                I915_ERROR_MEMORY_REFRESH);
4568         }
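        /* A bit set in EMR masks the corresponding error condition, so
         * the ~(...) expressions above unmask exactly the errors we
         * want reported. */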
4569         I915_WRITE(EMR, error_mask);
4570
4571         /* Unmask the interrupts that we always want on. */
4572         dev_priv->irq_mask =
4573                 ~(I915_ASLE_INTERRUPT |
4574                   I915_DISPLAY_PORT_INTERRUPT |
4575                   I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
4576                   I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
4577                   I915_MASTER_ERROR_INTERRUPT);
4578
4579         enable_mask =
4580                 I915_ASLE_INTERRUPT |
4581                 I915_DISPLAY_PORT_INTERRUPT |
4582                 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
4583                 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
4584                 I915_MASTER_ERROR_INTERRUPT |
4585                 I915_USER_INTERRUPT;
4586
4587         if (IS_G4X(dev_priv))
4588                 enable_mask |= I915_BSD_USER_INTERRUPT;
4589
4590         GEN3_IRQ_INIT(uncore, GEN2_, dev_priv->irq_mask, enable_mask);
4591
4592         /* Interrupt setup is already guaranteed to be single-threaded; this is
4593          * just to make the assert_spin_locked check happy. */
4594         spin_lock_irq(&dev_priv->irq_lock);
4595         i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
4596         i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
4597         i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
4598         spin_unlock_irq(&dev_priv->irq_lock);
4599
4600         i915_enable_asle_pipestat(dev_priv);
4601
4602         return 0;
4603 }
4604
4605 static void i915_hpd_irq_setup(struct drm_i915_private *dev_priv)
4606 {
4607         u32 hotplug_en;
4608
4609         lockdep_assert_held(&dev_priv->irq_lock);
4610
4611         /* Note HDMI and DP share hotplug bits */
4612         /* enable bits are the same for all generations */
4613         hotplug_en = intel_hpd_enabled_irqs(dev_priv, hpd_mask_i915);
4614         /* Programming the CRT detection parameters tends
4615          * to generate a spurious hotplug event about three
4616          * seconds later. So just do it once.
4617          */
4618         if (IS_G4X(dev_priv))
4619                 hotplug_en |= CRT_HOTPLUG_ACTIVATION_PERIOD_64;
4620         hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50;
4621
4622         /* Ignore TV since it's buggy */
4623         i915_hotplug_interrupt_update_locked(dev_priv,
4624                                              HOTPLUG_INT_EN_MASK |
4625                                              CRT_HOTPLUG_VOLTAGE_COMPARE_MASK |
4626                                              CRT_HOTPLUG_ACTIVATION_PERIOD_64,
4627                                              hotplug_en);
4628 }
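
/*
 * i915_hotplug_interrupt_update_locked(dev_priv, mask, bits) is a
 * read-modify-write of PORT_HOTPLUG_EN: only the bits covered by
 * @mask are replaced by @bits, so the call above rewrites just the
 * enable, voltage-compare and activation-period fields. Roughly:
 *
 *	val = READ(PORT_HOTPLUG_EN);
 *	val &= ~mask;
 *	val |= bits;
 *	WRITE(PORT_HOTPLUG_EN, val);
 */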
4629
4630 static irqreturn_t i965_irq_handler(int irq, void *arg)
4631 {
4632         struct drm_device *dev = arg;
4633         struct drm_i915_private *dev_priv = to_i915(dev);
4634         irqreturn_t ret = IRQ_NONE;
4635
4636         if (!intel_irqs_enabled(dev_priv))
4637                 return IRQ_NONE;
4638
4639         /* IRQs are synced during runtime_suspend; we don't require a wakeref */
4640         disable_rpm_wakeref_asserts(dev_priv);
4641
4642         do {
4643                 u32 pipe_stats[I915_MAX_PIPES] = {};
4644                 u32 eir = 0, eir_stuck = 0;
4645                 u32 hotplug_status = 0;
4646                 u32 iir;
4647
4648                 iir = I915_READ(GEN2_IIR);
4649                 if (iir == 0)
4650                         break;
4651
4652                 ret = IRQ_HANDLED;
4653
4654                 if (iir & I915_DISPLAY_PORT_INTERRUPT)
4655                         hotplug_status = i9xx_hpd_irq_ack(dev_priv);
4656
4657                 /* Call regardless, as some status bits might not be
4658                  * signalled in iir */
4659                 i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats);
4660
4661                 if (iir & I915_MASTER_ERROR_INTERRUPT)
4662                         i9xx_error_irq_ack(dev_priv, &eir, &eir_stuck);
4663
4664                 I915_WRITE(GEN2_IIR, iir);
4665
4666                 if (iir & I915_USER_INTERRUPT)
4667                         intel_engine_breadcrumbs_irq(dev_priv->engine[RCS0]);
4668
4669                 if (iir & I915_BSD_USER_INTERRUPT)
4670                         intel_engine_breadcrumbs_irq(dev_priv->engine[VCS0]);
4671
4672                 if (iir & I915_MASTER_ERROR_INTERRUPT)
4673                         i9xx_error_irq_handler(dev_priv, eir, eir_stuck);
4674
4675                 if (hotplug_status)
4676                         i9xx_hpd_irq_handler(dev_priv, hotplug_status);
4677
4678                 i965_pipestat_irq_handler(dev_priv, iir, pipe_stats);
4679         } while (0);
4680
4681         enable_rpm_wakeref_asserts(dev_priv);
4682
4683         return ret;
4684 }
4685
4686 /**
4687  * intel_irq_init - initializes irq support
4688  * @dev_priv: i915 device instance
4689  *
4690  * This function initializes all the irq support, including work items, timers
4691  * and all the vtables. It does not set up the interrupt itself, though.
4692  */
4693 void intel_irq_init(struct drm_i915_private *dev_priv)
4694 {
4695         struct drm_device *dev = &dev_priv->drm;
4696         struct intel_rps *rps = &dev_priv->gt_pm.rps;
4697         int i;
4698
4699         if (IS_I945GM(dev_priv))
4700                 i945gm_vblank_work_init(dev_priv);
4701
4702         intel_hpd_init_work(dev_priv);
4703
4704         INIT_WORK(&rps->work, gen6_pm_rps_work);
4705
4706         INIT_WORK(&dev_priv->l3_parity.error_work, ivybridge_parity_work);
4707         for (i = 0; i < MAX_L3_SLICES; ++i)
4708                 dev_priv->l3_parity.remap_info[i] = NULL;
4709
4710         if (HAS_GUC_SCHED(dev_priv))
4711                 dev_priv->pm_guc_events = GEN9_GUC_TO_HOST_INT_EVENT;
4712
4713         /* Let's track the enabled rps events */
4714         if (IS_VALLEYVIEW(dev_priv))
4715                 /* WaGsvRC0ResidencyMethod:vlv */
4716                 dev_priv->pm_rps_events = GEN6_PM_RP_UP_EI_EXPIRED;
4717         else
4718                 dev_priv->pm_rps_events = (GEN6_PM_RP_UP_THRESHOLD |
4719                                            GEN6_PM_RP_DOWN_THRESHOLD |
4720                                            GEN6_PM_RP_DOWN_TIMEOUT);
4721
4722         /* We share the register with the other engines */
4723         if (INTEL_GEN(dev_priv) > 9)
4724                 GEM_WARN_ON(dev_priv->pm_rps_events & 0xffff0000);
4725
4726         rps->pm_intrmsk_mbz = 0;
4727
4728         /*
4729          * SNB, IVB and HSW can hard hang on a looping batchbuffer if
4730          * GEN6_PM_RP_UP_EI_EXPIRED is masked; VLV and CHV may as well.
4731          *
4732          * TODO: verify whether this can be reproduced on VLV and CHV.
4733          */
4734         if (INTEL_GEN(dev_priv) <= 7)
4735                 rps->pm_intrmsk_mbz |= GEN6_PM_RP_UP_EI_EXPIRED;
4736
4737         if (INTEL_GEN(dev_priv) >= 8)
4738                 rps->pm_intrmsk_mbz |= GEN8_PMINTR_DISABLE_REDIRECT_TO_GUC;
4739
4740         if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv))
4741                 dev->driver->get_vblank_counter = g4x_get_vblank_counter;
4742         else if (INTEL_GEN(dev_priv) >= 3)
4743                 dev->driver->get_vblank_counter = i915_get_vblank_counter;
4744
4745         dev->vblank_disable_immediate = true;
4746
4747         /* Most platforms treat the display irq block as an always-on
4748          * power domain. vlv/chv can disable it at runtime and need
4749          * special care to avoid writing any of the display block registers
4750          * outside of the power domain. In that case we defer setting up
4751          * the display irqs to the runtime pm code.
4752          */
4753         dev_priv->display_irqs_enabled = true;
4754         if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
4755                 dev_priv->display_irqs_enabled = false;
4756
4757         dev_priv->hotplug.hpd_storm_threshold = HPD_STORM_DEFAULT_THRESHOLD;
4758         /* If we have MST support, we want to avoid doing short HPD IRQ storm
4759          * detection, as short HPD storms will occur as a natural part of
4760          * sideband messaging with MST.
4761          * On older platforms, however, IRQ storms can occur with both long and
4762          * short pulses, as seen on some G4x systems.
4763          */
4764         dev_priv->hotplug.hpd_short_storm_enabled = !HAS_DP_MST(dev_priv);
4765
4766         dev->driver->get_vblank_timestamp = drm_calc_vbltimestamp_from_scanoutpos;
4767         dev->driver->get_scanout_position = i915_get_crtc_scanoutpos;
4768
4769         if (IS_CHERRYVIEW(dev_priv)) {
4770                 dev->driver->irq_handler = cherryview_irq_handler;
4771                 dev->driver->irq_preinstall = cherryview_irq_reset;
4772                 dev->driver->irq_postinstall = cherryview_irq_postinstall;
4773                 dev->driver->irq_uninstall = cherryview_irq_reset;
4774                 dev->driver->enable_vblank = i965_enable_vblank;
4775                 dev->driver->disable_vblank = i965_disable_vblank;
4776                 dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
4777         } else if (IS_VALLEYVIEW(dev_priv)) {
4778                 dev->driver->irq_handler = valleyview_irq_handler;
4779                 dev->driver->irq_preinstall = valleyview_irq_reset;
4780                 dev->driver->irq_postinstall = valleyview_irq_postinstall;
4781                 dev->driver->irq_uninstall = valleyview_irq_reset;
4782                 dev->driver->enable_vblank = i965_enable_vblank;
4783                 dev->driver->disable_vblank = i965_disable_vblank;
4784                 dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
4785         } else if (INTEL_GEN(dev_priv) >= 11) {
4786                 dev->driver->irq_handler = gen11_irq_handler;
4787                 dev->driver->irq_preinstall = gen11_irq_reset;
4788                 dev->driver->irq_postinstall = gen11_irq_postinstall;
4789                 dev->driver->irq_uninstall = gen11_irq_reset;
4790                 dev->driver->enable_vblank = gen8_enable_vblank;
4791                 dev->driver->disable_vblank = gen8_disable_vblank;
4792                 dev_priv->display.hpd_irq_setup = gen11_hpd_irq_setup;
4793         } else if (INTEL_GEN(dev_priv) >= 8) {
4794                 dev->driver->irq_handler = gen8_irq_handler;
4795                 dev->driver->irq_preinstall = gen8_irq_reset;
4796                 dev->driver->irq_postinstall = gen8_irq_postinstall;
4797                 dev->driver->irq_uninstall = gen8_irq_reset;
4798                 dev->driver->enable_vblank = gen8_enable_vblank;
4799                 dev->driver->disable_vblank = gen8_disable_vblank;
4800                 if (IS_GEN9_LP(dev_priv))
4801                         dev_priv->display.hpd_irq_setup = bxt_hpd_irq_setup;
4802                 else if (INTEL_PCH_TYPE(dev_priv) >= PCH_SPT)
4803                         dev_priv->display.hpd_irq_setup = spt_hpd_irq_setup;
4804                 else
4805                         dev_priv->display.hpd_irq_setup = ilk_hpd_irq_setup;
4806         } else if (HAS_PCH_SPLIT(dev_priv)) {
4807                 dev->driver->irq_handler = ironlake_irq_handler;
4808                 dev->driver->irq_preinstall = ironlake_irq_reset;
4809                 dev->driver->irq_postinstall = ironlake_irq_postinstall;
4810                 dev->driver->irq_uninstall = ironlake_irq_reset;
4811                 dev->driver->enable_vblank = ironlake_enable_vblank;
4812                 dev->driver->disable_vblank = ironlake_disable_vblank;
4813                 dev_priv->display.hpd_irq_setup = ilk_hpd_irq_setup;
4814         } else {
4815                 if (IS_GEN(dev_priv, 2)) {
4816                         dev->driver->irq_preinstall = i8xx_irq_reset;
4817                         dev->driver->irq_postinstall = i8xx_irq_postinstall;
4818                         dev->driver->irq_handler = i8xx_irq_handler;
4819                         dev->driver->irq_uninstall = i8xx_irq_reset;
4820                         dev->driver->enable_vblank = i8xx_enable_vblank;
4821                         dev->driver->disable_vblank = i8xx_disable_vblank;
4822                 } else if (IS_I945GM(dev_priv)) {
4823                         dev->driver->irq_preinstall = i915_irq_reset;
4824                         dev->driver->irq_postinstall = i915_irq_postinstall;
4825                         dev->driver->irq_uninstall = i915_irq_reset;
4826                         dev->driver->irq_handler = i915_irq_handler;
4827                         dev->driver->enable_vblank = i945gm_enable_vblank;
4828                         dev->driver->disable_vblank = i945gm_disable_vblank;
4829                 } else if (IS_GEN(dev_priv, 3)) {
4830                         dev->driver->irq_preinstall = i915_irq_reset;
4831                         dev->driver->irq_postinstall = i915_irq_postinstall;
4832                         dev->driver->irq_uninstall = i915_irq_reset;
4833                         dev->driver->irq_handler = i915_irq_handler;
4834                         dev->driver->enable_vblank = i8xx_enable_vblank;
4835                         dev->driver->disable_vblank = i8xx_disable_vblank;
4836                 } else {
4837                         dev->driver->irq_preinstall = i965_irq_reset;
4838                         dev->driver->irq_postinstall = i965_irq_postinstall;
4839                         dev->driver->irq_uninstall = i965_irq_reset;
4840                         dev->driver->irq_handler = i965_irq_handler;
4841                         dev->driver->enable_vblank = i965_enable_vblank;
4842                         dev->driver->disable_vblank = i965_disable_vblank;
4843                 }
4844                 if (I915_HAS_HOTPLUG(dev_priv))
4845                         dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
4846         }
4847 }
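
/*
 * Putting the pieces together, a simplified sketch of the expected
 * irq lifecycle (error handling omitted; the real sequence lives in
 * the driver load/unload paths):
 *
 *	intel_irq_init(i915);       // vtables and workers, no HW access
 *	intel_irq_install(i915);    // request_irq() + postinstall hook
 *	...
 *	intel_irq_uninstall(i915);  // uninstall hook + free_irq()
 *	intel_irq_fini(i915);       // release remaining resources
 */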
4848
4849 /**
4850  * intel_irq_fini - deinitializes IRQ support
4851  * @i915: i915 device instance
4852  *
4853  * This function deinitializes all the IRQ support.
4854  */
4855 void intel_irq_fini(struct drm_i915_private *i915)
4856 {
4857         int i;
4858
4859         if (IS_I945GM(i915))
4860                 i945gm_vblank_work_fini(i915);
4861
4862         for (i = 0; i < MAX_L3_SLICES; ++i)
4863                 kfree(i915->l3_parity.remap_info[i]);
4864 }
4865
4866 /**
4867  * intel_irq_install - enables the hardware interrupt
4868  * @dev_priv: i915 device instance
4869  *
4870  * This function enables the hardware interrupt handling, but leaves hotplug
4871  * handling disabled. It is called after intel_irq_init().
4872  *
4873  * In the driver load and resume code we need working interrupts in a few places
4874  * In the driver load and resume code, we need working interrupts in a few places
4875  * workers. Hence the split into this two-stage approach.
4876  */
4877 int intel_irq_install(struct drm_i915_private *dev_priv)
4878 {
4879         /*
4880          * We enable some interrupt sources in our postinstall hooks, so mark
4881          * interrupts as enabled _before_ actually enabling them to avoid
4882          * special cases in our ordering checks.
4883          */
4884         dev_priv->runtime_pm.irqs_enabled = true;
4885
4886         return drm_irq_install(&dev_priv->drm, dev_priv->drm.pdev->irq);
4887 }
4888
4889 /**
4890  * intel_irq_uninstall - finalizes all irq handling
4891  * @dev_priv: i915 device instance
4892  *
4893  * This stops interrupt and hotplug handling and unregisters and frees all
4894  * resources acquired in the init functions.
4895  */
4896 void intel_irq_uninstall(struct drm_i915_private *dev_priv)
4897 {
4898         drm_irq_uninstall(&dev_priv->drm);
4899         intel_hpd_cancel_work(dev_priv);
4900         dev_priv->runtime_pm.irqs_enabled = false;
4901 }
4902
4903 /**
4904  * intel_runtime_pm_disable_interrupts - runtime interrupt disabling
4905  * @dev_priv: i915 device instance
4906  *
4907  * This function is used to disable interrupts at runtime, both in the runtime
4908  * pm and the system suspend/resume code.
4909  */
4910 void intel_runtime_pm_disable_interrupts(struct drm_i915_private *dev_priv)
4911 {
4912         dev_priv->drm.driver->irq_uninstall(&dev_priv->drm);
4913         dev_priv->runtime_pm.irqs_enabled = false;
4914         synchronize_irq(dev_priv->drm.irq);
4915 }
4916
4917 /**
4918  * intel_runtime_pm_enable_interrupts - runtime interrupt enabling
4919  * @dev_priv: i915 device instance
4920  *
4921  * This function is used to enable interrupts at runtime, both in the runtime
4922  * pm and the system suspend/resume code.
4923  */
4924 void intel_runtime_pm_enable_interrupts(struct drm_i915_private *dev_priv)
4925 {
4926         dev_priv->runtime_pm.irqs_enabled = true;
4927         dev_priv->drm.driver->irq_preinstall(&dev_priv->drm);
4928         dev_priv->drm.driver->irq_postinstall(&dev_priv->drm);
4929 }
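
/*
 * The two helpers above are intended to be used as a pair around a
 * power-down window, e.g. (simplified):
 *
 *	intel_runtime_pm_disable_interrupts(dev_priv);
 *	... device powered down, no interrupts serviced ...
 *	intel_runtime_pm_enable_interrupts(dev_priv);
 */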