/* i915_irq.c -- IRQ support for the I915 -*- linux-c -*-
 */
/*
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/slab.h>
#include <linux/sysrq.h>

#include <drm/drm_drv.h>

#include "display/icl_dsi_regs.h"
#include "display/intel_de.h"
#include "display/intel_display_trace.h"
#include "display/intel_display_types.h"
#include "display/intel_fdi_regs.h"
#include "display/intel_fifo_underrun.h"
#include "display/intel_hotplug.h"
#include "display/intel_lpe_audio.h"
#include "display/intel_psr.h"
#include "display/intel_psr_regs.h"

#include "gt/intel_breadcrumbs.h"
#include "gt/intel_gt.h"
#include "gt/intel_gt_irq.h"
#include "gt/intel_gt_pm_irq.h"
#include "gt/intel_gt_regs.h"
#include "gt/intel_rps.h"

#include "i915_driver.h"
#include "i915_drv.h"
#include "i915_irq.h"

/**
 * DOC: interrupt handling
 *
 * These functions provide the basic support for enabling and disabling the
 * interrupt handling support. There's a lot more functionality in i915_irq.c
 * and related files, but that will be described in separate chapters.
 */

/*
 * Interrupt statistics for PMU. Increments the counter only if the
 * interrupt originated from the GPU, so that interrupts from a device
 * sharing the interrupt line are not accounted.
 */
static inline void pmu_irq_stats(struct drm_i915_private *i915,
                                 irqreturn_t res)
{
        if (unlikely(res != IRQ_HANDLED))
                return;

        /*
         * A clever compiler translates that into INC. A not so clever one
         * should at least prevent store tearing.
         */
        WRITE_ONCE(i915->pmu.irq_count, i915->pmu.irq_count + 1);
}

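/*
 * Per-platform hooks: classify an HPD pulse as long vs. short, and
 * compute the hotplug enable bits for an encoder.
 */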
typedef bool (*long_pulse_detect_func)(enum hpd_pin pin, u32 val);
typedef u32 (*hotplug_enables_func)(struct intel_encoder *encoder);

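/*
 * HPD pin -> hotplug trigger bit mappings for the various north
 * (display engine) and south (PCH) interrupt registers.
 */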
static const u32 hpd_ilk[HPD_NUM_PINS] = {
        [HPD_PORT_A] = DE_DP_A_HOTPLUG,
};

static const u32 hpd_ivb[HPD_NUM_PINS] = {
        [HPD_PORT_A] = DE_DP_A_HOTPLUG_IVB,
};

static const u32 hpd_bdw[HPD_NUM_PINS] = {
        [HPD_PORT_A] = GEN8_DE_PORT_HOTPLUG(HPD_PORT_A),
};

static const u32 hpd_ibx[HPD_NUM_PINS] = {
        [HPD_CRT] = SDE_CRT_HOTPLUG,
        [HPD_SDVO_B] = SDE_SDVOB_HOTPLUG,
        [HPD_PORT_B] = SDE_PORTB_HOTPLUG,
        [HPD_PORT_C] = SDE_PORTC_HOTPLUG,
        [HPD_PORT_D] = SDE_PORTD_HOTPLUG,
};

static const u32 hpd_cpt[HPD_NUM_PINS] = {
        [HPD_CRT] = SDE_CRT_HOTPLUG_CPT,
        [HPD_SDVO_B] = SDE_SDVOB_HOTPLUG_CPT,
        [HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT,
        [HPD_PORT_C] = SDE_PORTC_HOTPLUG_CPT,
        [HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT,
};

static const u32 hpd_spt[HPD_NUM_PINS] = {
        [HPD_PORT_A] = SDE_PORTA_HOTPLUG_SPT,
        [HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT,
        [HPD_PORT_C] = SDE_PORTC_HOTPLUG_CPT,
        [HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT,
        [HPD_PORT_E] = SDE_PORTE_HOTPLUG_SPT,
};

static const u32 hpd_mask_i915[HPD_NUM_PINS] = {
        [HPD_CRT] = CRT_HOTPLUG_INT_EN,
        [HPD_SDVO_B] = SDVOB_HOTPLUG_INT_EN,
        [HPD_SDVO_C] = SDVOC_HOTPLUG_INT_EN,
        [HPD_PORT_B] = PORTB_HOTPLUG_INT_EN,
        [HPD_PORT_C] = PORTC_HOTPLUG_INT_EN,
        [HPD_PORT_D] = PORTD_HOTPLUG_INT_EN,
};

static const u32 hpd_status_g4x[HPD_NUM_PINS] = {
        [HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
        [HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_G4X,
        [HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_G4X,
        [HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
        [HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
        [HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS,
};

static const u32 hpd_status_i915[HPD_NUM_PINS] = {
        [HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
        [HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_I915,
        [HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_I915,
        [HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
        [HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
        [HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS,
};

static const u32 hpd_bxt[HPD_NUM_PINS] = {
        [HPD_PORT_A] = GEN8_DE_PORT_HOTPLUG(HPD_PORT_A),
        [HPD_PORT_B] = GEN8_DE_PORT_HOTPLUG(HPD_PORT_B),
        [HPD_PORT_C] = GEN8_DE_PORT_HOTPLUG(HPD_PORT_C),
};

static const u32 hpd_gen11[HPD_NUM_PINS] = {
        [HPD_PORT_TC1] = GEN11_TC_HOTPLUG(HPD_PORT_TC1) | GEN11_TBT_HOTPLUG(HPD_PORT_TC1),
        [HPD_PORT_TC2] = GEN11_TC_HOTPLUG(HPD_PORT_TC2) | GEN11_TBT_HOTPLUG(HPD_PORT_TC2),
        [HPD_PORT_TC3] = GEN11_TC_HOTPLUG(HPD_PORT_TC3) | GEN11_TBT_HOTPLUG(HPD_PORT_TC3),
        [HPD_PORT_TC4] = GEN11_TC_HOTPLUG(HPD_PORT_TC4) | GEN11_TBT_HOTPLUG(HPD_PORT_TC4),
        [HPD_PORT_TC5] = GEN11_TC_HOTPLUG(HPD_PORT_TC5) | GEN11_TBT_HOTPLUG(HPD_PORT_TC5),
        [HPD_PORT_TC6] = GEN11_TC_HOTPLUG(HPD_PORT_TC6) | GEN11_TBT_HOTPLUG(HPD_PORT_TC6),
};

static const u32 hpd_icp[HPD_NUM_PINS] = {
        [HPD_PORT_A] = SDE_DDI_HOTPLUG_ICP(HPD_PORT_A),
        [HPD_PORT_B] = SDE_DDI_HOTPLUG_ICP(HPD_PORT_B),
        [HPD_PORT_C] = SDE_DDI_HOTPLUG_ICP(HPD_PORT_C),
        [HPD_PORT_TC1] = SDE_TC_HOTPLUG_ICP(HPD_PORT_TC1),
        [HPD_PORT_TC2] = SDE_TC_HOTPLUG_ICP(HPD_PORT_TC2),
        [HPD_PORT_TC3] = SDE_TC_HOTPLUG_ICP(HPD_PORT_TC3),
        [HPD_PORT_TC4] = SDE_TC_HOTPLUG_ICP(HPD_PORT_TC4),
        [HPD_PORT_TC5] = SDE_TC_HOTPLUG_ICP(HPD_PORT_TC5),
        [HPD_PORT_TC6] = SDE_TC_HOTPLUG_ICP(HPD_PORT_TC6),
};

static const u32 hpd_sde_dg1[HPD_NUM_PINS] = {
        [HPD_PORT_A] = SDE_DDI_HOTPLUG_ICP(HPD_PORT_A),
        [HPD_PORT_B] = SDE_DDI_HOTPLUG_ICP(HPD_PORT_B),
        [HPD_PORT_C] = SDE_DDI_HOTPLUG_ICP(HPD_PORT_C),
        [HPD_PORT_D] = SDE_DDI_HOTPLUG_ICP(HPD_PORT_D),
        [HPD_PORT_TC1] = SDE_TC_HOTPLUG_DG2(HPD_PORT_TC1),
};

static void intel_hpd_init_pins(struct drm_i915_private *dev_priv)
{
        struct intel_hotplug *hpd = &dev_priv->display.hotplug;

        if (HAS_GMCH(dev_priv)) {
                if (IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
                    IS_CHERRYVIEW(dev_priv))
                        hpd->hpd = hpd_status_g4x;
                else
                        hpd->hpd = hpd_status_i915;
                return;
        }

        if (DISPLAY_VER(dev_priv) >= 11)
                hpd->hpd = hpd_gen11;
        else if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv))
                hpd->hpd = hpd_bxt;
        else if (DISPLAY_VER(dev_priv) == 9)
                hpd->hpd = NULL; /* no north HPD on SKL */
        else if (DISPLAY_VER(dev_priv) >= 8)
                hpd->hpd = hpd_bdw;
        else if (DISPLAY_VER(dev_priv) >= 7)
                hpd->hpd = hpd_ivb;
        else
                hpd->hpd = hpd_ilk;

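        /*
         * No south HPD tables on pre-DG1 parts without a functional
         * PCH south display; DG1 and later always carry their own.
         */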
        if ((INTEL_PCH_TYPE(dev_priv) < PCH_DG1) &&
            (!HAS_PCH_SPLIT(dev_priv) || HAS_PCH_NOP(dev_priv)))
                return;

        if (INTEL_PCH_TYPE(dev_priv) >= PCH_DG1)
                hpd->pch_hpd = hpd_sde_dg1;
        else if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
                hpd->pch_hpd = hpd_icp;
        else if (HAS_PCH_CNP(dev_priv) || HAS_PCH_SPT(dev_priv))
                hpd->pch_hpd = hpd_spt;
        else if (HAS_PCH_LPT(dev_priv) || HAS_PCH_CPT(dev_priv))
                hpd->pch_hpd = hpd_cpt;
        else if (HAS_PCH_IBX(dev_priv))
                hpd->pch_hpd = hpd_ibx;
        else
                MISSING_CASE(INTEL_PCH_TYPE(dev_priv));
}

static void
intel_handle_vblank(struct drm_i915_private *dev_priv, enum pipe pipe)
{
        struct intel_crtc *crtc = intel_crtc_for_pipe(dev_priv, pipe);

        drm_crtc_handle_vblank(&crtc->base);
}

void gen3_irq_reset(struct intel_uncore *uncore, i915_reg_t imr,
                    i915_reg_t iir, i915_reg_t ier)
{
        intel_uncore_write(uncore, imr, 0xffffffff);
        intel_uncore_posting_read(uncore, imr);

        intel_uncore_write(uncore, ier, 0);

        /* IIR can theoretically queue up two events. Be paranoid. */
        intel_uncore_write(uncore, iir, 0xffffffff);
        intel_uncore_posting_read(uncore, iir);
        intel_uncore_write(uncore, iir, 0xffffffff);
        intel_uncore_posting_read(uncore, iir);
}

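/* Gen2 has 16 bit wide interrupt registers, hence the 16 bit accessors. */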
static void gen2_irq_reset(struct intel_uncore *uncore)
{
        intel_uncore_write16(uncore, GEN2_IMR, 0xffff);
        intel_uncore_posting_read16(uncore, GEN2_IMR);

        intel_uncore_write16(uncore, GEN2_IER, 0);

        /* IIR can theoretically queue up two events. Be paranoid. */
        intel_uncore_write16(uncore, GEN2_IIR, 0xffff);
        intel_uncore_posting_read16(uncore, GEN2_IIR);
        intel_uncore_write16(uncore, GEN2_IIR, 0xffff);
        intel_uncore_posting_read16(uncore, GEN2_IIR);
}

/*
 * We should clear IMR at preinstall/uninstall, and just check at postinstall.
 */
static void gen3_assert_iir_is_zero(struct intel_uncore *uncore, i915_reg_t reg)
{
        u32 val = intel_uncore_read(uncore, reg);

        if (val == 0)
                return;

        drm_WARN(&uncore->i915->drm, 1,
                 "Interrupt register 0x%x is not zero: 0x%08x\n",
                 i915_mmio_reg_offset(reg), val);
        intel_uncore_write(uncore, reg, 0xffffffff);
        intel_uncore_posting_read(uncore, reg);
        intel_uncore_write(uncore, reg, 0xffffffff);
        intel_uncore_posting_read(uncore, reg);
}

static void gen2_assert_iir_is_zero(struct intel_uncore *uncore)
{
        u16 val = intel_uncore_read16(uncore, GEN2_IIR);

        if (val == 0)
                return;

        drm_WARN(&uncore->i915->drm, 1,
                 "Interrupt register 0x%x is not zero: 0x%08x\n",
                 i915_mmio_reg_offset(GEN2_IIR), val);
        intel_uncore_write16(uncore, GEN2_IIR, 0xffff);
        intel_uncore_posting_read16(uncore, GEN2_IIR);
        intel_uncore_write16(uncore, GEN2_IIR, 0xffff);
        intel_uncore_posting_read16(uncore, GEN2_IIR);
}

void gen3_irq_init(struct intel_uncore *uncore,
                   i915_reg_t imr, u32 imr_val,
                   i915_reg_t ier, u32 ier_val,
                   i915_reg_t iir)
{
        gen3_assert_iir_is_zero(uncore, iir);

        intel_uncore_write(uncore, ier, ier_val);
        intel_uncore_write(uncore, imr, imr_val);
        intel_uncore_posting_read(uncore, imr);
}

static void gen2_irq_init(struct intel_uncore *uncore,
                          u32 imr_val, u32 ier_val)
{
        gen2_assert_iir_is_zero(uncore);

        intel_uncore_write16(uncore, GEN2_IER, ier_val);
        intel_uncore_write16(uncore, GEN2_IMR, imr_val);
        intel_uncore_posting_read16(uncore, GEN2_IMR);
}

/* For display hotplug interrupt */
static inline void
i915_hotplug_interrupt_update_locked(struct drm_i915_private *dev_priv,
                                     u32 mask,
                                     u32 bits)
{
        lockdep_assert_held(&dev_priv->irq_lock);
        drm_WARN_ON(&dev_priv->drm, bits & ~mask);

        intel_uncore_rmw(&dev_priv->uncore, PORT_HOTPLUG_EN, mask, bits);
}

/**
 * i915_hotplug_interrupt_update - update hotplug interrupt enable
 * @dev_priv: driver private
 * @mask: bits to update
 * @bits: bits to enable
 * NOTE: the HPD enable bits are modified both inside and outside
 * of an interrupt context. To keep the read-modify-write cycles from
 * interfering, these bits are protected by a spinlock. Since this
 * function is usually not called from a context where the lock is
 * held already, this function acquires the lock itself. A non-locking
 * version is also available.
 */
void i915_hotplug_interrupt_update(struct drm_i915_private *dev_priv,
                                   u32 mask,
                                   u32 bits)
{
        spin_lock_irq(&dev_priv->irq_lock);
        i915_hotplug_interrupt_update_locked(dev_priv, mask, bits);
        spin_unlock_irq(&dev_priv->irq_lock);
}

/**
 * ilk_update_display_irq - update DEIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
static void ilk_update_display_irq(struct drm_i915_private *dev_priv,
                                   u32 interrupt_mask, u32 enabled_irq_mask)
{
        u32 new_val;

        lockdep_assert_held(&dev_priv->irq_lock);
        drm_WARN_ON(&dev_priv->drm, enabled_irq_mask & ~interrupt_mask);

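        /*
         * IMR semantics: a set bit masks (disables) the interrupt, so
         * clear all the bits being updated, then set those that should
         * remain masked.
         */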
        new_val = dev_priv->irq_mask;
        new_val &= ~interrupt_mask;
        new_val |= (~enabled_irq_mask & interrupt_mask);

        if (new_val != dev_priv->irq_mask &&
            !drm_WARN_ON(&dev_priv->drm, !intel_irqs_enabled(dev_priv))) {
                dev_priv->irq_mask = new_val;
                intel_uncore_write(&dev_priv->uncore, DEIMR, dev_priv->irq_mask);
                intel_uncore_posting_read(&dev_priv->uncore, DEIMR);
        }
}

void ilk_enable_display_irq(struct drm_i915_private *i915, u32 bits)
{
        ilk_update_display_irq(i915, bits, bits);
}

void ilk_disable_display_irq(struct drm_i915_private *i915, u32 bits)
{
        ilk_update_display_irq(i915, bits, 0);
}

/**
 * bdw_update_port_irq - update DE port interrupt
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
static void bdw_update_port_irq(struct drm_i915_private *dev_priv,
                                u32 interrupt_mask,
                                u32 enabled_irq_mask)
{
        u32 new_val;
        u32 old_val;

        lockdep_assert_held(&dev_priv->irq_lock);

        drm_WARN_ON(&dev_priv->drm, enabled_irq_mask & ~interrupt_mask);

        if (drm_WARN_ON(&dev_priv->drm, !intel_irqs_enabled(dev_priv)))
                return;

        old_val = intel_uncore_read(&dev_priv->uncore, GEN8_DE_PORT_IMR);

        new_val = old_val;
        new_val &= ~interrupt_mask;
        new_val |= (~enabled_irq_mask & interrupt_mask);

        if (new_val != old_val) {
                intel_uncore_write(&dev_priv->uncore, GEN8_DE_PORT_IMR, new_val);
                intel_uncore_posting_read(&dev_priv->uncore, GEN8_DE_PORT_IMR);
        }
}

/**
 * bdw_update_pipe_irq - update DE pipe interrupt
 * @dev_priv: driver private
 * @pipe: pipe whose interrupt to update
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
static void bdw_update_pipe_irq(struct drm_i915_private *dev_priv,
                                enum pipe pipe, u32 interrupt_mask,
                                u32 enabled_irq_mask)
{
        u32 new_val;

        lockdep_assert_held(&dev_priv->irq_lock);

        drm_WARN_ON(&dev_priv->drm, enabled_irq_mask & ~interrupt_mask);

        if (drm_WARN_ON(&dev_priv->drm, !intel_irqs_enabled(dev_priv)))
                return;

        new_val = dev_priv->de_irq_mask[pipe];
        new_val &= ~interrupt_mask;
        new_val |= (~enabled_irq_mask & interrupt_mask);

        if (new_val != dev_priv->de_irq_mask[pipe]) {
                dev_priv->de_irq_mask[pipe] = new_val;
                intel_uncore_write(&dev_priv->uncore, GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]);
                intel_uncore_posting_read(&dev_priv->uncore, GEN8_DE_PIPE_IMR(pipe));
        }
}

void bdw_enable_pipe_irq(struct drm_i915_private *i915,
                         enum pipe pipe, u32 bits)
{
        bdw_update_pipe_irq(i915, pipe, bits, bits);
}

void bdw_disable_pipe_irq(struct drm_i915_private *i915,
                          enum pipe pipe, u32 bits)
{
        bdw_update_pipe_irq(i915, pipe, bits, 0);
}

/**
 * ibx_display_interrupt_update - update SDEIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
static void ibx_display_interrupt_update(struct drm_i915_private *dev_priv,
                                         u32 interrupt_mask,
                                         u32 enabled_irq_mask)
{
        u32 sdeimr = intel_uncore_read(&dev_priv->uncore, SDEIMR);
        sdeimr &= ~interrupt_mask;
        sdeimr |= (~enabled_irq_mask & interrupt_mask);

        drm_WARN_ON(&dev_priv->drm, enabled_irq_mask & ~interrupt_mask);

        lockdep_assert_held(&dev_priv->irq_lock);

        if (drm_WARN_ON(&dev_priv->drm, !intel_irqs_enabled(dev_priv)))
                return;

        intel_uncore_write(&dev_priv->uncore, SDEIMR, sdeimr);
        intel_uncore_posting_read(&dev_priv->uncore, SDEIMR);
}

void ibx_enable_display_interrupt(struct drm_i915_private *i915, u32 bits)
{
        ibx_display_interrupt_update(i915, bits, bits);
}

void ibx_disable_display_interrupt(struct drm_i915_private *i915, u32 bits)
{
        ibx_display_interrupt_update(i915, bits, 0);
}

u32 i915_pipestat_enable_mask(struct drm_i915_private *dev_priv,
                              enum pipe pipe)
{
        u32 status_mask = dev_priv->pipestat_irq_mask[pipe];
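        /*
         * In PIPESTAT the enable bits occupy the upper 16 bits and the
         * corresponding status bits the lower 16, so the enable mask is
         * simply the status mask shifted up.
         */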
        u32 enable_mask = status_mask << 16;

        lockdep_assert_held(&dev_priv->irq_lock);

        if (DISPLAY_VER(dev_priv) < 5)
                goto out;

        /*
         * On pipe A we don't support the PSR interrupt yet,
         * on pipe B and C the same bit MBZ.
         */
        if (drm_WARN_ON_ONCE(&dev_priv->drm,
                             status_mask & PIPE_A_PSR_STATUS_VLV))
                return 0;
        /*
         * On pipe B and C we don't support the PSR interrupt yet, on pipe
         * A the same bit is for perf counters which we don't use either.
         */
        if (drm_WARN_ON_ONCE(&dev_priv->drm,
                             status_mask & PIPE_B_PSR_STATUS_VLV))
                return 0;

        enable_mask &= ~(PIPE_FIFO_UNDERRUN_STATUS |
                         SPRITE0_FLIP_DONE_INT_EN_VLV |
                         SPRITE1_FLIP_DONE_INT_EN_VLV);
        if (status_mask & SPRITE0_FLIP_DONE_INT_STATUS_VLV)
                enable_mask |= SPRITE0_FLIP_DONE_INT_EN_VLV;
        if (status_mask & SPRITE1_FLIP_DONE_INT_STATUS_VLV)
                enable_mask |= SPRITE1_FLIP_DONE_INT_EN_VLV;

out:
        drm_WARN_ONCE(&dev_priv->drm,
                      enable_mask & ~PIPESTAT_INT_ENABLE_MASK ||
                      status_mask & ~PIPESTAT_INT_STATUS_MASK,
                      "pipe %c: enable_mask=0x%x, status_mask=0x%x\n",
                      pipe_name(pipe), enable_mask, status_mask);

        return enable_mask;
}

void i915_enable_pipestat(struct drm_i915_private *dev_priv,
                          enum pipe pipe, u32 status_mask)
{
        i915_reg_t reg = PIPESTAT(pipe);
        u32 enable_mask;

        drm_WARN_ONCE(&dev_priv->drm, status_mask & ~PIPESTAT_INT_STATUS_MASK,
                      "pipe %c: status_mask=0x%x\n",
                      pipe_name(pipe), status_mask);

        lockdep_assert_held(&dev_priv->irq_lock);
        drm_WARN_ON(&dev_priv->drm, !intel_irqs_enabled(dev_priv));

        if ((dev_priv->pipestat_irq_mask[pipe] & status_mask) == status_mask)
                return;

        dev_priv->pipestat_irq_mask[pipe] |= status_mask;
        enable_mask = i915_pipestat_enable_mask(dev_priv, pipe);

        intel_uncore_write(&dev_priv->uncore, reg, enable_mask | status_mask);
        intel_uncore_posting_read(&dev_priv->uncore, reg);
}

void i915_disable_pipestat(struct drm_i915_private *dev_priv,
                           enum pipe pipe, u32 status_mask)
{
        i915_reg_t reg = PIPESTAT(pipe);
        u32 enable_mask;

        drm_WARN_ONCE(&dev_priv->drm, status_mask & ~PIPESTAT_INT_STATUS_MASK,
                      "pipe %c: status_mask=0x%x\n",
                      pipe_name(pipe), status_mask);

        lockdep_assert_held(&dev_priv->irq_lock);
        drm_WARN_ON(&dev_priv->drm, !intel_irqs_enabled(dev_priv));

        if ((dev_priv->pipestat_irq_mask[pipe] & status_mask) == 0)
                return;

        dev_priv->pipestat_irq_mask[pipe] &= ~status_mask;
        enable_mask = i915_pipestat_enable_mask(dev_priv, pipe);

        intel_uncore_write(&dev_priv->uncore, reg, enable_mask | status_mask);
        intel_uncore_posting_read(&dev_priv->uncore, reg);
}

static bool i915_has_asle(struct drm_i915_private *dev_priv)
{
        if (!dev_priv->display.opregion.asle)
                return false;

        return IS_PINEVIEW(dev_priv) || IS_MOBILE(dev_priv);
}

/**
 * i915_enable_asle_pipestat - enable ASLE pipestat for OpRegion
 * @dev_priv: i915 device private
 */
static void i915_enable_asle_pipestat(struct drm_i915_private *dev_priv)
{
        if (!i915_has_asle(dev_priv))
                return;

        spin_lock_irq(&dev_priv->irq_lock);

        i915_enable_pipestat(dev_priv, PIPE_B, PIPE_LEGACY_BLC_EVENT_STATUS);
        if (DISPLAY_VER(dev_priv) >= 4)
                i915_enable_pipestat(dev_priv, PIPE_A,
                                     PIPE_LEGACY_BLC_EVENT_STATUS);

        spin_unlock_irq(&dev_priv->irq_lock);
}

/**
 * ivb_parity_work - Workqueue called when a parity error interrupt
 * occurred.
 * @work: workqueue struct
 *
 * Doesn't actually do anything except notify userspace. As a consequence of
 * this event, userspace should try to remap the bad rows, since statistically
 * the same row is more likely to go bad again.
 */
static void ivb_parity_work(struct work_struct *work)
{
        struct drm_i915_private *dev_priv =
                container_of(work, typeof(*dev_priv), l3_parity.error_work);
        struct intel_gt *gt = to_gt(dev_priv);
        u32 error_status, row, bank, subbank;
        char *parity_event[6];
        u32 misccpctl;
        u8 slice = 0;

        /* We must turn off DOP level clock gating to access the L3 registers.
         * In order to prevent a get/put style interface, acquire struct mutex
         * any time we access those registers.
         */
        mutex_lock(&dev_priv->drm.struct_mutex);

        /* If we've screwed up tracking, just let the interrupt fire again */
        if (drm_WARN_ON(&dev_priv->drm, !dev_priv->l3_parity.which_slice))
                goto out;

        misccpctl = intel_uncore_rmw(&dev_priv->uncore, GEN7_MISCCPCTL,
                                     GEN7_DOP_CLOCK_GATE_ENABLE, 0);
        intel_uncore_posting_read(&dev_priv->uncore, GEN7_MISCCPCTL);

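        /* Drain all pending slices; ffs() is 1-based, hence the slice-- below. */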
        while ((slice = ffs(dev_priv->l3_parity.which_slice)) != 0) {
                i915_reg_t reg;

                slice--;
                if (drm_WARN_ON_ONCE(&dev_priv->drm,
                                     slice >= NUM_L3_SLICES(dev_priv)))
                        break;

                dev_priv->l3_parity.which_slice &= ~(1<<slice);

                reg = GEN7_L3CDERRST1(slice);

                error_status = intel_uncore_read(&dev_priv->uncore, reg);
                row = GEN7_PARITY_ERROR_ROW(error_status);
                bank = GEN7_PARITY_ERROR_BANK(error_status);
                subbank = GEN7_PARITY_ERROR_SUBBANK(error_status);

                intel_uncore_write(&dev_priv->uncore, reg, GEN7_PARITY_ERROR_VALID | GEN7_L3CDERRST1_ENABLE);
                intel_uncore_posting_read(&dev_priv->uncore, reg);

                parity_event[0] = I915_L3_PARITY_UEVENT "=1";
                parity_event[1] = kasprintf(GFP_KERNEL, "ROW=%d", row);
                parity_event[2] = kasprintf(GFP_KERNEL, "BANK=%d", bank);
                parity_event[3] = kasprintf(GFP_KERNEL, "SUBBANK=%d", subbank);
                parity_event[4] = kasprintf(GFP_KERNEL, "SLICE=%d", slice);
                parity_event[5] = NULL;

                kobject_uevent_env(&dev_priv->drm.primary->kdev->kobj,
                                   KOBJ_CHANGE, parity_event);

                drm_dbg(&dev_priv->drm,
                        "Parity error: Slice = %d, Row = %d, Bank = %d, Sub bank = %d.\n",
                        slice, row, bank, subbank);

                kfree(parity_event[4]);
                kfree(parity_event[3]);
                kfree(parity_event[2]);
                kfree(parity_event[1]);
        }

        intel_uncore_write(&dev_priv->uncore, GEN7_MISCCPCTL, misccpctl);

out:
        drm_WARN_ON(&dev_priv->drm, dev_priv->l3_parity.which_slice);
        spin_lock_irq(gt->irq_lock);
        gen5_gt_enable_irq(gt, GT_PARITY_ERROR(dev_priv));
        spin_unlock_irq(gt->irq_lock);

        mutex_unlock(&dev_priv->drm.struct_mutex);
}

static bool gen11_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
{
        switch (pin) {
        case HPD_PORT_TC1:
        case HPD_PORT_TC2:
        case HPD_PORT_TC3:
        case HPD_PORT_TC4:
        case HPD_PORT_TC5:
        case HPD_PORT_TC6:
                return val & GEN11_HOTPLUG_CTL_LONG_DETECT(pin);
        default:
                return false;
        }
}

static bool bxt_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
{
        switch (pin) {
        case HPD_PORT_A:
                return val & PORTA_HOTPLUG_LONG_DETECT;
        case HPD_PORT_B:
                return val & PORTB_HOTPLUG_LONG_DETECT;
        case HPD_PORT_C:
                return val & PORTC_HOTPLUG_LONG_DETECT;
        default:
                return false;
        }
}

static bool icp_ddi_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
{
        switch (pin) {
        case HPD_PORT_A:
        case HPD_PORT_B:
        case HPD_PORT_C:
        case HPD_PORT_D:
                return val & SHOTPLUG_CTL_DDI_HPD_LONG_DETECT(pin);
        default:
                return false;
        }
}

static bool icp_tc_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
{
        switch (pin) {
        case HPD_PORT_TC1:
        case HPD_PORT_TC2:
        case HPD_PORT_TC3:
        case HPD_PORT_TC4:
        case HPD_PORT_TC5:
        case HPD_PORT_TC6:
                return val & ICP_TC_HPD_LONG_DETECT(pin);
        default:
                return false;
        }
}

static bool spt_port_hotplug2_long_detect(enum hpd_pin pin, u32 val)
{
        switch (pin) {
        case HPD_PORT_E:
                return val & PORTE_HOTPLUG_LONG_DETECT;
        default:
                return false;
        }
}

static bool spt_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
{
        switch (pin) {
        case HPD_PORT_A:
                return val & PORTA_HOTPLUG_LONG_DETECT;
        case HPD_PORT_B:
                return val & PORTB_HOTPLUG_LONG_DETECT;
        case HPD_PORT_C:
                return val & PORTC_HOTPLUG_LONG_DETECT;
        case HPD_PORT_D:
                return val & PORTD_HOTPLUG_LONG_DETECT;
        default:
                return false;
        }
}

static bool ilk_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
{
        switch (pin) {
        case HPD_PORT_A:
                return val & DIGITAL_PORTA_HOTPLUG_LONG_DETECT;
        default:
                return false;
        }
}

static bool pch_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
{
        switch (pin) {
        case HPD_PORT_B:
                return val & PORTB_HOTPLUG_LONG_DETECT;
        case HPD_PORT_C:
                return val & PORTC_HOTPLUG_LONG_DETECT;
        case HPD_PORT_D:
                return val & PORTD_HOTPLUG_LONG_DETECT;
        default:
                return false;
        }
}

static bool i9xx_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
{
        switch (pin) {
        case HPD_PORT_B:
                return val & PORTB_HOTPLUG_INT_LONG_PULSE;
        case HPD_PORT_C:
                return val & PORTC_HOTPLUG_INT_LONG_PULSE;
        case HPD_PORT_D:
                return val & PORTD_HOTPLUG_INT_LONG_PULSE;
        default:
                return false;
        }
}

/*
 * Get a bit mask of pins that have triggered, and which ones may be long.
 * This can be called multiple times with the same masks to accumulate
 * hotplug detection results from several registers.
 *
 * Note that the caller is expected to zero out the masks initially.
 */
static void intel_get_hpd_pins(struct drm_i915_private *dev_priv,
                               u32 *pin_mask, u32 *long_mask,
                               u32 hotplug_trigger, u32 dig_hotplug_reg,
                               const u32 hpd[HPD_NUM_PINS],
                               bool long_pulse_detect(enum hpd_pin pin, u32 val))
{
        enum hpd_pin pin;

        BUILD_BUG_ON(BITS_PER_TYPE(*pin_mask) < HPD_NUM_PINS);

        for_each_hpd_pin(pin) {
                if ((hpd[pin] & hotplug_trigger) == 0)
                        continue;

                *pin_mask |= BIT(pin);

                if (long_pulse_detect(pin, dig_hotplug_reg))
                        *long_mask |= BIT(pin);
        }

        drm_dbg(&dev_priv->drm,
                "hotplug event received, stat 0x%08x, dig 0x%08x, pins 0x%08x, long 0x%08x\n",
                hotplug_trigger, dig_hotplug_reg, *pin_mask, *long_mask);
}

static u32 intel_hpd_enabled_irqs(struct drm_i915_private *dev_priv,
                                  const u32 hpd[HPD_NUM_PINS])
{
        struct intel_encoder *encoder;
        u32 enabled_irqs = 0;

        for_each_intel_encoder(&dev_priv->drm, encoder)
                if (dev_priv->display.hotplug.stats[encoder->hpd_pin].state == HPD_ENABLED)
                        enabled_irqs |= hpd[encoder->hpd_pin];

        return enabled_irqs;
}

static u32 intel_hpd_hotplug_irqs(struct drm_i915_private *dev_priv,
                                  const u32 hpd[HPD_NUM_PINS])
{
        struct intel_encoder *encoder;
        u32 hotplug_irqs = 0;

        for_each_intel_encoder(&dev_priv->drm, encoder)
                hotplug_irqs |= hpd[encoder->hpd_pin];

        return hotplug_irqs;
}

static u32 intel_hpd_hotplug_enables(struct drm_i915_private *i915,
                                     hotplug_enables_func hotplug_enables)
{
        struct intel_encoder *encoder;
        u32 hotplug = 0;

        for_each_intel_encoder(&i915->drm, encoder)
                hotplug |= hotplug_enables(encoder);

        return hotplug;
}

static void gmbus_irq_handler(struct drm_i915_private *dev_priv)
{
        wake_up_all(&dev_priv->display.gmbus.wait_queue);
}

static void dp_aux_irq_handler(struct drm_i915_private *dev_priv)
{
        wake_up_all(&dev_priv->display.gmbus.wait_queue);
}

#if defined(CONFIG_DEBUG_FS)
static void display_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
                                         enum pipe pipe,
                                         u32 crc0, u32 crc1,
                                         u32 crc2, u32 crc3,
                                         u32 crc4)
{
        struct intel_crtc *crtc = intel_crtc_for_pipe(dev_priv, pipe);
        struct intel_pipe_crc *pipe_crc = &crtc->pipe_crc;
        u32 crcs[5] = { crc0, crc1, crc2, crc3, crc4 };

        trace_intel_pipe_crc(crtc, crcs);

        spin_lock(&pipe_crc->lock);
        /*
         * For some not yet identified reason, the first CRC is
         * bonkers. So let's just wait for the next vblank and read
         * out the buggy result.
         *
         * On GEN8+ sometimes the second CRC is bonkers as well, so
         * don't trust that one either.
         */
        if (pipe_crc->skipped <= 0 ||
            (DISPLAY_VER(dev_priv) >= 8 && pipe_crc->skipped == 1)) {
                pipe_crc->skipped++;
                spin_unlock(&pipe_crc->lock);
                return;
        }
        spin_unlock(&pipe_crc->lock);

        drm_crtc_add_crc_entry(&crtc->base, true,
                                drm_crtc_accurate_vblank_count(&crtc->base),
                                crcs);
}
#else
static inline void
display_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
                             enum pipe pipe,
                             u32 crc0, u32 crc1,
                             u32 crc2, u32 crc3,
                             u32 crc4) {}
#endif

static void flip_done_handler(struct drm_i915_private *i915,
                              enum pipe pipe)
{
        struct intel_crtc *crtc = intel_crtc_for_pipe(i915, pipe);
        struct drm_crtc_state *crtc_state = crtc->base.state;
        struct drm_pending_vblank_event *e = crtc_state->event;
        struct drm_device *dev = &i915->drm;
        unsigned long irqflags;

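        /* Complete the pending flip by sending the queued vblank event. */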
        spin_lock_irqsave(&dev->event_lock, irqflags);

        crtc_state->event = NULL;

        drm_crtc_send_vblank_event(&crtc->base, e);

        spin_unlock_irqrestore(&dev->event_lock, irqflags);
}

static void hsw_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
                                     enum pipe pipe)
{
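        /* HSW reports a single CRC result; the remaining slots are unused. */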
        display_pipe_crc_irq_handler(dev_priv, pipe,
                                     intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_1_IVB(pipe)),
                                     0, 0, 0, 0);
}

static void ivb_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
                                     enum pipe pipe)
{
        display_pipe_crc_irq_handler(dev_priv, pipe,
                                     intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_1_IVB(pipe)),
                                     intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_2_IVB(pipe)),
                                     intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_3_IVB(pipe)),
                                     intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_4_IVB(pipe)),
                                     intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_5_IVB(pipe)));
}

static void i9xx_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
                                      enum pipe pipe)
{
        u32 res1, res2;

        if (DISPLAY_VER(dev_priv) >= 3)
                res1 = intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_RES1_I915(pipe));
        else
                res1 = 0;

        if (DISPLAY_VER(dev_priv) >= 5 || IS_G4X(dev_priv))
                res2 = intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_RES2_G4X(pipe));
        else
                res2 = 0;

        display_pipe_crc_irq_handler(dev_priv, pipe,
                                     intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_RED(pipe)),
                                     intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_GREEN(pipe)),
                                     intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_BLUE(pipe)),
                                     res1, res2);
}

static void i9xx_pipestat_irq_reset(struct drm_i915_private *dev_priv)
{
        enum pipe pipe;

        for_each_pipe(dev_priv, pipe) {
                intel_uncore_write(&dev_priv->uncore, PIPESTAT(pipe),
                           PIPESTAT_INT_STATUS_MASK |
                           PIPE_FIFO_UNDERRUN_STATUS);

                dev_priv->pipestat_irq_mask[pipe] = 0;
        }
}

static void i9xx_pipestat_irq_ack(struct drm_i915_private *dev_priv,
                                  u32 iir, u32 pipe_stats[I915_MAX_PIPES])
{
        enum pipe pipe;

        spin_lock(&dev_priv->irq_lock);

        if (!dev_priv->display_irqs_enabled) {
                spin_unlock(&dev_priv->irq_lock);
                return;
        }

        for_each_pipe(dev_priv, pipe) {
                i915_reg_t reg;
                u32 status_mask, enable_mask, iir_bit = 0;

                /*
                 * PIPESTAT bits get signalled even when the interrupt is
                 * disabled with the mask bits, and some of the status bits do
                 * not generate interrupts at all (like the underrun bit). Hence
                 * we need to be careful that we only handle what we want to
                 * handle.
                 */

                /* fifo underruns are filtered in the underrun handler. */
                status_mask = PIPE_FIFO_UNDERRUN_STATUS;

                switch (pipe) {
                default:
                case PIPE_A:
                        iir_bit = I915_DISPLAY_PIPE_A_EVENT_INTERRUPT;
                        break;
                case PIPE_B:
                        iir_bit = I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
                        break;
                case PIPE_C:
                        iir_bit = I915_DISPLAY_PIPE_C_EVENT_INTERRUPT;
                        break;
                }
                if (iir & iir_bit)
                        status_mask |= dev_priv->pipestat_irq_mask[pipe];

                if (!status_mask)
                        continue;

                reg = PIPESTAT(pipe);
                pipe_stats[pipe] = intel_uncore_read(&dev_priv->uncore, reg) & status_mask;
                enable_mask = i915_pipestat_enable_mask(dev_priv, pipe);

                /*
                 * Clear the PIPE*STAT regs before the IIR
                 *
                 * Toggle the enable bits to make sure we get an
                 * edge in the ISR pipe event bit if we don't clear
                 * all the enabled status bits. Otherwise the edge
                 * triggered IIR on i965/g4x wouldn't notice that
                 * an interrupt is still pending.
                 */
                if (pipe_stats[pipe]) {
                        intel_uncore_write(&dev_priv->uncore, reg, pipe_stats[pipe]);
                        intel_uncore_write(&dev_priv->uncore, reg, enable_mask);
                }
        }
        spin_unlock(&dev_priv->irq_lock);
}

static void i8xx_pipestat_irq_handler(struct drm_i915_private *dev_priv,
                                      u16 iir, u32 pipe_stats[I915_MAX_PIPES])
{
        enum pipe pipe;

        for_each_pipe(dev_priv, pipe) {
                if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS)
                        intel_handle_vblank(dev_priv, pipe);

                if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
                        i9xx_pipe_crc_irq_handler(dev_priv, pipe);

                if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
                        intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
        }
}

static void i915_pipestat_irq_handler(struct drm_i915_private *dev_priv,
                                      u32 iir, u32 pipe_stats[I915_MAX_PIPES])
{
        bool blc_event = false;
        enum pipe pipe;

        for_each_pipe(dev_priv, pipe) {
                if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS)
                        intel_handle_vblank(dev_priv, pipe);

                if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
                        blc_event = true;

                if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
                        i9xx_pipe_crc_irq_handler(dev_priv, pipe);

                if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
                        intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
        }

        if (blc_event || (iir & I915_ASLE_INTERRUPT))
                intel_opregion_asle_intr(dev_priv);
}

static void i965_pipestat_irq_handler(struct drm_i915_private *dev_priv,
                                      u32 iir, u32 pipe_stats[I915_MAX_PIPES])
{
        bool blc_event = false;
        enum pipe pipe;

        for_each_pipe(dev_priv, pipe) {
                if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS)
                        intel_handle_vblank(dev_priv, pipe);

                if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
                        blc_event = true;

                if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
                        i9xx_pipe_crc_irq_handler(dev_priv, pipe);

                if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
                        intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
        }

        if (blc_event || (iir & I915_ASLE_INTERRUPT))
                intel_opregion_asle_intr(dev_priv);

        if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
                gmbus_irq_handler(dev_priv);
}

static void valleyview_pipestat_irq_handler(struct drm_i915_private *dev_priv,
                                            u32 pipe_stats[I915_MAX_PIPES])
{
        enum pipe pipe;

        for_each_pipe(dev_priv, pipe) {
                if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS)
                        intel_handle_vblank(dev_priv, pipe);

                if (pipe_stats[pipe] & PLANE_FLIP_DONE_INT_STATUS_VLV)
                        flip_done_handler(dev_priv, pipe);

                if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
                        i9xx_pipe_crc_irq_handler(dev_priv, pipe);

                if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
                        intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
        }

        if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
                gmbus_irq_handler(dev_priv);
}

static u32 i9xx_hpd_irq_ack(struct drm_i915_private *dev_priv)
{
        u32 hotplug_status = 0, hotplug_status_mask;
        int i;

        if (IS_G4X(dev_priv) ||
            IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
                hotplug_status_mask = HOTPLUG_INT_STATUS_G4X |
                        DP_AUX_CHANNEL_MASK_INT_STATUS_G4X;
        else
                hotplug_status_mask = HOTPLUG_INT_STATUS_I915;

        /*
         * We absolutely have to clear all the pending interrupt
         * bits in PORT_HOTPLUG_STAT. Otherwise the ISR port
         * interrupt bit won't have an edge, and the i965/g4x
         * edge triggered IIR will not notice that an interrupt
         * is still pending. We can't use PORT_HOTPLUG_EN to
         * guarantee the edge as the act of toggling the enable
         * bits can itself generate a new hotplug interrupt :(
         */
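        /* Bounded retry: re-read until the status bits stop re-asserting. */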
1197         for (i = 0; i < 10; i++) {
1198                 u32 tmp = intel_uncore_read(&dev_priv->uncore, PORT_HOTPLUG_STAT) & hotplug_status_mask;
1199
1200                 if (tmp == 0)
1201                         return hotplug_status;
1202
1203                 hotplug_status |= tmp;
1204                 intel_uncore_write(&dev_priv->uncore, PORT_HOTPLUG_STAT, hotplug_status);
1205         }
1206
1207         drm_WARN_ONCE(&dev_priv->drm, 1,
1208                       "PORT_HOTPLUG_STAT did not clear (0x%08x)\n",
1209                       intel_uncore_read(&dev_priv->uncore, PORT_HOTPLUG_STAT));
1210
1211         return hotplug_status;
1212 }
1213
1214 static void i9xx_hpd_irq_handler(struct drm_i915_private *dev_priv,
1215                                  u32 hotplug_status)
1216 {
1217         u32 pin_mask = 0, long_mask = 0;
1218         u32 hotplug_trigger;
1219
1220         if (IS_G4X(dev_priv) ||
1221             IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
1222                 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_G4X;
1223         else
1224                 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915;
1225
1226         if (hotplug_trigger) {
1227                 intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
1228                                    hotplug_trigger, hotplug_trigger,
1229                                    dev_priv->display.hotplug.hpd,
1230                                    i9xx_port_hotplug_long_detect);
1231
1232                 intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
1233         }
1234
1235         if ((IS_G4X(dev_priv) ||
1236              IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
1237             hotplug_status & DP_AUX_CHANNEL_MASK_INT_STATUS_G4X)
1238                 dp_aux_irq_handler(dev_priv);
1239 }
1240
1241 static irqreturn_t valleyview_irq_handler(int irq, void *arg)
1242 {
1243         struct drm_i915_private *dev_priv = arg;
1244         irqreturn_t ret = IRQ_NONE;
1245
1246         if (!intel_irqs_enabled(dev_priv))
1247                 return IRQ_NONE;
1248
1249         /* IRQs are synced during runtime_suspend, we don't require a wakeref */
1250         disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
1251
1252         do {
1253                 u32 iir, gt_iir, pm_iir;
1254                 u32 pipe_stats[I915_MAX_PIPES] = {};
1255                 u32 hotplug_status = 0;
1256                 u32 ier = 0;
1257
1258                 gt_iir = intel_uncore_read(&dev_priv->uncore, GTIIR);
1259                 pm_iir = intel_uncore_read(&dev_priv->uncore, GEN6_PMIIR);
1260                 iir = intel_uncore_read(&dev_priv->uncore, VLV_IIR);
1261
1262                 if (gt_iir == 0 && pm_iir == 0 && iir == 0)
1263                         break;
1264
1265                 ret = IRQ_HANDLED;
1266
1267                 /*
1268                  * Theory on interrupt generation, based on empirical evidence:
1269                  *
1270                  * x = ((VLV_IIR & VLV_IER) ||
1271                  *      (((GT_IIR & GT_IER) || (GEN6_PMIIR & GEN6_PMIER)) &&
1272                  *       (VLV_MASTER_IER & MASTER_INTERRUPT_ENABLE)));
1273                  *
1274                  * A CPU interrupt will only be raised when 'x' has a 0->1 edge.
1275                  * Hence we clear MASTER_INTERRUPT_ENABLE and VLV_IER to
1276                  * guarantee the CPU interrupt will be raised again even if we
1277                  * don't end up clearing all the VLV_IIR, GT_IIR, GEN6_PMIIR
1278                  * bits this time around.
1279                  */
1280                 intel_uncore_write(&dev_priv->uncore, VLV_MASTER_IER, 0);
1281                 ier = intel_uncore_rmw(&dev_priv->uncore, VLV_IER, ~0, 0);
1282
1283                 if (gt_iir)
1284                         intel_uncore_write(&dev_priv->uncore, GTIIR, gt_iir);
1285                 if (pm_iir)
1286                         intel_uncore_write(&dev_priv->uncore, GEN6_PMIIR, pm_iir);
1287
1288                 if (iir & I915_DISPLAY_PORT_INTERRUPT)
1289                         hotplug_status = i9xx_hpd_irq_ack(dev_priv);
1290
1291                 /* Call regardless, as some status bits might not be
1292                  * signalled in iir */
1293                 i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats);
1294
1295                 if (iir & (I915_LPE_PIPE_A_INTERRUPT |
1296                            I915_LPE_PIPE_B_INTERRUPT))
1297                         intel_lpe_audio_irq_handler(dev_priv);
1298
1299                 /*
1300                  * VLV_IIR is single buffered, and reflects the level
1301                  * from PIPESTAT/PORT_HOTPLUG_STAT, hence clear it last.
1302                  */
1303                 if (iir)
1304                         intel_uncore_write(&dev_priv->uncore, VLV_IIR, iir);
1305
1306                 intel_uncore_write(&dev_priv->uncore, VLV_IER, ier);
1307                 intel_uncore_write(&dev_priv->uncore, VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE);
1308
1309                 if (gt_iir)
1310                         gen6_gt_irq_handler(to_gt(dev_priv), gt_iir);
1311                 if (pm_iir)
1312                         gen6_rps_irq_handler(&to_gt(dev_priv)->rps, pm_iir);
1313
1314                 if (hotplug_status)
1315                         i9xx_hpd_irq_handler(dev_priv, hotplug_status);
1316
1317                 valleyview_pipestat_irq_handler(dev_priv, pipe_stats);
1318         } while (0);
1319
1320         pmu_irq_stats(dev_priv, ret);
1321
1322         enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
1323
1324         return ret;
1325 }
1326
1327 static irqreturn_t cherryview_irq_handler(int irq, void *arg)
1328 {
1329         struct drm_i915_private *dev_priv = arg;
1330         irqreturn_t ret = IRQ_NONE;
1331
1332         if (!intel_irqs_enabled(dev_priv))
1333                 return IRQ_NONE;
1334
1335         /* IRQs are synced during runtime_suspend, we don't require a wakeref */
1336         disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
1337
1338         do {
1339                 u32 master_ctl, iir;
1340                 u32 pipe_stats[I915_MAX_PIPES] = {};
1341                 u32 hotplug_status = 0;
1342                 u32 ier = 0;
1343
1344                 master_ctl = intel_uncore_read(&dev_priv->uncore, GEN8_MASTER_IRQ) & ~GEN8_MASTER_IRQ_CONTROL;
1345                 iir = intel_uncore_read(&dev_priv->uncore, VLV_IIR);
1346
1347                 if (master_ctl == 0 && iir == 0)
1348                         break;
1349
1350                 ret = IRQ_HANDLED;
1351
1352                 /*
1353                  * Theory on interrupt generation, based on empirical evidence:
1354                  *
1355                  * x = ((VLV_IIR & VLV_IER) ||
1356                  *      ((GEN8_MASTER_IRQ & ~GEN8_MASTER_IRQ_CONTROL) &&
1357                  *       (GEN8_MASTER_IRQ & GEN8_MASTER_IRQ_CONTROL)));
1358                  *
1359                  * A CPU interrupt will only be raised when 'x' has a 0->1 edge.
1360                  * Hence we clear GEN8_MASTER_IRQ_CONTROL and VLV_IER to
1361                  * guarantee the CPU interrupt will be raised again even if we
1362                  * don't end up clearing all the VLV_IIR and GEN8_MASTER_IRQ_CONTROL
1363                  * bits this time around.
1364                  */
1365                 intel_uncore_write(&dev_priv->uncore, GEN8_MASTER_IRQ, 0);
1366                 ier = intel_uncore_rmw(&dev_priv->uncore, VLV_IER, ~0, 0);
1367
1368                 gen8_gt_irq_handler(to_gt(dev_priv), master_ctl);
1369
1370                 if (iir & I915_DISPLAY_PORT_INTERRUPT)
1371                         hotplug_status = i9xx_hpd_irq_ack(dev_priv);
1372
1373                 /* Call regardless, as some status bits might not be
1374                  * signalled in iir */
1375                 i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats);
1376
1377                 if (iir & (I915_LPE_PIPE_A_INTERRUPT |
1378                            I915_LPE_PIPE_B_INTERRUPT |
1379                            I915_LPE_PIPE_C_INTERRUPT))
1380                         intel_lpe_audio_irq_handler(dev_priv);
1381
1382                 /*
1383                  * VLV_IIR is single buffered, and reflects the level
1384                  * from PIPESTAT/PORT_HOTPLUG_STAT, hence clear it last.
1385                  */
1386                 if (iir)
1387                         intel_uncore_write(&dev_priv->uncore, VLV_IIR, iir);
1388
1389                 intel_uncore_write(&dev_priv->uncore, VLV_IER, ier);
1390                 intel_uncore_write(&dev_priv->uncore, GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
1391
1392                 if (hotplug_status)
1393                         i9xx_hpd_irq_handler(dev_priv, hotplug_status);
1394
1395                 valleyview_pipestat_irq_handler(dev_priv, pipe_stats);
1396         } while (0);
1397
1398         pmu_irq_stats(dev_priv, ret);
1399
1400         enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
1401
1402         return ret;
1403 }
1404
1405 static void ibx_hpd_irq_handler(struct drm_i915_private *dev_priv,
1406                                 u32 hotplug_trigger)
1407 {
1408         u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0;
1409
1410         /*
1411          * Somehow the PCH doesn't seem to really ack the interrupt to the CPU
1412          * unless we touch the hotplug register, even if hotplug_trigger is
1413          * zero. Not acking leads to "The master control interrupt lied (SDE)!"
1414          * errors.
1415          */
1416         dig_hotplug_reg = intel_uncore_read(&dev_priv->uncore, PCH_PORT_HOTPLUG);
1417         if (!hotplug_trigger) {
1418                 u32 mask = PORTA_HOTPLUG_STATUS_MASK |
1419                         PORTD_HOTPLUG_STATUS_MASK |
1420                         PORTC_HOTPLUG_STATUS_MASK |
1421                         PORTB_HOTPLUG_STATUS_MASK;
1422                 dig_hotplug_reg &= ~mask;
1423         }
1424
1425         intel_uncore_write(&dev_priv->uncore, PCH_PORT_HOTPLUG, dig_hotplug_reg);
1426         if (!hotplug_trigger)
1427                 return;
1428
1429         intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
1430                            hotplug_trigger, dig_hotplug_reg,
1431                            dev_priv->display.hotplug.pch_hpd,
1432                            pch_port_hotplug_long_detect);
1433
1434         intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
1435 }
1436
1437 static void ibx_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
1438 {
1439         enum pipe pipe;
1440         u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK;
1441
1442         ibx_hpd_irq_handler(dev_priv, hotplug_trigger);
1443
1444         if (pch_iir & SDE_AUDIO_POWER_MASK) {
1445                 int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK) >>
1446                                SDE_AUDIO_POWER_SHIFT);
1447                 drm_dbg(&dev_priv->drm, "PCH audio power change on port %c\n",
1448                         port_name(port));
1449         }
1450
1451         if (pch_iir & SDE_AUX_MASK)
1452                 dp_aux_irq_handler(dev_priv);
1453
1454         if (pch_iir & SDE_GMBUS)
1455                 gmbus_irq_handler(dev_priv);
1456
1457         if (pch_iir & SDE_AUDIO_HDCP_MASK)
1458                 drm_dbg(&dev_priv->drm, "PCH HDCP audio interrupt\n");
1459
1460         if (pch_iir & SDE_AUDIO_TRANS_MASK)
1461                 drm_dbg(&dev_priv->drm, "PCH transcoder audio interrupt\n");
1462
1463         if (pch_iir & SDE_POISON)
1464                 drm_err(&dev_priv->drm, "PCH poison interrupt\n");
1465
1466         if (pch_iir & SDE_FDI_MASK) {
1467                 for_each_pipe(dev_priv, pipe)
1468                         drm_dbg(&dev_priv->drm, "  pipe %c FDI IIR: 0x%08x\n",
1469                                 pipe_name(pipe),
1470                                 intel_uncore_read(&dev_priv->uncore, FDI_RX_IIR(pipe)));
1471         }
1472
1473         if (pch_iir & (SDE_TRANSB_CRC_DONE | SDE_TRANSA_CRC_DONE))
1474                 drm_dbg(&dev_priv->drm, "PCH transcoder CRC done interrupt\n");
1475
1476         if (pch_iir & (SDE_TRANSB_CRC_ERR | SDE_TRANSA_CRC_ERR))
1477                 drm_dbg(&dev_priv->drm,
1478                         "PCH transcoder CRC error interrupt\n");
1479
1480         if (pch_iir & SDE_TRANSA_FIFO_UNDER)
1481                 intel_pch_fifo_underrun_irq_handler(dev_priv, PIPE_A);
1482
1483         if (pch_iir & SDE_TRANSB_FIFO_UNDER)
1484                 intel_pch_fifo_underrun_irq_handler(dev_priv, PIPE_B);
1485 }
1486
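/*
 * North display engine error interrupt (IVB/HSW): poison, CPU FIFO
 * underruns and pipe CRC completion.
 */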
1487 static void ivb_err_int_handler(struct drm_i915_private *dev_priv)
1488 {
1489         u32 err_int = intel_uncore_read(&dev_priv->uncore, GEN7_ERR_INT);
1490         enum pipe pipe;
1491
1492         if (err_int & ERR_INT_POISON)
1493                 drm_err(&dev_priv->drm, "Poison interrupt\n");
1494
1495         for_each_pipe(dev_priv, pipe) {
1496                 if (err_int & ERR_INT_FIFO_UNDERRUN(pipe))
1497                         intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
1498
1499                 if (err_int & ERR_INT_PIPE_CRC_DONE(pipe)) {
1500                         if (IS_IVYBRIDGE(dev_priv))
1501                                 ivb_pipe_crc_irq_handler(dev_priv, pipe);
1502                         else
1503                                 hsw_pipe_crc_irq_handler(dev_priv, pipe);
1504                 }
1505         }
1506
1507         intel_uncore_write(&dev_priv->uncore, GEN7_ERR_INT, err_int);
1508 }
1509
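/*
 * South display engine (PCH) error interrupt: poison and PCH
 * transcoder FIFO underruns.
 */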
1510 static void cpt_serr_int_handler(struct drm_i915_private *dev_priv)
1511 {
1512         u32 serr_int = intel_uncore_read(&dev_priv->uncore, SERR_INT);
1513         enum pipe pipe;
1514
1515         if (serr_int & SERR_INT_POISON)
1516                 drm_err(&dev_priv->drm, "PCH poison interrupt\n");
1517
1518         for_each_pipe(dev_priv, pipe)
1519                 if (serr_int & SERR_INT_TRANS_FIFO_UNDERRUN(pipe))
1520                         intel_pch_fifo_underrun_irq_handler(dev_priv, pipe);
1521
1522         intel_uncore_write(&dev_priv->uncore, SERR_INT, serr_int);
1523 }
1524
1525 static void cpt_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
1526 {
1527         enum pipe pipe;
1528         u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_CPT;
1529
1530         ibx_hpd_irq_handler(dev_priv, hotplug_trigger);
1531
1532         if (pch_iir & SDE_AUDIO_POWER_MASK_CPT) {
1533                 int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK_CPT) >>
1534                                SDE_AUDIO_POWER_SHIFT_CPT);
1535                 drm_dbg(&dev_priv->drm, "PCH audio power change on port %c\n",
1536                         port_name(port));
1537         }
1538
1539         if (pch_iir & SDE_AUX_MASK_CPT)
1540                 dp_aux_irq_handler(dev_priv);
1541
1542         if (pch_iir & SDE_GMBUS_CPT)
1543                 gmbus_irq_handler(dev_priv);
1544
1545         if (pch_iir & SDE_AUDIO_CP_REQ_CPT)
1546                 drm_dbg(&dev_priv->drm, "Audio CP request interrupt\n");
1547
1548         if (pch_iir & SDE_AUDIO_CP_CHG_CPT)
1549                 drm_dbg(&dev_priv->drm, "Audio CP change interrupt\n");
1550
1551         if (pch_iir & SDE_FDI_MASK_CPT) {
1552                 for_each_pipe(dev_priv, pipe)
1553                         drm_dbg(&dev_priv->drm, "  pipe %c FDI IIR: 0x%08x\n",
1554                                 pipe_name(pipe),
1555                                 intel_uncore_read(&dev_priv->uncore, FDI_RX_IIR(pipe)));
1556         }
1557
1558         if (pch_iir & SDE_ERROR_CPT)
1559                 cpt_serr_int_handler(dev_priv);
1560 }
1561
1562 static void icp_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
1563 {
1564         u32 ddi_hotplug_trigger = pch_iir & SDE_DDI_HOTPLUG_MASK_ICP;
1565         u32 tc_hotplug_trigger = pch_iir & SDE_TC_HOTPLUG_MASK_ICP;
1566         u32 pin_mask = 0, long_mask = 0;
1567
1568         if (ddi_hotplug_trigger) {
1569                 u32 dig_hotplug_reg;
1570
1571                 /* Locking due to DSI native GPIO sequences */
1572                 spin_lock(&dev_priv->irq_lock);
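                     /*
                      * Writing back the value just read acks the sticky
                      * (write-1-to-clear) HPD status bits while leaving the
                      * enable bits untouched.
                      */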
1573                 dig_hotplug_reg = intel_uncore_rmw(&dev_priv->uncore, SHOTPLUG_CTL_DDI, 0, 0);
1574                 spin_unlock(&dev_priv->irq_lock);
1575
1576                 intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
1577                                    ddi_hotplug_trigger, dig_hotplug_reg,
1578                                    dev_priv->display.hotplug.pch_hpd,
1579                                    icp_ddi_port_hotplug_long_detect);
1580         }
1581
1582         if (tc_hotplug_trigger) {
1583                 u32 dig_hotplug_reg;
1584
1585                 dig_hotplug_reg = intel_uncore_rmw(&dev_priv->uncore, SHOTPLUG_CTL_TC, 0, 0);
1586
1587                 intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
1588                                    tc_hotplug_trigger, dig_hotplug_reg,
1589                                    dev_priv->display.hotplug.pch_hpd,
1590                                    icp_tc_port_hotplug_long_detect);
1591         }
1592
1593         if (pin_mask)
1594                 intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
1595
1596         if (pch_iir & SDE_GMBUS_ICP)
1597                 gmbus_irq_handler(dev_priv);
1598 }
1599
1600 static void spt_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
1601 {
1602         u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_SPT &
1603                 ~SDE_PORTE_HOTPLUG_SPT;
1604         u32 hotplug2_trigger = pch_iir & SDE_PORTE_HOTPLUG_SPT;
1605         u32 pin_mask = 0, long_mask = 0;
1606
1607         if (hotplug_trigger) {
1608                 u32 dig_hotplug_reg;
1609
1610                 dig_hotplug_reg = intel_uncore_rmw(&dev_priv->uncore, PCH_PORT_HOTPLUG, 0, 0);
1611
1612                 intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
1613                                    hotplug_trigger, dig_hotplug_reg,
1614                                    dev_priv->display.hotplug.pch_hpd,
1615                                    spt_port_hotplug_long_detect);
1616         }
1617
1618         if (hotplug2_trigger) {
1619                 u32 dig_hotplug_reg;
1620
1621                 dig_hotplug_reg = intel_uncore_rmw(&dev_priv->uncore, PCH_PORT_HOTPLUG2, 0, 0);
1622
1623                 intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
1624                                    hotplug2_trigger, dig_hotplug_reg,
1625                                    dev_priv->display.hotplug.pch_hpd,
1626                                    spt_port_hotplug2_long_detect);
1627         }
1628
1629         if (pin_mask)
1630                 intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
1631
1632         if (pch_iir & SDE_GMBUS_CPT)
1633                 gmbus_irq_handler(dev_priv);
1634 }
1635
1636 static void ilk_hpd_irq_handler(struct drm_i915_private *dev_priv,
1637                                 u32 hotplug_trigger)
1638 {
1639         u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0;
1640
1641         dig_hotplug_reg = intel_uncore_rmw(&dev_priv->uncore, DIGITAL_PORT_HOTPLUG_CNTRL, 0, 0);
1642
1643         intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
1644                            hotplug_trigger, dig_hotplug_reg,
1645                            dev_priv->display.hotplug.hpd,
1646                            ilk_port_hotplug_long_detect);
1647
1648         intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
1649 }
1650
1651 static void ilk_display_irq_handler(struct drm_i915_private *dev_priv,
1652                                     u32 de_iir)
1653 {
1654         enum pipe pipe;
1655         u32 hotplug_trigger = de_iir & DE_DP_A_HOTPLUG;
1656
1657         if (hotplug_trigger)
1658                 ilk_hpd_irq_handler(dev_priv, hotplug_trigger);
1659
1660         if (de_iir & DE_AUX_CHANNEL_A)
1661                 dp_aux_irq_handler(dev_priv);
1662
1663         if (de_iir & DE_GSE)
1664                 intel_opregion_asle_intr(dev_priv);
1665
1666         if (de_iir & DE_POISON)
1667                 drm_err(&dev_priv->drm, "Poison interrupt\n");
1668
1669         for_each_pipe(dev_priv, pipe) {
1670                 if (de_iir & DE_PIPE_VBLANK(pipe))
1671                         intel_handle_vblank(dev_priv, pipe);
1672
1673                 if (de_iir & DE_PLANE_FLIP_DONE(pipe))
1674                         flip_done_handler(dev_priv, pipe);
1675
1676                 if (de_iir & DE_PIPE_FIFO_UNDERRUN(pipe))
1677                         intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
1678
1679                 if (de_iir & DE_PIPE_CRC_DONE(pipe))
1680                         i9xx_pipe_crc_irq_handler(dev_priv, pipe);
1681         }
1682
1683         /* check event from PCH */
1684         if (de_iir & DE_PCH_EVENT) {
1685                 u32 pch_iir = intel_uncore_read(&dev_priv->uncore, SDEIIR);
1686
1687                 if (HAS_PCH_CPT(dev_priv))
1688                         cpt_irq_handler(dev_priv, pch_iir);
1689                 else
1690                         ibx_irq_handler(dev_priv, pch_iir);
1691
1692                 /* should clear PCH hotplug event before clearing the CPU irq */
1693                 intel_uncore_write(&dev_priv->uncore, SDEIIR, pch_iir);
1694         }
1695
1696         if (DISPLAY_VER(dev_priv) == 5 && de_iir & DE_PCU_EVENT)
1697                 gen5_rps_irq_handler(&to_gt(dev_priv)->rps);
1698 }
1699
1700 static void ivb_display_irq_handler(struct drm_i915_private *dev_priv,
1701                                     u32 de_iir)
1702 {
1703         enum pipe pipe;
1704         u32 hotplug_trigger = de_iir & DE_DP_A_HOTPLUG_IVB;
1705
1706         if (hotplug_trigger)
1707                 ilk_hpd_irq_handler(dev_priv, hotplug_trigger);
1708
1709         if (de_iir & DE_ERR_INT_IVB)
1710                 ivb_err_int_handler(dev_priv);
1711
1712         if (de_iir & DE_AUX_CHANNEL_A_IVB)
1713                 dp_aux_irq_handler(dev_priv);
1714
1715         if (de_iir & DE_GSE_IVB)
1716                 intel_opregion_asle_intr(dev_priv);
1717
1718         for_each_pipe(dev_priv, pipe) {
1719                 if (de_iir & DE_PIPE_VBLANK_IVB(pipe))
1720                         intel_handle_vblank(dev_priv, pipe);
1721
1722                 if (de_iir & DE_PLANE_FLIP_DONE_IVB(pipe))
1723                         flip_done_handler(dev_priv, pipe);
1724         }
1725
1726         /* check event from PCH */
1727         if (!HAS_PCH_NOP(dev_priv) && (de_iir & DE_PCH_EVENT_IVB)) {
1728                 u32 pch_iir = intel_uncore_read(&dev_priv->uncore, SDEIIR);
1729
1730                 cpt_irq_handler(dev_priv, pch_iir);
1731
1732                 /* clear PCH hotplug event before clearing the CPU irq */
1733                 intel_uncore_write(&dev_priv->uncore, SDEIIR, pch_iir);
1734         }
1735 }
1736
1737 /*
1738  * To handle irqs with the minimum potential races with fresh interrupts, we:
1739  * 1 - Disable Master Interrupt Control.
1740  * 2 - Find the source(s) of the interrupt.
1741  * 3 - Clear the Interrupt Identity bits (IIR).
1742  * 4 - Process the interrupt(s) that had bits set in the IIRs.
1743  * 5 - Re-enable Master Interrupt Control.
1744  */
1745 static irqreturn_t ilk_irq_handler(int irq, void *arg)
1746 {
1747         struct drm_i915_private *i915 = arg;
1748         void __iomem * const regs = i915->uncore.regs;
1749         u32 de_iir, gt_iir, de_ier, sde_ier = 0;
1750         irqreturn_t ret = IRQ_NONE;
1751
1752         if (unlikely(!intel_irqs_enabled(i915)))
1753                 return IRQ_NONE;
1754
1755         /* IRQs are synced during runtime_suspend, we don't require a wakeref */
1756         disable_rpm_wakeref_asserts(&i915->runtime_pm);
1757
1758         /* disable master interrupt before clearing iir  */
1759         de_ier = raw_reg_read(regs, DEIER);
1760         raw_reg_write(regs, DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);
1761
1762         /* Disable south interrupts. We'll only write to SDEIIR once, so further
1763          * interrupts will be stored on its back queue, and then we'll be
1764          * able to process them after we restore SDEIER (as soon as we restore
1765          * it, we'll get an interrupt if SDEIIR still has something to process
1766          * due to its back queue). */
1767         if (!HAS_PCH_NOP(i915)) {
1768                 sde_ier = raw_reg_read(regs, SDEIER);
1769                 raw_reg_write(regs, SDEIER, 0);
1770         }
1771
1772         /* Find, clear, then process each source of interrupt */
1773
1774         gt_iir = raw_reg_read(regs, GTIIR);
1775         if (gt_iir) {
1776                 raw_reg_write(regs, GTIIR, gt_iir);
1777                 if (GRAPHICS_VER(i915) >= 6)
1778                         gen6_gt_irq_handler(to_gt(i915), gt_iir);
1779                 else
1780                         gen5_gt_irq_handler(to_gt(i915), gt_iir);
1781                 ret = IRQ_HANDLED;
1782         }
1783
1784         de_iir = raw_reg_read(regs, DEIIR);
1785         if (de_iir) {
1786                 raw_reg_write(regs, DEIIR, de_iir);
1787                 if (DISPLAY_VER(i915) >= 7)
1788                         ivb_display_irq_handler(i915, de_iir);
1789                 else
1790                         ilk_display_irq_handler(i915, de_iir);
1791                 ret = IRQ_HANDLED;
1792         }
1793
1794         if (GRAPHICS_VER(i915) >= 6) {
1795                 u32 pm_iir = raw_reg_read(regs, GEN6_PMIIR);
1796                 if (pm_iir) {
1797                         raw_reg_write(regs, GEN6_PMIIR, pm_iir);
1798                         gen6_rps_irq_handler(&to_gt(i915)->rps, pm_iir);
1799                         ret = IRQ_HANDLED;
1800                 }
1801         }
1802
1803         raw_reg_write(regs, DEIER, de_ier);
1804         if (sde_ier)
1805                 raw_reg_write(regs, SDEIER, sde_ier);
1806
1807         pmu_irq_stats(i915, ret);
1808
1809         /* IRQs are synced during runtime_suspend, we don't require a wakeref */
1810         enable_rpm_wakeref_asserts(&i915->runtime_pm);
1811
1812         return ret;
1813 }
1814
1815 static void bxt_hpd_irq_handler(struct drm_i915_private *dev_priv,
1816                                 u32 hotplug_trigger)
1817 {
1818         u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0;
1819
1820         dig_hotplug_reg = intel_uncore_rmw(&dev_priv->uncore, PCH_PORT_HOTPLUG, 0, 0);
1821
1822         intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
1823                            hotplug_trigger, dig_hotplug_reg,
1824                            dev_priv->display.hotplug.hpd,
1825                            bxt_port_hotplug_long_detect);
1826
1827         intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
1828 }
1829
1830 static void gen11_hpd_irq_handler(struct drm_i915_private *dev_priv, u32 iir)
1831 {
1832         u32 pin_mask = 0, long_mask = 0;
1833         u32 trigger_tc = iir & GEN11_DE_TC_HOTPLUG_MASK;
1834         u32 trigger_tbt = iir & GEN11_DE_TBT_HOTPLUG_MASK;
1835
1836         if (trigger_tc) {
1837                 u32 dig_hotplug_reg;
1838
1839                 dig_hotplug_reg = intel_uncore_rmw(&dev_priv->uncore, GEN11_TC_HOTPLUG_CTL, 0, 0);
1840
1841                 intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
1842                                    trigger_tc, dig_hotplug_reg,
1843                                    dev_priv->display.hotplug.hpd,
1844                                    gen11_port_hotplug_long_detect);
1845         }
1846
1847         if (trigger_tbt) {
1848                 u32 dig_hotplug_reg;
1849
1850                 dig_hotplug_reg = intel_uncore_rmw(&dev_priv->uncore, GEN11_TBT_HOTPLUG_CTL, 0, 0);
1851
1852                 intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
1853                                    trigger_tbt, dig_hotplug_reg,
1854                                    dev_priv->display.hotplug.hpd,
1855                                    gen11_port_hotplug_long_detect);
1856         }
1857
1858         if (pin_mask)
1859                 intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
1860         else
1861                 drm_err(&dev_priv->drm,
1862                         "Unexpected DE HPD interrupt 0x%08x\n", iir);
1863 }
1864
1865 static u32 gen8_de_port_aux_mask(struct drm_i915_private *dev_priv)
1866 {
1867         u32 mask;
1868
1869         if (DISPLAY_VER(dev_priv) >= 13)
1870                 return TGL_DE_PORT_AUX_DDIA |
1871                         TGL_DE_PORT_AUX_DDIB |
1872                         TGL_DE_PORT_AUX_DDIC |
1873                         XELPD_DE_PORT_AUX_DDID |
1874                         XELPD_DE_PORT_AUX_DDIE |
1875                         TGL_DE_PORT_AUX_USBC1 |
1876                         TGL_DE_PORT_AUX_USBC2 |
1877                         TGL_DE_PORT_AUX_USBC3 |
1878                         TGL_DE_PORT_AUX_USBC4;
1879         else if (DISPLAY_VER(dev_priv) >= 12)
1880                 return TGL_DE_PORT_AUX_DDIA |
1881                         TGL_DE_PORT_AUX_DDIB |
1882                         TGL_DE_PORT_AUX_DDIC |
1883                         TGL_DE_PORT_AUX_USBC1 |
1884                         TGL_DE_PORT_AUX_USBC2 |
1885                         TGL_DE_PORT_AUX_USBC3 |
1886                         TGL_DE_PORT_AUX_USBC4 |
1887                         TGL_DE_PORT_AUX_USBC5 |
1888                         TGL_DE_PORT_AUX_USBC6;
1889
1891         mask = GEN8_AUX_CHANNEL_A;
1892         if (DISPLAY_VER(dev_priv) >= 9)
1893                 mask |= GEN9_AUX_CHANNEL_B |
1894                         GEN9_AUX_CHANNEL_C |
1895                         GEN9_AUX_CHANNEL_D;
1896
1897         if (DISPLAY_VER(dev_priv) == 11) {
1898                 mask |= ICL_AUX_CHANNEL_F;
1899                 mask |= ICL_AUX_CHANNEL_E;
1900         }
1901
1902         return mask;
1903 }
1904
1905 static u32 gen8_de_pipe_fault_mask(struct drm_i915_private *dev_priv)
1906 {
1907         if (DISPLAY_VER(dev_priv) >= 13 || HAS_D12_PLANE_MINIMIZATION(dev_priv))
1908                 return RKL_DE_PIPE_IRQ_FAULT_ERRORS;
1909         else if (DISPLAY_VER(dev_priv) >= 11)
1910                 return GEN11_DE_PIPE_IRQ_FAULT_ERRORS;
1911         else if (DISPLAY_VER(dev_priv) >= 9)
1912                 return GEN9_DE_PIPE_IRQ_FAULT_ERRORS;
1913         else
1914                 return GEN8_DE_PIPE_IRQ_FAULT_ERRORS;
1915 }
1916
1917 static void
1918 gen8_de_misc_irq_handler(struct drm_i915_private *dev_priv, u32 iir)
1919 {
1920         bool found = false;
1921
1922         if (iir & GEN8_DE_MISC_GSE) {
1923                 intel_opregion_asle_intr(dev_priv);
1924                 found = true;
1925         }
1926
1927         if (iir & GEN8_DE_EDP_PSR) {
1928                 struct intel_encoder *encoder;
1929                 u32 psr_iir;
1930                 i915_reg_t iir_reg;
1931
1932                 for_each_intel_encoder_with_psr(&dev_priv->drm, encoder) {
1933                         struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
1934
1935                         if (DISPLAY_VER(dev_priv) >= 12)
1936                                 iir_reg = TRANS_PSR_IIR(intel_dp->psr.transcoder);
1937                         else
1938                                 iir_reg = EDP_PSR_IIR;
1939
1940                         psr_iir = intel_uncore_rmw(&dev_priv->uncore, iir_reg, 0, 0);
1941
1942                         if (psr_iir)
1943                                 found = true;
1944
1945                         intel_psr_irq_handler(intel_dp, psr_iir);
1946
1947                         /* prior to GEN12 there is only one EDP PSR */
1948                         if (DISPLAY_VER(dev_priv) < 12)
1949                                 break;
1950                 }
1951         }
1952
1953         if (!found)
1954                 drm_err(&dev_priv->drm, "Unexpected DE Misc interrupt\n");
1955 }
1956
1957 static void gen11_dsi_te_interrupt_handler(struct drm_i915_private *dev_priv,
1958                                            u32 te_trigger)
1959 {
1960         enum pipe pipe = INVALID_PIPE;
1961         enum transcoder dsi_trans;
1962         enum port port;
1963         u32 val;
1964
1965         /*
1966          * In case of dual link, TE comes from DSI_1;
1967          * check here whether dual link is enabled.
1968          */
1969         val = intel_uncore_read(&dev_priv->uncore, TRANS_DDI_FUNC_CTL2(TRANSCODER_DSI_0));
1970         val &= PORT_SYNC_MODE_ENABLE;
1971
1972         /*
1973          * If dual link is enabled, read the DSI_0
1974          * transcoder registers.
1975          */
1976         port = ((te_trigger & DSI1_TE && val) || (te_trigger & DSI0_TE)) ?
1977                                                   PORT_A : PORT_B;
1978         dsi_trans = (port == PORT_A) ? TRANSCODER_DSI_0 : TRANSCODER_DSI_1;
1979
1980         /* Check if DSI configured in command mode */
1981         val = intel_uncore_read(&dev_priv->uncore, DSI_TRANS_FUNC_CONF(dsi_trans));
1982         val &= OP_MODE_MASK;
1983
1984         if (val != CMD_MODE_NO_GATE && val != CMD_MODE_TE_GATE) {
1985                 drm_err(&dev_priv->drm, "DSI transcoder not configured in command mode\n");
1986                 return;
1987         }
1988
1989         /* Get PIPE for handling VBLANK event */
1990         val = intel_uncore_read(&dev_priv->uncore, TRANS_DDI_FUNC_CTL(dsi_trans));
1991         switch (val & TRANS_DDI_EDP_INPUT_MASK) {
1992         case TRANS_DDI_EDP_INPUT_A_ON:
1993                 pipe = PIPE_A;
1994                 break;
1995         case TRANS_DDI_EDP_INPUT_B_ONOFF:
1996                 pipe = PIPE_B;
1997                 break;
1998         case TRANS_DDI_EDP_INPUT_C_ONOFF:
1999                 pipe = PIPE_C;
2000                 break;
2001         default:
2002                 drm_err(&dev_priv->drm, "Invalid PIPE\n");
2003                 return;
2004         }
2005
2006         intel_handle_vblank(dev_priv, pipe);
2007
2008         /* clear TE in dsi IIR */
2009         port = (te_trigger & DSI1_TE) ? PORT_B : PORT_A;
2010         intel_uncore_rmw(&dev_priv->uncore, DSI_INTR_IDENT_REG(port), 0, 0);
2011 }
2012
2013 static u32 gen8_de_pipe_flip_done_mask(struct drm_i915_private *i915)
2014 {
2015         if (DISPLAY_VER(i915) >= 9)
2016                 return GEN9_PIPE_PLANE1_FLIP_DONE;
2017         else
2018                 return GEN8_PIPE_PRIMARY_FLIP_DONE;
2019 }
2020
2021 u32 gen8_de_pipe_underrun_mask(struct drm_i915_private *dev_priv)
2022 {
2023         u32 mask = GEN8_PIPE_FIFO_UNDERRUN;
2024
2025         if (DISPLAY_VER(dev_priv) >= 13)
2026                 mask |= XELPD_PIPE_SOFT_UNDERRUN |
2027                         XELPD_PIPE_HARD_UNDERRUN;
2028
2029         return mask;
2030 }
2031
2032 static irqreturn_t
2033 gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl)
2034 {
2035         irqreturn_t ret = IRQ_NONE;
2036         u32 iir;
2037         enum pipe pipe;
2038
2039         drm_WARN_ON_ONCE(&dev_priv->drm, !HAS_DISPLAY(dev_priv));
2040
2041         if (master_ctl & GEN8_DE_MISC_IRQ) {
2042                 iir = intel_uncore_read(&dev_priv->uncore, GEN8_DE_MISC_IIR);
2043                 if (iir) {
2044                         intel_uncore_write(&dev_priv->uncore, GEN8_DE_MISC_IIR, iir);
2045                         ret = IRQ_HANDLED;
2046                         gen8_de_misc_irq_handler(dev_priv, iir);
2047                 } else {
2048                         drm_err_ratelimited(&dev_priv->drm,
2049                                             "The master control interrupt lied (DE MISC)!\n");
2050                 }
2051         }
2052
2053         if (DISPLAY_VER(dev_priv) >= 11 && (master_ctl & GEN11_DE_HPD_IRQ)) {
2054                 iir = intel_uncore_read(&dev_priv->uncore, GEN11_DE_HPD_IIR);
2055                 if (iir) {
2056                         intel_uncore_write(&dev_priv->uncore, GEN11_DE_HPD_IIR, iir);
2057                         ret = IRQ_HANDLED;
2058                         gen11_hpd_irq_handler(dev_priv, iir);
2059                 } else {
2060                         drm_err_ratelimited(&dev_priv->drm,
2061                                             "The master control interrupt lied (DE HPD)!\n");
2062                 }
2063         }
2064
2065         if (master_ctl & GEN8_DE_PORT_IRQ) {
2066                 iir = intel_uncore_read(&dev_priv->uncore, GEN8_DE_PORT_IIR);
2067                 if (iir) {
2068                         bool found = false;
2069
2070                         intel_uncore_write(&dev_priv->uncore, GEN8_DE_PORT_IIR, iir);
2071                         ret = IRQ_HANDLED;
2072
2073                         if (iir & gen8_de_port_aux_mask(dev_priv)) {
2074                                 dp_aux_irq_handler(dev_priv);
2075                                 found = true;
2076                         }
2077
2078                         if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) {
2079                                 u32 hotplug_trigger = iir & BXT_DE_PORT_HOTPLUG_MASK;
2080
2081                                 if (hotplug_trigger) {
2082                                         bxt_hpd_irq_handler(dev_priv, hotplug_trigger);
2083                                         found = true;
2084                                 }
2085                         } else if (IS_BROADWELL(dev_priv)) {
2086                                 u32 hotplug_trigger = iir & BDW_DE_PORT_HOTPLUG_MASK;
2087
2088                                 if (hotplug_trigger) {
2089                                         ilk_hpd_irq_handler(dev_priv, hotplug_trigger);
2090                                         found = true;
2091                                 }
2092                         }
2093
2094                         if ((IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) &&
2095                             (iir & BXT_DE_PORT_GMBUS)) {
2096                                 gmbus_irq_handler(dev_priv);
2097                                 found = true;
2098                         }
2099
2100                         if (DISPLAY_VER(dev_priv) >= 11) {
2101                                 u32 te_trigger = iir & (DSI0_TE | DSI1_TE);
2102
2103                                 if (te_trigger) {
2104                                         gen11_dsi_te_interrupt_handler(dev_priv, te_trigger);
2105                                         found = true;
2106                                 }
2107                         }
2108
2109                         if (!found)
2110                                 drm_err_ratelimited(&dev_priv->drm,
2111                                                     "Unexpected DE Port interrupt\n");
2112                 } else {
2114                         drm_err_ratelimited(&dev_priv->drm,
2115                                             "The master control interrupt lied (DE PORT)!\n");
                     }
2116         }
2117
2118         for_each_pipe(dev_priv, pipe) {
2119                 u32 fault_errors;
2120
2121                 if (!(master_ctl & GEN8_DE_PIPE_IRQ(pipe)))
2122                         continue;
2123
2124                 iir = intel_uncore_read(&dev_priv->uncore, GEN8_DE_PIPE_IIR(pipe));
2125                 if (!iir) {
2126                         drm_err_ratelimited(&dev_priv->drm,
2127                                             "The master control interrupt lied (DE PIPE)!\n");
2128                         continue;
2129                 }
2130
2131                 ret = IRQ_HANDLED;
2132                 intel_uncore_write(&dev_priv->uncore, GEN8_DE_PIPE_IIR(pipe), iir);
2133
2134                 if (iir & GEN8_PIPE_VBLANK)
2135                         intel_handle_vblank(dev_priv, pipe);
2136
2137                 if (iir & gen8_de_pipe_flip_done_mask(dev_priv))
2138                         flip_done_handler(dev_priv, pipe);
2139
2140                 if (iir & GEN8_PIPE_CDCLK_CRC_DONE)
2141                         hsw_pipe_crc_irq_handler(dev_priv, pipe);
2142
2143                 if (iir & gen8_de_pipe_underrun_mask(dev_priv))
2144                         intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
2145
2146                 fault_errors = iir & gen8_de_pipe_fault_mask(dev_priv);
2147                 if (fault_errors)
2148                         drm_err_ratelimited(&dev_priv->drm,
2149                                             "Fault errors on pipe %c: 0x%08x\n",
2150                                             pipe_name(pipe),
2151                                             fault_errors);
2152         }
2153
2154         if (HAS_PCH_SPLIT(dev_priv) && !HAS_PCH_NOP(dev_priv) &&
2155             master_ctl & GEN8_DE_PCH_IRQ) {
2156                 /*
2157                  * FIXME(BDW): Assume for now that the new interrupt handling
2158                  * scheme also closed the SDE interrupt handling race we've seen
2159                  * on older pch-split platforms. But this needs testing.
2160                  */
2161                 iir = intel_uncore_read(&dev_priv->uncore, SDEIIR);
2162                 if (iir) {
2163                         intel_uncore_write(&dev_priv->uncore, SDEIIR, iir);
2164                         ret = IRQ_HANDLED;
2165
2166                         if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
2167                                 icp_irq_handler(dev_priv, iir);
2168                         else if (INTEL_PCH_TYPE(dev_priv) >= PCH_SPT)
2169                                 spt_irq_handler(dev_priv, iir);
2170                         else
2171                                 cpt_irq_handler(dev_priv, iir);
2172                 } else {
2173                         /*
2174                          * Like on previous PCH there seems to be something
2175                          * fishy going on with forwarding PCH interrupts.
2176                          */
2177                         drm_dbg(&dev_priv->drm,
2178                                 "The master control interrupt lied (SDE)!\n");
2179                 }
2180         }
2181
2182         return ret;
2183 }
2184
2185 static inline u32 gen8_master_intr_disable(void __iomem * const regs)
2186 {
2187         raw_reg_write(regs, GEN8_MASTER_IRQ, 0);
2188
2189         /*
2190          * Now with master disabled, get a sample of level indications
2191          * for this interrupt. Indications will be cleared on related acks.
2192          * New indications can and will light up during processing,
2193          * and will generate new interrupt after enabling master.
2194          */
2195         return raw_reg_read(regs, GEN8_MASTER_IRQ);
2196 }
2197
2198 static inline void gen8_master_intr_enable(void __iomem * const regs)
2199 {
2200         raw_reg_write(regs, GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
2201 }
2202
2203 static irqreturn_t gen8_irq_handler(int irq, void *arg)
2204 {
2205         struct drm_i915_private *dev_priv = arg;
2206         void __iomem * const regs = dev_priv->uncore.regs;
2207         u32 master_ctl;
2208
2209         if (!intel_irqs_enabled(dev_priv))
2210                 return IRQ_NONE;
2211
2212         master_ctl = gen8_master_intr_disable(regs);
2213         if (!master_ctl) {
2214                 gen8_master_intr_enable(regs);
2215                 return IRQ_NONE;
2216         }
2217
2218         /* Find, queue (onto bottom-halves), then clear each source */
2219         gen8_gt_irq_handler(to_gt(dev_priv), master_ctl);
2220
2221         /* IRQs are synced during runtime_suspend, we don't require a wakeref */
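             /* Anything set outside the GT bits is a display interrupt. */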
2222         if (master_ctl & ~GEN8_GT_IRQS) {
2223                 disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
2224                 gen8_de_irq_handler(dev_priv, master_ctl);
2225                 enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
2226         }
2227
2228         gen8_master_intr_enable(regs);
2229
2230         pmu_irq_stats(dev_priv, IRQ_HANDLED);
2231
2232         return IRQ_HANDLED;
2233 }
2234
2235 static u32
2236 gen11_gu_misc_irq_ack(struct drm_i915_private *i915, const u32 master_ctl)
2237 {
2238         void __iomem * const regs = i915->uncore.regs;
2239         u32 iir;
2240
2241         if (!(master_ctl & GEN11_GU_MISC_IRQ))
2242                 return 0;
2243
2244         iir = raw_reg_read(regs, GEN11_GU_MISC_IIR);
2245         if (likely(iir))
2246                 raw_reg_write(regs, GEN11_GU_MISC_IIR, iir);
2247
2248         return iir;
2249 }
2250
2251 static void
2252 gen11_gu_misc_irq_handler(struct drm_i915_private *i915, const u32 iir)
2253 {
2254         if (iir & GEN11_GU_MISC_GSE)
2255                 intel_opregion_asle_intr(i915);
2256 }
2257
2258 static inline u32 gen11_master_intr_disable(void __iomem * const regs)
2259 {
2260         raw_reg_write(regs, GEN11_GFX_MSTR_IRQ, 0);
2261
2262         /*
2263          * Now with master disabled, get a sample of level indications
2264          * for this interrupt. Indications will be cleared on related acks.
2265          * New indications can and will light up during processing,
2266          * and will generate new interrupt after enabling master.
2267          */
2268         return raw_reg_read(regs, GEN11_GFX_MSTR_IRQ);
2269 }
2270
2271 static inline void gen11_master_intr_enable(void __iomem * const regs)
2272 {
2273         raw_reg_write(regs, GEN11_GFX_MSTR_IRQ, GEN11_MASTER_IRQ);
2274 }
2275
2276 static void
2277 gen11_display_irq_handler(struct drm_i915_private *i915)
2278 {
2279         void __iomem * const regs = i915->uncore.regs;
2280         const u32 disp_ctl = raw_reg_read(regs, GEN11_DISPLAY_INT_CTL);
2281
2282         disable_rpm_wakeref_asserts(&i915->runtime_pm);
2283         /*
2284          * GEN11_DISPLAY_INT_CTL has same format as GEN8_MASTER_IRQ
2285          * for the display related bits.
2286          */
2287         raw_reg_write(regs, GEN11_DISPLAY_INT_CTL, 0x0);
2288         gen8_de_irq_handler(i915, disp_ctl);
2289         raw_reg_write(regs, GEN11_DISPLAY_INT_CTL,
2290                       GEN11_DISPLAY_IRQ_ENABLE);
2291
2292         enable_rpm_wakeref_asserts(&i915->runtime_pm);
2293 }
2294
2295 static irqreturn_t gen11_irq_handler(int irq, void *arg)
2296 {
2297         struct drm_i915_private *i915 = arg;
2298         void __iomem * const regs = i915->uncore.regs;
2299         struct intel_gt *gt = to_gt(i915);
2300         u32 master_ctl;
2301         u32 gu_misc_iir;
2302
2303         if (!intel_irqs_enabled(i915))
2304                 return IRQ_NONE;
2305
2306         master_ctl = gen11_master_intr_disable(regs);
2307         if (!master_ctl) {
2308                 gen11_master_intr_enable(regs);
2309                 return IRQ_NONE;
2310         }
2311
2312         /* Find, queue (onto bottom-halves), then clear each source */
2313         gen11_gt_irq_handler(gt, master_ctl);
2314
2315         /* IRQs are synced during runtime_suspend, we don't require a wakeref */
2316         if (master_ctl & GEN11_DISPLAY_IRQ)
2317                 gen11_display_irq_handler(i915);
2318
2319         gu_misc_iir = gen11_gu_misc_irq_ack(i915, master_ctl);
2320
2321         gen11_master_intr_enable(regs);
2322
2323         gen11_gu_misc_irq_handler(i915, gu_misc_iir);
2324
2325         pmu_irq_stats(i915, IRQ_HANDLED);
2326
2327         return IRQ_HANDLED;
2328 }
2329
2330 static inline u32 dg1_master_intr_disable(void __iomem * const regs)
2331 {
2332         u32 val;
2333
2334         /* First disable interrupts */
2335         raw_reg_write(regs, DG1_MSTR_TILE_INTR, 0);
2336
2337         /* Get the indication levels and ack the master unit */
2338         val = raw_reg_read(regs, DG1_MSTR_TILE_INTR);
2339         if (unlikely(!val))
2340                 return 0;
2341
2342         raw_reg_write(regs, DG1_MSTR_TILE_INTR, val);
2343
2344         return val;
2345 }
2346
2347 static inline void dg1_master_intr_enable(void __iomem * const regs)
2348 {
2349         raw_reg_write(regs, DG1_MSTR_TILE_INTR, DG1_MSTR_IRQ);
2350 }
2351
2352 static irqreturn_t dg1_irq_handler(int irq, void *arg)
2353 {
2354         struct drm_i915_private * const i915 = arg;
2355         struct intel_gt *gt = to_gt(i915);
2356         void __iomem * const regs = gt->uncore->regs;
2357         u32 master_tile_ctl, master_ctl;
2358         u32 gu_misc_iir;
2359
2360         if (!intel_irqs_enabled(i915))
2361                 return IRQ_NONE;
2362
2363         master_tile_ctl = dg1_master_intr_disable(regs);
2364         if (!master_tile_ctl) {
2365                 dg1_master_intr_enable(regs);
2366                 return IRQ_NONE;
2367         }
2368
2369         /* FIXME: we only support tile 0 for now. */
2370         if (master_tile_ctl & DG1_MSTR_TILE(0)) {
2371                 master_ctl = raw_reg_read(regs, GEN11_GFX_MSTR_IRQ);
2372                 raw_reg_write(regs, GEN11_GFX_MSTR_IRQ, master_ctl);
2373         } else {
2374                 drm_err(&i915->drm, "Tile not supported: 0x%08x\n",
2375                         master_tile_ctl);
2376                 dg1_master_intr_enable(regs);
2377                 return IRQ_NONE;
2378         }
2379
2380         gen11_gt_irq_handler(gt, master_ctl);
2381
2382         if (master_ctl & GEN11_DISPLAY_IRQ)
2383                 gen11_display_irq_handler(i915);
2384
2385         gu_misc_iir = gen11_gu_misc_irq_ack(i915, master_ctl);
2386
2387         dg1_master_intr_enable(regs);
2388
2389         gen11_gu_misc_irq_handler(i915, gu_misc_iir);
2390
2391         pmu_irq_stats(i915, IRQ_HANDLED);
2392
2393         return IRQ_HANDLED;
2394 }
2395
2396 /* Called from drm generic code, passed 'crtc' which
2397  * we use as a pipe index
2398  */
2399 int i8xx_enable_vblank(struct drm_crtc *crtc)
2400 {
2401         struct drm_i915_private *dev_priv = to_i915(crtc->dev);
2402         enum pipe pipe = to_intel_crtc(crtc)->pipe;
2403         unsigned long irqflags;
2404
2405         spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2406         i915_enable_pipestat(dev_priv, pipe, PIPE_VBLANK_INTERRUPT_STATUS);
2407         spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2408
2409         return 0;
2410 }
2411
2412 int i915gm_enable_vblank(struct drm_crtc *crtc)
2413 {
2414         struct drm_i915_private *dev_priv = to_i915(crtc->dev);
2415
2416         /*
2417          * Vblank interrupts fail to wake the device up from C2+.
2418          * Disabling render clock gating during C-states avoids
2419          * the problem. There is a small power cost so we do this
2420          * only when vblank interrupts are actually enabled.
2421          */
2422         if (dev_priv->vblank_enabled++ == 0)
2423                 intel_uncore_write(&dev_priv->uncore, SCPD0, _MASKED_BIT_ENABLE(CSTATE_RENDER_CLOCK_GATE_DISABLE));
2424
2425         return i8xx_enable_vblank(crtc);
2426 }
2427
2428 int i965_enable_vblank(struct drm_crtc *crtc)
2429 {
2430         struct drm_i915_private *dev_priv = to_i915(crtc->dev);
2431         enum pipe pipe = to_intel_crtc(crtc)->pipe;
2432         unsigned long irqflags;
2433
2434         spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2435         i915_enable_pipestat(dev_priv, pipe,
2436                              PIPE_START_VBLANK_INTERRUPT_STATUS);
2437         spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2438
2439         return 0;
2440 }
2441
2442 int ilk_enable_vblank(struct drm_crtc *crtc)
2443 {
2444         struct drm_i915_private *dev_priv = to_i915(crtc->dev);
2445         enum pipe pipe = to_intel_crtc(crtc)->pipe;
2446         unsigned long irqflags;
2447         u32 bit = DISPLAY_VER(dev_priv) >= 7 ?
2448                 DE_PIPE_VBLANK_IVB(pipe) : DE_PIPE_VBLANK(pipe);
2449
2450         spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2451         ilk_enable_display_irq(dev_priv, bit);
2452         spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2453
2454         /* Even though there is no DMC, frame counter can get stuck when
2455          * PSR is active as no frames are generated.
2456          */
2457         if (HAS_PSR(dev_priv))
2458                 drm_crtc_vblank_restore(crtc);
2459
2460         return 0;
2461 }
2462
2463 static bool gen11_dsi_configure_te(struct intel_crtc *intel_crtc,
2464                                    bool enable)
2465 {
2466         struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev);
2467         enum port port;
2468
2469         if (!(intel_crtc->mode_flags &
2470             (I915_MODE_FLAG_DSI_USE_TE1 | I915_MODE_FLAG_DSI_USE_TE0)))
2471                 return false;
2472
2473         /* for dual link cases we consider TE from slave */
2474         if (intel_crtc->mode_flags & I915_MODE_FLAG_DSI_USE_TE1)
2475                 port = PORT_B;
2476         else
2477                 port = PORT_A;
2478
2479         intel_uncore_rmw(&dev_priv->uncore, DSI_INTR_MASK_REG(port), DSI_TE_EVENT,
2480                          enable ? 0 : DSI_TE_EVENT);
2481
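             /* Write back DSI_INTR_IDENT_REG to ack any TE already latched. */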
2482         intel_uncore_rmw(&dev_priv->uncore, DSI_INTR_IDENT_REG(port), 0, 0);
2483
2484         return true;
2485 }
2486
2487 int bdw_enable_vblank(struct drm_crtc *_crtc)
2488 {
2489         struct intel_crtc *crtc = to_intel_crtc(_crtc);
2490         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
2491         enum pipe pipe = crtc->pipe;
2492         unsigned long irqflags;
2493
2494         if (gen11_dsi_configure_te(crtc, true))
2495                 return 0;
2496
2497         spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2498         bdw_enable_pipe_irq(dev_priv, pipe, GEN8_PIPE_VBLANK);
2499         spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2500
2501         /* Even if there is no DMC, frame counter can get stuck when
2502          * PSR is active as no frames are generated, so check only for PSR.
2503          */
2504         if (HAS_PSR(dev_priv))
2505                 drm_crtc_vblank_restore(&crtc->base);
2506
2507         return 0;
2508 }
2509
2510 /* Called from drm generic code, passed 'crtc' which
2511  * we use as a pipe index
2512  */
2513 void i8xx_disable_vblank(struct drm_crtc *crtc)
2514 {
2515         struct drm_i915_private *dev_priv = to_i915(crtc->dev);
2516         enum pipe pipe = to_intel_crtc(crtc)->pipe;
2517         unsigned long irqflags;
2518
2519         spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2520         i915_disable_pipestat(dev_priv, pipe, PIPE_VBLANK_INTERRUPT_STATUS);
2521         spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2522 }
2523
2524 void i915gm_disable_vblank(struct drm_crtc *crtc)
2525 {
2526         struct drm_i915_private *dev_priv = to_i915(crtc->dev);
2527
2528         i8xx_disable_vblank(crtc);
2529
2530         if (--dev_priv->vblank_enabled == 0)
2531                 intel_uncore_write(&dev_priv->uncore, SCPD0, _MASKED_BIT_DISABLE(CSTATE_RENDER_CLOCK_GATE_DISABLE));
2532 }
2533
2534 void i965_disable_vblank(struct drm_crtc *crtc)
2535 {
2536         struct drm_i915_private *dev_priv = to_i915(crtc->dev);
2537         enum pipe pipe = to_intel_crtc(crtc)->pipe;
2538         unsigned long irqflags;
2539
2540         spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2541         i915_disable_pipestat(dev_priv, pipe,
2542                               PIPE_START_VBLANK_INTERRUPT_STATUS);
2543         spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2544 }
2545
2546 void ilk_disable_vblank(struct drm_crtc *crtc)
2547 {
2548         struct drm_i915_private *dev_priv = to_i915(crtc->dev);
2549         enum pipe pipe = to_intel_crtc(crtc)->pipe;
2550         unsigned long irqflags;
2551         u32 bit = DISPLAY_VER(dev_priv) >= 7 ?
2552                 DE_PIPE_VBLANK_IVB(pipe) : DE_PIPE_VBLANK(pipe);
2553
2554         spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2555         ilk_disable_display_irq(dev_priv, bit);
2556         spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2557 }
2558
2559 void bdw_disable_vblank(struct drm_crtc *_crtc)
2560 {
2561         struct intel_crtc *crtc = to_intel_crtc(_crtc);
2562         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
2563         enum pipe pipe = crtc->pipe;
2564         unsigned long irqflags;
2565
2566         if (gen11_dsi_configure_te(crtc, false))
2567                 return;
2568
2569         spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2570         bdw_disable_pipe_irq(dev_priv, pipe, GEN8_PIPE_VBLANK);
2571         spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2572 }
2573
2574 static void ibx_irq_reset(struct drm_i915_private *dev_priv)
2575 {
2576         struct intel_uncore *uncore = &dev_priv->uncore;
2577
2578         if (HAS_PCH_NOP(dev_priv))
2579                 return;
2580
2581         GEN3_IRQ_RESET(uncore, SDE);
2582
2583         if (HAS_PCH_CPT(dev_priv) || HAS_PCH_LPT(dev_priv))
2584                 intel_uncore_write(&dev_priv->uncore, SERR_INT, 0xffffffff);
2585 }
2586
2587 static void vlv_display_irq_reset(struct drm_i915_private *dev_priv)
2588 {
2589         struct intel_uncore *uncore = &dev_priv->uncore;
2590
2591         if (IS_CHERRYVIEW(dev_priv))
2592                 intel_uncore_write(uncore, DPINVGTT, DPINVGTT_STATUS_MASK_CHV);
2593         else
2594                 intel_uncore_write(uncore, DPINVGTT, DPINVGTT_STATUS_MASK_VLV);
2595
2596         i915_hotplug_interrupt_update_locked(dev_priv, 0xffffffff, 0);
2597         intel_uncore_rmw(uncore, PORT_HOTPLUG_STAT, 0, 0);
2598
2599         i9xx_pipestat_irq_reset(dev_priv);
2600
2601         GEN3_IRQ_RESET(uncore, VLV_);
2602         dev_priv->irq_mask = ~0u;
2603 }
2604
2605 static void vlv_display_irq_postinstall(struct drm_i915_private *dev_priv)
2606 {
2607         struct intel_uncore *uncore = &dev_priv->uncore;
2609         u32 pipestat_mask;
2610         u32 enable_mask;
2611         enum pipe pipe;
2612
2613         pipestat_mask = PIPE_CRC_DONE_INTERRUPT_STATUS;
2614
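             /* GMBUS status is reported only via pipe A's PIPESTAT. */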
2615         i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
2616         for_each_pipe(dev_priv, pipe)
2617                 i915_enable_pipestat(dev_priv, pipe, pipestat_mask);
2618
2619         enable_mask = I915_DISPLAY_PORT_INTERRUPT |
2620                 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
2621                 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
2622                 I915_LPE_PIPE_A_INTERRUPT |
2623                 I915_LPE_PIPE_B_INTERRUPT;
2624
2625         if (IS_CHERRYVIEW(dev_priv))
2626                 enable_mask |= I915_DISPLAY_PIPE_C_EVENT_INTERRUPT |
2627                         I915_LPE_PIPE_C_INTERRUPT;
2628
2629         drm_WARN_ON(&dev_priv->drm, dev_priv->irq_mask != ~0u);
2630
2631         dev_priv->irq_mask = ~enable_mask;
2632
2633         GEN3_IRQ_INIT(uncore, VLV_, dev_priv->irq_mask, enable_mask);
2634 }
2635
2636 /* drm_dma.h hooks */
2638 static void ilk_irq_reset(struct drm_i915_private *dev_priv)
2639 {
2640         struct intel_uncore *uncore = &dev_priv->uncore;
2641
2642         GEN3_IRQ_RESET(uncore, DE);
2643         dev_priv->irq_mask = ~0u;
2644
2645         if (GRAPHICS_VER(dev_priv) == 7)
2646                 intel_uncore_write(uncore, GEN7_ERR_INT, 0xffffffff);
2647
2648         if (IS_HASWELL(dev_priv)) {
2649                 intel_uncore_write(uncore, EDP_PSR_IMR, 0xffffffff);
2650                 intel_uncore_write(uncore, EDP_PSR_IIR, 0xffffffff);
2651         }
2652
2653         gen5_gt_irq_reset(to_gt(dev_priv));
2654
2655         ibx_irq_reset(dev_priv);
2656 }
2657
2658 static void valleyview_irq_reset(struct drm_i915_private *dev_priv)
2659 {
2660         intel_uncore_write(&dev_priv->uncore, VLV_MASTER_IER, 0);
2661         intel_uncore_posting_read(&dev_priv->uncore, VLV_MASTER_IER);
2662
2663         gen5_gt_irq_reset(to_gt(dev_priv));
2664
2665         spin_lock_irq(&dev_priv->irq_lock);
2666         if (dev_priv->display_irqs_enabled)
2667                 vlv_display_irq_reset(dev_priv);
2668         spin_unlock_irq(&dev_priv->irq_lock);
2669 }
2670
2671 static void gen8_display_irq_reset(struct drm_i915_private *dev_priv)
2672 {
2673         struct intel_uncore *uncore = &dev_priv->uncore;
2674         enum pipe pipe;
2675
2676         if (!HAS_DISPLAY(dev_priv))
2677                 return;
2678
2679         intel_uncore_write(uncore, EDP_PSR_IMR, 0xffffffff);
2680         intel_uncore_write(uncore, EDP_PSR_IIR, 0xffffffff);
2681
2682         for_each_pipe(dev_priv, pipe)
2683                 if (intel_display_power_is_enabled(dev_priv,
2684                                                    POWER_DOMAIN_PIPE(pipe)))
2685                         GEN8_IRQ_RESET_NDX(uncore, DE_PIPE, pipe);
2686
2687         GEN3_IRQ_RESET(uncore, GEN8_DE_PORT_);
2688         GEN3_IRQ_RESET(uncore, GEN8_DE_MISC_);
2689 }
2690
2691 static void gen8_irq_reset(struct drm_i915_private *dev_priv)
2692 {
2693         struct intel_uncore *uncore = &dev_priv->uncore;
2694
2695         gen8_master_intr_disable(uncore->regs);
2696
2697         gen8_gt_irq_reset(to_gt(dev_priv));
2698         gen8_display_irq_reset(dev_priv);
2699         GEN3_IRQ_RESET(uncore, GEN8_PCU_);
2700
2701         if (HAS_PCH_SPLIT(dev_priv))
2702                 ibx_irq_reset(dev_priv);
2704 }
2705
2706 static void gen11_display_irq_reset(struct drm_i915_private *dev_priv)
2707 {
2708         struct intel_uncore *uncore = &dev_priv->uncore;
2709         enum pipe pipe;
2710         u32 trans_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) |
2711                 BIT(TRANSCODER_C) | BIT(TRANSCODER_D);
2712
2713         if (!HAS_DISPLAY(dev_priv))
2714                 return;
2715
2716         intel_uncore_write(uncore, GEN11_DISPLAY_INT_CTL, 0);
2717
2718         if (DISPLAY_VER(dev_priv) >= 12) {
2719                 enum transcoder trans;
2720
2721                 for_each_cpu_transcoder_masked(dev_priv, trans, trans_mask) {
2722                         enum intel_display_power_domain domain;
2723
2724                         domain = POWER_DOMAIN_TRANSCODER(trans);
2725                         if (!intel_display_power_is_enabled(dev_priv, domain))
2726                                 continue;
2727
2728                         intel_uncore_write(uncore, TRANS_PSR_IMR(trans), 0xffffffff);
2729                         intel_uncore_write(uncore, TRANS_PSR_IIR(trans), 0xffffffff);
2730                 }
2731         } else {
2732                 intel_uncore_write(uncore, EDP_PSR_IMR, 0xffffffff);
2733                 intel_uncore_write(uncore, EDP_PSR_IIR, 0xffffffff);
2734         }
2735
2736         for_each_pipe(dev_priv, pipe)
2737                 if (intel_display_power_is_enabled(dev_priv,
2738                                                    POWER_DOMAIN_PIPE(pipe)))
2739                         GEN8_IRQ_RESET_NDX(uncore, DE_PIPE, pipe);
2740
2741         GEN3_IRQ_RESET(uncore, GEN8_DE_PORT_);
2742         GEN3_IRQ_RESET(uncore, GEN8_DE_MISC_);
2743         GEN3_IRQ_RESET(uncore, GEN11_DE_HPD_);
2744
2745         if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
2746                 GEN3_IRQ_RESET(uncore, SDE);
2747 }
2748
2749 static void gen11_irq_reset(struct drm_i915_private *dev_priv)
2750 {
2751         struct intel_gt *gt = to_gt(dev_priv);
2752         struct intel_uncore *uncore = gt->uncore;
2753
2754         gen11_master_intr_disable(dev_priv->uncore.regs);
2755
2756         gen11_gt_irq_reset(gt);
2757         gen11_display_irq_reset(dev_priv);
2758
2759         GEN3_IRQ_RESET(uncore, GEN11_GU_MISC_);
2760         GEN3_IRQ_RESET(uncore, GEN8_PCU_);
2761 }
2762
2763 static void dg1_irq_reset(struct drm_i915_private *dev_priv)
2764 {
2765         struct intel_gt *gt = to_gt(dev_priv);
2766         struct intel_uncore *uncore = gt->uncore;
2767
2768         dg1_master_intr_disable(dev_priv->uncore.regs);
2769
2770         gen11_gt_irq_reset(gt);
2771         gen11_display_irq_reset(dev_priv);
2772
2773         GEN3_IRQ_RESET(uncore, GEN11_GU_MISC_);
2774         GEN3_IRQ_RESET(uncore, GEN8_PCU_);
2775 }
2776
2777 void gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv,
2778                                      u8 pipe_mask)
2779 {
2780         struct intel_uncore *uncore = &dev_priv->uncore;
2781         u32 extra_ier = GEN8_PIPE_VBLANK |
2782                 gen8_de_pipe_underrun_mask(dev_priv) |
2783                 gen8_de_pipe_flip_done_mask(dev_priv);
2784         enum pipe pipe;
2785
2786         spin_lock_irq(&dev_priv->irq_lock);
2787
2788         if (!intel_irqs_enabled(dev_priv)) {
2789                 spin_unlock_irq(&dev_priv->irq_lock);
2790                 return;
2791         }
2792
2793         for_each_pipe_masked(dev_priv, pipe, pipe_mask)
2794                 GEN8_IRQ_INIT_NDX(uncore, DE_PIPE, pipe,
2795                                   dev_priv->de_irq_mask[pipe],
2796                                   ~dev_priv->de_irq_mask[pipe] | extra_ier);
2797
2798         spin_unlock_irq(&dev_priv->irq_lock);
2799 }
2800
2801 void gen8_irq_power_well_pre_disable(struct drm_i915_private *dev_priv,
2802                                      u8 pipe_mask)
2803 {
2804         struct intel_uncore *uncore = &dev_priv->uncore;
2805         enum pipe pipe;
2806
2807         spin_lock_irq(&dev_priv->irq_lock);
2808
2809         if (!intel_irqs_enabled(dev_priv)) {
2810                 spin_unlock_irq(&dev_priv->irq_lock);
2811                 return;
2812         }
2813
2814         for_each_pipe_masked(dev_priv, pipe, pipe_mask)
2815                 GEN8_IRQ_RESET_NDX(uncore, DE_PIPE, pipe);
2816
2817         spin_unlock_irq(&dev_priv->irq_lock);
2818
2819         /* make sure we're done processing display irqs */
2820         intel_synchronize_irq(dev_priv);
2821 }
2822
2823 static void cherryview_irq_reset(struct drm_i915_private *dev_priv)
2824 {
2825         struct intel_uncore *uncore = &dev_priv->uncore;
2826
2827         intel_uncore_write(uncore, GEN8_MASTER_IRQ, 0);
2828         intel_uncore_posting_read(&dev_priv->uncore, GEN8_MASTER_IRQ);
2829
2830         gen8_gt_irq_reset(to_gt(dev_priv));
2831
2832         GEN3_IRQ_RESET(uncore, GEN8_PCU_);
2833
2834         spin_lock_irq(&dev_priv->irq_lock);
2835         if (dev_priv->display_irqs_enabled)
2836                 vlv_display_irq_reset(dev_priv);
2837         spin_unlock_irq(&dev_priv->irq_lock);
2838 }
2839
2840 static u32 ibx_hotplug_enables(struct intel_encoder *encoder)
2841 {
2842         struct drm_i915_private *i915 = to_i915(encoder->base.dev);
2843
2844         switch (encoder->hpd_pin) {
2845         case HPD_PORT_A:
2846                 /*
2847                  * When CPU and PCH are on the same package, port A
2848                  * HPD must be enabled in both north and south.
2849                  */
2850                 return HAS_PCH_LPT_LP(i915) ?
2851                         PORTA_HOTPLUG_ENABLE : 0;
2852         case HPD_PORT_B:
2853                 return PORTB_HOTPLUG_ENABLE |
2854                         PORTB_PULSE_DURATION_2ms;
2855         case HPD_PORT_C:
2856                 return PORTC_HOTPLUG_ENABLE |
2857                         PORTC_PULSE_DURATION_2ms;
2858         case HPD_PORT_D:
2859                 return PORTD_HOTPLUG_ENABLE |
2860                         PORTD_PULSE_DURATION_2ms;
2861         default:
2862                 return 0;
2863         }
2864 }
2865
2866 static void ibx_hpd_detection_setup(struct drm_i915_private *dev_priv)
2867 {
2868         /*
2869          * Enable digital hotplug on the PCH, and configure the DP short pulse
2870          * duration to 2ms (which is the minimum in the DisplayPort spec).
2871          * The pulse duration bits are reserved on LPT+.
2872          */
2873         intel_uncore_rmw(&dev_priv->uncore, PCH_PORT_HOTPLUG,
2874                          PORTA_HOTPLUG_ENABLE |
2875                          PORTB_HOTPLUG_ENABLE |
2876                          PORTC_HOTPLUG_ENABLE |
2877                          PORTD_HOTPLUG_ENABLE |
2878                          PORTB_PULSE_DURATION_MASK |
2879                          PORTC_PULSE_DURATION_MASK |
2880                          PORTD_PULSE_DURATION_MASK,
2881                          intel_hpd_hotplug_enables(dev_priv, ibx_hotplug_enables));
2882 }
2883
2884 static void ibx_hpd_irq_setup(struct drm_i915_private *dev_priv)
2885 {
2886         u32 hotplug_irqs, enabled_irqs;
2887
2888         enabled_irqs = intel_hpd_enabled_irqs(dev_priv, dev_priv->display.hotplug.pch_hpd);
2889         hotplug_irqs = intel_hpd_hotplug_irqs(dev_priv, dev_priv->display.hotplug.pch_hpd);
2890
2891         ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);
2892
2893         ibx_hpd_detection_setup(dev_priv);
2894 }
2895
2896 static u32 icp_ddi_hotplug_enables(struct intel_encoder *encoder)
2897 {
2898         switch (encoder->hpd_pin) {
2899         case HPD_PORT_A:
2900         case HPD_PORT_B:
2901         case HPD_PORT_C:
2902         case HPD_PORT_D:
2903                 return SHOTPLUG_CTL_DDI_HPD_ENABLE(encoder->hpd_pin);
2904         default:
2905                 return 0;
2906         }
2907 }
2908
2909 static u32 icp_tc_hotplug_enables(struct intel_encoder *encoder)
2910 {
2911         switch (encoder->hpd_pin) {
2912         case HPD_PORT_TC1:
2913         case HPD_PORT_TC2:
2914         case HPD_PORT_TC3:
2915         case HPD_PORT_TC4:
2916         case HPD_PORT_TC5:
2917         case HPD_PORT_TC6:
2918                 return ICP_TC_HPD_ENABLE(encoder->hpd_pin);
2919         default:
2920                 return 0;
2921         }
2922 }
2923
2924 static void icp_ddi_hpd_detection_setup(struct drm_i915_private *dev_priv)
2925 {
2926         intel_uncore_rmw(&dev_priv->uncore, SHOTPLUG_CTL_DDI,
2927                          SHOTPLUG_CTL_DDI_HPD_ENABLE(HPD_PORT_A) |
2928                          SHOTPLUG_CTL_DDI_HPD_ENABLE(HPD_PORT_B) |
2929                          SHOTPLUG_CTL_DDI_HPD_ENABLE(HPD_PORT_C) |
2930                          SHOTPLUG_CTL_DDI_HPD_ENABLE(HPD_PORT_D),
2931                          intel_hpd_hotplug_enables(dev_priv, icp_ddi_hotplug_enables));
2932 }
2933
2934 static void icp_tc_hpd_detection_setup(struct drm_i915_private *dev_priv)
2935 {
2936         intel_uncore_rmw(&dev_priv->uncore, SHOTPLUG_CTL_TC,
2937                          ICP_TC_HPD_ENABLE(HPD_PORT_TC1) |
2938                          ICP_TC_HPD_ENABLE(HPD_PORT_TC2) |
2939                          ICP_TC_HPD_ENABLE(HPD_PORT_TC3) |
2940                          ICP_TC_HPD_ENABLE(HPD_PORT_TC4) |
2941                          ICP_TC_HPD_ENABLE(HPD_PORT_TC5) |
2942                          ICP_TC_HPD_ENABLE(HPD_PORT_TC6),
2943                          intel_hpd_hotplug_enables(dev_priv, icp_tc_hotplug_enables));
2944 }
2945
2946 static void icp_hpd_irq_setup(struct drm_i915_private *dev_priv)
2947 {
2948         u32 hotplug_irqs, enabled_irqs;
2949
2950         enabled_irqs = intel_hpd_enabled_irqs(dev_priv, dev_priv->display.hotplug.pch_hpd);
2951         hotplug_irqs = intel_hpd_hotplug_irqs(dev_priv, dev_priv->display.hotplug.pch_hpd);
2952
2953         if (INTEL_PCH_TYPE(dev_priv) <= PCH_TGP)
2954                 intel_uncore_write(&dev_priv->uncore, SHPD_FILTER_CNT, SHPD_FILTER_CNT_500_ADJ);
2955
2956         ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);
2957
2958         icp_ddi_hpd_detection_setup(dev_priv);
2959         icp_tc_hpd_detection_setup(dev_priv);
2960 }
2961
2962 static u32 gen11_hotplug_enables(struct intel_encoder *encoder)
2963 {
2964         switch (encoder->hpd_pin) {
2965         case HPD_PORT_TC1:
2966         case HPD_PORT_TC2:
2967         case HPD_PORT_TC3:
2968         case HPD_PORT_TC4:
2969         case HPD_PORT_TC5:
2970         case HPD_PORT_TC6:
2971                 return GEN11_HOTPLUG_CTL_ENABLE(encoder->hpd_pin);
2972         default:
2973                 return 0;
2974         }
2975 }
2976
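     /*
      * DG1 needs the DDI HPD pin sense inverted; set the invert bits in
      * SOUTH_CHICKEN1 before running the common ICP HPD setup.
      */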
2977 static void dg1_hpd_invert(struct drm_i915_private *i915)
2978 {
2979         u32 val = (INVERT_DDIA_HPD |
2980                    INVERT_DDIB_HPD |
2981                    INVERT_DDIC_HPD |
2982                    INVERT_DDID_HPD);
2983         intel_uncore_rmw(&i915->uncore, SOUTH_CHICKEN1, 0, val);
2984 }
2985
2986 static void dg1_hpd_irq_setup(struct drm_i915_private *dev_priv)
2987 {
2988         dg1_hpd_invert(dev_priv);
2989         icp_hpd_irq_setup(dev_priv);
2990 }
2991
2992 static void gen11_tc_hpd_detection_setup(struct drm_i915_private *dev_priv)
2993 {
2994         intel_uncore_rmw(&dev_priv->uncore, GEN11_TC_HOTPLUG_CTL,
2995                          GEN11_HOTPLUG_CTL_ENABLE(HPD_PORT_TC1) |
2996                          GEN11_HOTPLUG_CTL_ENABLE(HPD_PORT_TC2) |
2997                          GEN11_HOTPLUG_CTL_ENABLE(HPD_PORT_TC3) |
2998                          GEN11_HOTPLUG_CTL_ENABLE(HPD_PORT_TC4) |
2999                          GEN11_HOTPLUG_CTL_ENABLE(HPD_PORT_TC5) |
3000                          GEN11_HOTPLUG_CTL_ENABLE(HPD_PORT_TC6),
3001                          intel_hpd_hotplug_enables(dev_priv, gen11_hotplug_enables));
3002 }
3003
3004 static void gen11_tbt_hpd_detection_setup(struct drm_i915_private *dev_priv)
3005 {
3006         intel_uncore_rmw(&dev_priv->uncore, GEN11_TBT_HOTPLUG_CTL,
3007                          GEN11_HOTPLUG_CTL_ENABLE(HPD_PORT_TC1) |
3008                          GEN11_HOTPLUG_CTL_ENABLE(HPD_PORT_TC2) |
3009                          GEN11_HOTPLUG_CTL_ENABLE(HPD_PORT_TC3) |
3010                          GEN11_HOTPLUG_CTL_ENABLE(HPD_PORT_TC4) |
3011                          GEN11_HOTPLUG_CTL_ENABLE(HPD_PORT_TC5) |
3012                          GEN11_HOTPLUG_CTL_ENABLE(HPD_PORT_TC6),
3013                          intel_hpd_hotplug_enables(dev_priv, gen11_hotplug_enables));
3014 }
3015
3016 static void gen11_hpd_irq_setup(struct drm_i915_private *dev_priv)
3017 {
3018         u32 hotplug_irqs, enabled_irqs;
3019
3020         enabled_irqs = intel_hpd_enabled_irqs(dev_priv, dev_priv->display.hotplug.hpd);
3021         hotplug_irqs = intel_hpd_hotplug_irqs(dev_priv, dev_priv->display.hotplug.hpd);
3022
3023         intel_uncore_rmw(&dev_priv->uncore, GEN11_DE_HPD_IMR, hotplug_irqs,
3024                          ~enabled_irqs & hotplug_irqs);
3025         intel_uncore_posting_read(&dev_priv->uncore, GEN11_DE_HPD_IMR);
3026
3027         gen11_tc_hpd_detection_setup(dev_priv);
3028         gen11_tbt_hpd_detection_setup(dev_priv);
3029
3030         if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
3031                 icp_hpd_irq_setup(dev_priv);
3032 }
3033
3034 static u32 spt_hotplug_enables(struct intel_encoder *encoder)
3035 {
3036         switch (encoder->hpd_pin) {
3037         case HPD_PORT_A:
3038                 return PORTA_HOTPLUG_ENABLE;
3039         case HPD_PORT_B:
3040                 return PORTB_HOTPLUG_ENABLE;
3041         case HPD_PORT_C:
3042                 return PORTC_HOTPLUG_ENABLE;
3043         case HPD_PORT_D:
3044                 return PORTD_HOTPLUG_ENABLE;
3045         default:
3046                 return 0;
3047         }
3048 }
3049
3050 static u32 spt_hotplug2_enables(struct intel_encoder *encoder)
3051 {
3052         switch (encoder->hpd_pin) {
3053         case HPD_PORT_E:
3054                 return PORTE_HOTPLUG_ENABLE;
3055         default:
3056                 return 0;
3057         }
3058 }
3059
3060 static void spt_hpd_detection_setup(struct drm_i915_private *dev_priv)
3061 {
3062         /* Display WA #1179 WaHardHangonHotPlug: cnp */
3063         if (HAS_PCH_CNP(dev_priv)) {
3064                 intel_uncore_rmw(&dev_priv->uncore, SOUTH_CHICKEN1, CHASSIS_CLK_REQ_DURATION_MASK,
3065                                  CHASSIS_CLK_REQ_DURATION(0xf));
3066         }
3067
3068         /* Enable digital hotplug on the PCH */
3069         intel_uncore_rmw(&dev_priv->uncore, PCH_PORT_HOTPLUG,
3070                          PORTA_HOTPLUG_ENABLE |
3071                          PORTB_HOTPLUG_ENABLE |
3072                          PORTC_HOTPLUG_ENABLE |
3073                          PORTD_HOTPLUG_ENABLE,
3074                          intel_hpd_hotplug_enables(dev_priv, spt_hotplug_enables));
3075
3076         intel_uncore_rmw(&dev_priv->uncore, PCH_PORT_HOTPLUG2, PORTE_HOTPLUG_ENABLE,
3077                          intel_hpd_hotplug_enables(dev_priv, spt_hotplug2_enables));
3078 }
3079
3080 static void spt_hpd_irq_setup(struct drm_i915_private *dev_priv)
3081 {
3082         u32 hotplug_irqs, enabled_irqs;
3083
3084         if (INTEL_PCH_TYPE(dev_priv) >= PCH_CNP)
3085                 intel_uncore_write(&dev_priv->uncore, SHPD_FILTER_CNT, SHPD_FILTER_CNT_500_ADJ);
3086
3087         enabled_irqs = intel_hpd_enabled_irqs(dev_priv, dev_priv->display.hotplug.pch_hpd);
3088         hotplug_irqs = intel_hpd_hotplug_irqs(dev_priv, dev_priv->display.hotplug.pch_hpd);
3089
3090         ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);
3091
3092         spt_hpd_detection_setup(dev_priv);
3093 }
3094
3095 static u32 ilk_hotplug_enables(struct intel_encoder *encoder)
3096 {
3097         switch (encoder->hpd_pin) {
3098         case HPD_PORT_A:
3099                 return DIGITAL_PORTA_HOTPLUG_ENABLE |
3100                         DIGITAL_PORTA_PULSE_DURATION_2ms;
3101         default:
3102                 return 0;
3103         }
3104 }
3105
3106 static void ilk_hpd_detection_setup(struct drm_i915_private *dev_priv)
3107 {
3108         /*
3109          * Enable digital hotplug on the CPU, and configure the DP short pulse
3110          * duration to 2ms (which is the minimum in the DisplayPort spec).
3111          * The pulse duration bits are reserved on HSW+.
3112          */
3113         intel_uncore_rmw(&dev_priv->uncore, DIGITAL_PORT_HOTPLUG_CNTRL,
3114                          DIGITAL_PORTA_HOTPLUG_ENABLE | DIGITAL_PORTA_PULSE_DURATION_MASK,
3115                          intel_hpd_hotplug_enables(dev_priv, ilk_hotplug_enables));
3116 }
3117
3118 static void ilk_hpd_irq_setup(struct drm_i915_private *dev_priv)
3119 {
3120         u32 hotplug_irqs, enabled_irqs;
3121
3122         enabled_irqs = intel_hpd_enabled_irqs(dev_priv, dev_priv->display.hotplug.hpd);
3123         hotplug_irqs = intel_hpd_hotplug_irqs(dev_priv, dev_priv->display.hotplug.hpd);
3124
3125         if (DISPLAY_VER(dev_priv) >= 8)
3126                 bdw_update_port_irq(dev_priv, hotplug_irqs, enabled_irqs);
3127         else
3128                 ilk_update_display_irq(dev_priv, hotplug_irqs, enabled_irqs);
3129
3130         ilk_hpd_detection_setup(dev_priv);
3131
3132         ibx_hpd_irq_setup(dev_priv);
3133 }
3134
3135 static u32 bxt_hotplug_enables(struct intel_encoder *encoder)
3136 {
3137         u32 hotplug;
3138
3139         switch (encoder->hpd_pin) {
3140         case HPD_PORT_A:
3141                 hotplug = PORTA_HOTPLUG_ENABLE;
3142                 if (intel_bios_encoder_hpd_invert(encoder->devdata))
3143                         hotplug |= BXT_DDIA_HPD_INVERT;
3144                 return hotplug;
3145         case HPD_PORT_B:
3146                 hotplug = PORTB_HOTPLUG_ENABLE;
3147                 if (intel_bios_encoder_hpd_invert(encoder->devdata))
3148                         hotplug |= BXT_DDIB_HPD_INVERT;
3149                 return hotplug;
3150         case HPD_PORT_C:
3151                 hotplug = PORTC_HOTPLUG_ENABLE;
3152                 if (intel_bios_encoder_hpd_invert(encoder->devdata))
3153                         hotplug |= BXT_DDIC_HPD_INVERT;
3154                 return hotplug;
3155         default:
3156                 return 0;
3157         }
3158 }
3159
3160 static void bxt_hpd_detection_setup(struct drm_i915_private *dev_priv)
3161 {
3162         intel_uncore_rmw(&dev_priv->uncore, PCH_PORT_HOTPLUG,
3163                          PORTA_HOTPLUG_ENABLE |
3164                          PORTB_HOTPLUG_ENABLE |
3165                          PORTC_HOTPLUG_ENABLE |
3166                          BXT_DDI_HPD_INVERT_MASK,
3167                          intel_hpd_hotplug_enables(dev_priv, bxt_hotplug_enables));
3168 }
3169
3170 static void bxt_hpd_irq_setup(struct drm_i915_private *dev_priv)
3171 {
3172         u32 hotplug_irqs, enabled_irqs;
3173
3174         enabled_irqs = intel_hpd_enabled_irqs(dev_priv, dev_priv->display.hotplug.hpd);
3175         hotplug_irqs = intel_hpd_hotplug_irqs(dev_priv, dev_priv->display.hotplug.hpd);
3176
3177         bdw_update_port_irq(dev_priv, hotplug_irqs, enabled_irqs);
3178
3179         bxt_hpd_detection_setup(dev_priv);
3180 }
3181
3182 /*
3183  * SDEIER is also touched by the interrupt handler to work around missed PCH
3184  * interrupts. Hence we can't update it after the interrupt handler is enabled -
3185  * instead we unconditionally enable all PCH interrupt sources here, but then
3186  * only unmask them as needed with SDEIMR.
3187  *
3188  * Note that we currently do this after installing the interrupt handler,
3189  * but before we enable the master interrupt. That should be sufficient
3190  * to avoid races with the irq handler, assuming we have MSI. Shared legacy
3191  * interrupts could still race.
3192  */
3193 static void ibx_irq_postinstall(struct drm_i915_private *dev_priv)
3194 {
3195         struct intel_uncore *uncore = &dev_priv->uncore;
3196         u32 mask;
3197
3198         if (HAS_PCH_NOP(dev_priv))
3199                 return;
3200
3201         if (HAS_PCH_IBX(dev_priv))
3202                 mask = SDE_GMBUS | SDE_AUX_MASK | SDE_POISON;
3203         else if (HAS_PCH_CPT(dev_priv) || HAS_PCH_LPT(dev_priv))
3204                 mask = SDE_GMBUS_CPT | SDE_AUX_MASK_CPT;
3205         else
3206                 mask = SDE_GMBUS_CPT;
3207
3208         GEN3_IRQ_INIT(uncore, SDE, ~mask, 0xffffffff);
3209 }
3210
3211 static void ilk_irq_postinstall(struct drm_i915_private *dev_priv)
3212 {
3213         struct intel_uncore *uncore = &dev_priv->uncore;
3214         u32 display_mask, extra_mask;
3215
3216         if (GRAPHICS_VER(dev_priv) >= 7) {
3217                 display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE_IVB |
3218                                 DE_PCH_EVENT_IVB | DE_AUX_CHANNEL_A_IVB);
3219                 extra_mask = (DE_PIPEC_VBLANK_IVB | DE_PIPEB_VBLANK_IVB |
3220                               DE_PIPEA_VBLANK_IVB | DE_ERR_INT_IVB |
3221                               DE_PLANE_FLIP_DONE_IVB(PLANE_C) |
3222                               DE_PLANE_FLIP_DONE_IVB(PLANE_B) |
3223                               DE_PLANE_FLIP_DONE_IVB(PLANE_A) |
3224                               DE_DP_A_HOTPLUG_IVB);
3225         } else {
3226                 display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT |
3227                                 DE_AUX_CHANNEL_A | DE_PIPEB_CRC_DONE |
3228                                 DE_PIPEA_CRC_DONE | DE_POISON);
3229                 extra_mask = (DE_PIPEA_VBLANK | DE_PIPEB_VBLANK |
3230                               DE_PIPEB_FIFO_UNDERRUN | DE_PIPEA_FIFO_UNDERRUN |
3231                               DE_PLANE_FLIP_DONE(PLANE_A) |
3232                               DE_PLANE_FLIP_DONE(PLANE_B) |
3233                               DE_DP_A_HOTPLUG);
3234         }
3235
3236         if (IS_HASWELL(dev_priv)) {
3237                 gen3_assert_iir_is_zero(uncore, EDP_PSR_IIR);
3238                 display_mask |= DE_EDP_PSR_INT_HSW;
3239         }
3240
3241         if (IS_IRONLAKE_M(dev_priv))
3242                 extra_mask |= DE_PCU_EVENT;
3243
3244         dev_priv->irq_mask = ~display_mask;
3245
3246         ibx_irq_postinstall(dev_priv);
3247
3248         gen5_gt_irq_postinstall(to_gt(dev_priv));
3249
3250         GEN3_IRQ_INIT(uncore, DE, dev_priv->irq_mask,
3251                       display_mask | extra_mask);
3252 }
3253
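     /*
      * vlv/chv can power down the display irq block at runtime; the two
      * helpers below let the display power code tear down and restore the
      * display irqs around that. Caller must hold dev_priv->irq_lock.
      */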
3254 void valleyview_enable_display_irqs(struct drm_i915_private *dev_priv)
3255 {
3256         lockdep_assert_held(&dev_priv->irq_lock);
3257
3258         if (dev_priv->display_irqs_enabled)
3259                 return;
3260
3261         dev_priv->display_irqs_enabled = true;
3262
3263         if (intel_irqs_enabled(dev_priv)) {
3264                 vlv_display_irq_reset(dev_priv);
3265                 vlv_display_irq_postinstall(dev_priv);
3266         }
3267 }
3268
3269 void valleyview_disable_display_irqs(struct drm_i915_private *dev_priv)
3270 {
3271         lockdep_assert_held(&dev_priv->irq_lock);
3272
3273         if (!dev_priv->display_irqs_enabled)
3274                 return;
3275
3276         dev_priv->display_irqs_enabled = false;
3277
3278         if (intel_irqs_enabled(dev_priv))
3279                 vlv_display_irq_reset(dev_priv);
3280 }
3281
3282
3283 static void valleyview_irq_postinstall(struct drm_i915_private *dev_priv)
3284 {
3285         gen5_gt_irq_postinstall(to_gt(dev_priv));
3286
3287         spin_lock_irq(&dev_priv->irq_lock);
3288         if (dev_priv->display_irqs_enabled)
3289                 vlv_display_irq_postinstall(dev_priv);
3290         spin_unlock_irq(&dev_priv->irq_lock);
3291
3292         intel_uncore_write(&dev_priv->uncore, VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE);
3293         intel_uncore_posting_read(&dev_priv->uncore, VLV_MASTER_IER);
3294 }
3295
3296 static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv)
3297 {
3298         struct intel_uncore *uncore = &dev_priv->uncore;
3299
3300         u32 de_pipe_masked = gen8_de_pipe_fault_mask(dev_priv) |
3301                 GEN8_PIPE_CDCLK_CRC_DONE;
3302         u32 de_pipe_enables;
3303         u32 de_port_masked = gen8_de_port_aux_mask(dev_priv);
3304         u32 de_port_enables;
3305         u32 de_misc_masked = GEN8_DE_EDP_PSR;
3306         u32 trans_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) |
3307                 BIT(TRANSCODER_C) | BIT(TRANSCODER_D);
3308         enum pipe pipe;
3309
3310         if (!HAS_DISPLAY(dev_priv))
3311                 return;
3312
3313         if (DISPLAY_VER(dev_priv) <= 10)
3314                 de_misc_masked |= GEN8_DE_MISC_GSE;
3315
3316         if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv))
3317                 de_port_masked |= BXT_DE_PORT_GMBUS;
3318
3319         if (DISPLAY_VER(dev_priv) >= 11) {
3320                 enum port port;
3321
3322                 if (intel_bios_is_dsi_present(dev_priv, &port))
3323                         de_port_masked |= DSI0_TE | DSI1_TE;
3324         }
3325
3326         de_pipe_enables = de_pipe_masked |
3327                 GEN8_PIPE_VBLANK |
3328                 gen8_de_pipe_underrun_mask(dev_priv) |
3329                 gen8_de_pipe_flip_done_mask(dev_priv);
3330
3331         de_port_enables = de_port_masked;
3332         if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv))
3333                 de_port_enables |= BXT_DE_PORT_HOTPLUG_MASK;
3334         else if (IS_BROADWELL(dev_priv))
3335                 de_port_enables |= BDW_DE_PORT_HOTPLUG_MASK;
3336
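             /*
              * Check that the PSR IIRs are already clear before unmasking
              * the PSR interrupt: display version 12+ has one IIR per
              * transcoder, older platforms a single EDP_PSR_IIR.
              */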
3337         if (DISPLAY_VER(dev_priv) >= 12) {
3338                 enum transcoder trans;
3339
3340                 for_each_cpu_transcoder_masked(dev_priv, trans, trans_mask) {
3341                         enum intel_display_power_domain domain;
3342
3343                         domain = POWER_DOMAIN_TRANSCODER(trans);
3344                         if (!intel_display_power_is_enabled(dev_priv, domain))
3345                                 continue;
3346
3347                         gen3_assert_iir_is_zero(uncore, TRANS_PSR_IIR(trans));
3348                 }
3349         } else {
3350                 gen3_assert_iir_is_zero(uncore, EDP_PSR_IIR);
3351         }
3352
3353         for_each_pipe(dev_priv, pipe) {
3354                 dev_priv->de_irq_mask[pipe] = ~de_pipe_masked;
3355
3356                 if (intel_display_power_is_enabled(dev_priv,
3357                                 POWER_DOMAIN_PIPE(pipe)))
3358                         GEN8_IRQ_INIT_NDX(uncore, DE_PIPE, pipe,
3359                                           dev_priv->de_irq_mask[pipe],
3360                                           de_pipe_enables);
3361         }
3362
3363         GEN3_IRQ_INIT(uncore, GEN8_DE_PORT_, ~de_port_masked, de_port_enables);
3364         GEN3_IRQ_INIT(uncore, GEN8_DE_MISC_, ~de_misc_masked, de_misc_masked);
3365
3366         if (DISPLAY_VER(dev_priv) >= 11) {
3367                 u32 de_hpd_masked = 0;
3368                 u32 de_hpd_enables = GEN11_DE_TC_HOTPLUG_MASK |
3369                                      GEN11_DE_TBT_HOTPLUG_MASK;
3370
3371                 GEN3_IRQ_INIT(uncore, GEN11_DE_HPD_, ~de_hpd_masked,
3372                               de_hpd_enables);
3373         }
3374 }
3375
3376 static void icp_irq_postinstall(struct drm_i915_private *dev_priv)
3377 {
3378         struct intel_uncore *uncore = &dev_priv->uncore;
3379         u32 mask = SDE_GMBUS_ICP;
3380
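             /*
              * Same SDEIER scheme as ibx_irq_postinstall() above: enable all
              * PCH interrupt sources in SDEIER, but only unmask GMBUS via
              * SDEIMR.
              */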
3381         GEN3_IRQ_INIT(uncore, SDE, ~mask, 0xffffffff);
3382 }
3383
3384 static void gen8_irq_postinstall(struct drm_i915_private *dev_priv)
3385 {
3386         if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
3387                 icp_irq_postinstall(dev_priv);
3388         else if (HAS_PCH_SPLIT(dev_priv))
3389                 ibx_irq_postinstall(dev_priv);
3390
3391         gen8_gt_irq_postinstall(to_gt(dev_priv));
3392         gen8_de_irq_postinstall(dev_priv);
3393
3394         gen8_master_intr_enable(dev_priv->uncore.regs);
3395 }
3396
3397 static void gen11_de_irq_postinstall(struct drm_i915_private *dev_priv)
3398 {
3399         if (!HAS_DISPLAY(dev_priv))
3400                 return;
3401
3402         gen8_de_irq_postinstall(dev_priv);
3403
3404         intel_uncore_write(&dev_priv->uncore, GEN11_DISPLAY_INT_CTL,
3405                            GEN11_DISPLAY_IRQ_ENABLE);
3406 }
3407
3408 static void gen11_irq_postinstall(struct drm_i915_private *dev_priv)
3409 {
3410         struct intel_gt *gt = to_gt(dev_priv);
3411         struct intel_uncore *uncore = gt->uncore;
3412         u32 gu_misc_masked = GEN11_GU_MISC_GSE;
3413
3414         if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
3415                 icp_irq_postinstall(dev_priv);
3416
3417         gen11_gt_irq_postinstall(gt);
3418         gen11_de_irq_postinstall(dev_priv);
3419
3420         GEN3_IRQ_INIT(uncore, GEN11_GU_MISC_, ~gu_misc_masked, gu_misc_masked);
3421
3422         gen11_master_intr_enable(uncore->regs);
3423         intel_uncore_posting_read(uncore, GEN11_GFX_MSTR_IRQ);
3424 }
3425
3426 static void dg1_irq_postinstall(struct drm_i915_private *dev_priv)
3427 {
3428         struct intel_gt *gt = to_gt(dev_priv);
3429         struct intel_uncore *uncore = gt->uncore;
3430         u32 gu_misc_masked = GEN11_GU_MISC_GSE;
3431
3432         gen11_gt_irq_postinstall(gt);
3433
3434         GEN3_IRQ_INIT(uncore, GEN11_GU_MISC_, ~gu_misc_masked, gu_misc_masked);
3435
3436         if (HAS_DISPLAY(dev_priv)) {
3437                 icp_irq_postinstall(dev_priv);
3438                 gen8_de_irq_postinstall(dev_priv);
3439                 intel_uncore_write(&dev_priv->uncore, GEN11_DISPLAY_INT_CTL,
3440                                    GEN11_DISPLAY_IRQ_ENABLE);
3441         }
3442
3443         dg1_master_intr_enable(uncore->regs);
3444         intel_uncore_posting_read(uncore, DG1_MSTR_TILE_INTR);
3445 }
3446
3447 static void cherryview_irq_postinstall(struct drm_i915_private *dev_priv)
3448 {
3449         gen8_gt_irq_postinstall(to_gt(dev_priv));
3450
3451         spin_lock_irq(&dev_priv->irq_lock);
3452         if (dev_priv->display_irqs_enabled)
3453                 vlv_display_irq_postinstall(dev_priv);
3454         spin_unlock_irq(&dev_priv->irq_lock);
3455
3456         intel_uncore_write(&dev_priv->uncore, GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
3457         intel_uncore_posting_read(&dev_priv->uncore, GEN8_MASTER_IRQ);
3458 }
3459
3460 static void i8xx_irq_reset(struct drm_i915_private *dev_priv)
3461 {
3462         struct intel_uncore *uncore = &dev_priv->uncore;
3463
3464         i9xx_pipestat_irq_reset(dev_priv);
3465
3466         gen2_irq_reset(uncore);
3467         dev_priv->irq_mask = ~0u;
3468 }
3469
3470 static u32 i9xx_error_mask(struct drm_i915_private *i915)
3471 {
3472         /*
3473          * On gen2/3 FBC generates (seemingly spurious)
3474          * display INVALID_GTT/INVALID_GTT_PTE table errors.
3475          *
3476          * Also gen3 bspec has this to say:
3477          * "DISPA_INVALID_GTT_PTE
3478          "  [DevNapa] : Reserved. This bit does not reflect the page
3479          "              table error for the display plane A."
3480          *
3481          * Unfortunately we can't mask off individual PGTBL_ER bits,
3482          * so we just have to mask off all page table errors via EMR.
3483          */
3484         if (HAS_FBC(i915))
3485                 return ~I915_ERROR_MEMORY_REFRESH;
3486         else
3487                 return ~(I915_ERROR_PAGE_TABLE |
3488                          I915_ERROR_MEMORY_REFRESH);
3489 }
3490
3491 static void i8xx_irq_postinstall(struct drm_i915_private *dev_priv)
3492 {
3493         struct intel_uncore *uncore = &dev_priv->uncore;
3494         u16 enable_mask;
3495
3496         intel_uncore_write16(uncore, EMR, i9xx_error_mask(dev_priv));
3497
3498         /* Unmask the interrupts that we always want on. */
3499         dev_priv->irq_mask =
3500                 ~(I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
3501                   I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
3502                   I915_MASTER_ERROR_INTERRUPT);
3503
3504         enable_mask =
3505                 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
3506                 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
3507                 I915_MASTER_ERROR_INTERRUPT |
3508                 I915_USER_INTERRUPT;
3509
3510         gen2_irq_init(uncore, dev_priv->irq_mask, enable_mask);
3511
3512         /* Interrupt setup is already guaranteed to be single-threaded; this is
3513          * just to make the assert_spin_locked check happy. */
3514         spin_lock_irq(&dev_priv->irq_lock);
3515         i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
3516         i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
3517         spin_unlock_irq(&dev_priv->irq_lock);
3518 }
3519
3520 static void i8xx_error_irq_ack(struct drm_i915_private *i915,
3521                                u16 *eir, u16 *eir_stuck)
3522 {
3523         struct intel_uncore *uncore = &i915->uncore;
3524         u16 emr;
3525
3526         *eir = intel_uncore_read16(uncore, EIR);
3527         intel_uncore_write16(uncore, EIR, *eir);
3528
3529         *eir_stuck = intel_uncore_read16(uncore, EIR);
3530         if (*eir_stuck == 0)
3531                 return;
3532
3533         /*
3534          * Toggle all EMR bits to make sure we get an edge
3535          * in the ISR master error bit if we don't clear
3536          * all the EIR bits. Otherwise the edge triggered
3537          * IIR on i965/g4x wouldn't notice that an interrupt
3538          * is still pending. Also some EIR bits can't be
3539          * cleared except by handling the underlying error
3540          * (or by a GPU reset) so we mask any bit that
3541          * remains set.
3542          */
3543         emr = intel_uncore_read16(uncore, EMR);
3544         intel_uncore_write16(uncore, EMR, 0xffff);
3545         intel_uncore_write16(uncore, EMR, emr | *eir_stuck);
3546 }
3547
3548 static void i8xx_error_irq_handler(struct drm_i915_private *dev_priv,
3549                                    u16 eir, u16 eir_stuck)
3550 {
3551         drm_dbg(&dev_priv->drm, "Master Error: EIR 0x%04x\n", eir);
3552
3553         if (eir_stuck)
3554                 drm_dbg(&dev_priv->drm, "EIR stuck: 0x%04x, masked\n",
3555                         eir_stuck);
3556
3557         drm_dbg(&dev_priv->drm, "PGTBL_ER: 0x%08x\n",
3558                 intel_uncore_read(&dev_priv->uncore, PGTBL_ER));
3559 }
3560
3561 static void i9xx_error_irq_ack(struct drm_i915_private *dev_priv,
3562                                u32 *eir, u32 *eir_stuck)
3563 {
3564         u32 emr;
3565
3566         *eir = intel_uncore_read(&dev_priv->uncore, EIR);
3567         intel_uncore_write(&dev_priv->uncore, EIR, *eir);
3568
3569         *eir_stuck = intel_uncore_read(&dev_priv->uncore, EIR);
3570         if (*eir_stuck == 0)
3571                 return;
3572
3573         /*
3574          * Toggle all EMR bits to make sure we get an edge
3575          * in the ISR master error bit if we don't clear
3576          * all the EIR bits. Otherwise the edge triggered
3577          * IIR on i965/g4x wouldn't notice that an interrupt
3578          * is still pending. Also some EIR bits can't be
3579          * cleared except by handling the underlying error
3580          * (or by a GPU reset) so we mask any bit that
3581          * remains set.
3582          */
3583         emr = intel_uncore_read(&dev_priv->uncore, EMR);
3584         intel_uncore_write(&dev_priv->uncore, EMR, 0xffffffff);
3585         intel_uncore_write(&dev_priv->uncore, EMR, emr | *eir_stuck);
3586 }
3587
3588 static void i9xx_error_irq_handler(struct drm_i915_private *dev_priv,
3589                                    u32 eir, u32 eir_stuck)
3590 {
3591         drm_dbg(&dev_priv->drm, "Master Error: EIR 0x%08x\n", eir);
3592
3593         if (eir_stuck)
3594                 drm_dbg(&dev_priv->drm, "EIR stuck: 0x%08x, masked\n",
3595                         eir_stuck);
3596
3597         drm_dbg(&dev_priv->drm, "PGTBL_ER: 0x%08x\n",
3598                 intel_uncore_read(&dev_priv->uncore, PGTBL_ER));
3599 }
3600
3601 static irqreturn_t i8xx_irq_handler(int irq, void *arg)
3602 {
3603         struct drm_i915_private *dev_priv = arg;
3604         irqreturn_t ret = IRQ_NONE;
3605
3606         if (!intel_irqs_enabled(dev_priv))
3607                 return IRQ_NONE;
3608
3609         /* IRQs are synced during runtime_suspend, so we don't require a wakeref */
3610         disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
3611
3612         do {
3613                 u32 pipe_stats[I915_MAX_PIPES] = {};
3614                 u16 eir = 0, eir_stuck = 0;
3615                 u16 iir;
3616
3617                 iir = intel_uncore_read16(&dev_priv->uncore, GEN2_IIR);
3618                 if (iir == 0)
3619                         break;
3620
3621                 ret = IRQ_HANDLED;
3622
3623                 /* Call regardless, as some status bits might not be
3624                  * signalled in iir */
3625                 i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats);
3626
3627                 if (iir & I915_MASTER_ERROR_INTERRUPT)
3628                         i8xx_error_irq_ack(dev_priv, &eir, &eir_stuck);
3629
3630                 intel_uncore_write16(&dev_priv->uncore, GEN2_IIR, iir);
3631
3632                 if (iir & I915_USER_INTERRUPT)
3633                         intel_engine_cs_irq(to_gt(dev_priv)->engine[RCS0], iir);
3634
3635                 if (iir & I915_MASTER_ERROR_INTERRUPT)
3636                         i8xx_error_irq_handler(dev_priv, eir, eir_stuck);
3637
3638                 i8xx_pipestat_irq_handler(dev_priv, iir, pipe_stats);
3639         } while (0);
3640
3641         pmu_irq_stats(dev_priv, ret);
3642
3643         enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
3644
3645         return ret;
3646 }
3647
3648 static void i915_irq_reset(struct drm_i915_private *dev_priv)
3649 {
3650         struct intel_uncore *uncore = &dev_priv->uncore;
3651
3652         if (I915_HAS_HOTPLUG(dev_priv)) {
3653                 i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
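                     /*
                      * The hotplug status bits are sticky (write-1-to-clear),
                      * so the rmw below writes back what it just read and
                      * thereby clears any latched status.
                      */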
3654                 intel_uncore_rmw(uncore, PORT_HOTPLUG_STAT, 0, 0);
3655         }
3656
3657         i9xx_pipestat_irq_reset(dev_priv);
3658
3659         GEN3_IRQ_RESET(uncore, GEN2_);
3660         dev_priv->irq_mask = ~0u;
3661 }
3662
3663 static void i915_irq_postinstall(struct drm_i915_private *dev_priv)
3664 {
3665         struct intel_uncore *uncore = &dev_priv->uncore;
3666         u32 enable_mask;
3667
3668         intel_uncore_write(uncore, EMR, i9xx_error_mask(dev_priv));
3669
3670         /* Unmask the interrupts that we always want on. */
3671         dev_priv->irq_mask =
3672                 ~(I915_ASLE_INTERRUPT |
3673                   I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
3674                   I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
3675                   I915_MASTER_ERROR_INTERRUPT);
3676
3677         enable_mask =
3678                 I915_ASLE_INTERRUPT |
3679                 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
3680                 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
3681                 I915_MASTER_ERROR_INTERRUPT |
3682                 I915_USER_INTERRUPT;
3683
3684         if (I915_HAS_HOTPLUG(dev_priv)) {
3685                 /* Enable in IER... */
3686                 enable_mask |= I915_DISPLAY_PORT_INTERRUPT;
3687                 /* and unmask in IMR */
3688                 dev_priv->irq_mask &= ~I915_DISPLAY_PORT_INTERRUPT;
3689         }
3690
3691         GEN3_IRQ_INIT(uncore, GEN2_, dev_priv->irq_mask, enable_mask);
3692
3693         /* Interrupt setup is already guaranteed to be single-threaded; this is
3694          * just to make the assert_spin_locked check happy. */
3695         spin_lock_irq(&dev_priv->irq_lock);
3696         i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
3697         i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
3698         spin_unlock_irq(&dev_priv->irq_lock);
3699
3700         i915_enable_asle_pipestat(dev_priv);
3701 }
3702
3703 static irqreturn_t i915_irq_handler(int irq, void *arg)
3704 {
3705         struct drm_i915_private *dev_priv = arg;
3706         irqreturn_t ret = IRQ_NONE;
3707
3708         if (!intel_irqs_enabled(dev_priv))
3709                 return IRQ_NONE;
3710
3711         /* IRQs are synced during runtime_suspend, so we don't require a wakeref */
3712         disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
3713
3714         do {
3715                 u32 pipe_stats[I915_MAX_PIPES] = {};
3716                 u32 eir = 0, eir_stuck = 0;
3717                 u32 hotplug_status = 0;
3718                 u32 iir;
3719
3720                 iir = intel_uncore_read(&dev_priv->uncore, GEN2_IIR);
3721                 if (iir == 0)
3722                         break;
3723
3724                 ret = IRQ_HANDLED;
3725
3726                 if (I915_HAS_HOTPLUG(dev_priv) &&
3727                     iir & I915_DISPLAY_PORT_INTERRUPT)
3728                         hotplug_status = i9xx_hpd_irq_ack(dev_priv);
3729
3730                 /* Call regardless, as some status bits might not be
3731                  * signalled in iir */
3732                 i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats);
3733
3734                 if (iir & I915_MASTER_ERROR_INTERRUPT)
3735                         i9xx_error_irq_ack(dev_priv, &eir, &eir_stuck);
3736
3737                 intel_uncore_write(&dev_priv->uncore, GEN2_IIR, iir);
3738
3739                 if (iir & I915_USER_INTERRUPT)
3740                         intel_engine_cs_irq(to_gt(dev_priv)->engine[RCS0], iir);
3741
3742                 if (iir & I915_MASTER_ERROR_INTERRUPT)
3743                         i9xx_error_irq_handler(dev_priv, eir, eir_stuck);
3744
3745                 if (hotplug_status)
3746                         i9xx_hpd_irq_handler(dev_priv, hotplug_status);
3747
3748                 i915_pipestat_irq_handler(dev_priv, iir, pipe_stats);
3749         } while (0);
3750
3751         pmu_irq_stats(dev_priv, ret);
3752
3753         enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
3754
3755         return ret;
3756 }
3757
3758 static void i965_irq_reset(struct drm_i915_private *dev_priv)
3759 {
3760         struct intel_uncore *uncore = &dev_priv->uncore;
3761
3762         i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
3763         intel_uncore_rmw(uncore, PORT_HOTPLUG_STAT, 0, 0);
3764
3765         i9xx_pipestat_irq_reset(dev_priv);
3766
3767         GEN3_IRQ_RESET(uncore, GEN2_);
3768         dev_priv->irq_mask = ~0u;
3769 }
3770
3771 static u32 i965_error_mask(struct drm_i915_private *i915)
3772 {
3773         /*
3774          * Enable some error detection, note the instruction error mask
3775          * bit is reserved, so we leave it masked.
3776          *
3777          * i965 FBC no longer generates spurious GTT errors,
3778          * so we can always enable the page table errors.
3779          */
3780         if (IS_G4X(i915))
3781                 return ~(GM45_ERROR_PAGE_TABLE |
3782                          GM45_ERROR_MEM_PRIV |
3783                          GM45_ERROR_CP_PRIV |
3784                          I915_ERROR_MEMORY_REFRESH);
3785         else
3786                 return ~(I915_ERROR_PAGE_TABLE |
3787                          I915_ERROR_MEMORY_REFRESH);
3788 }
3789
3790 static void i965_irq_postinstall(struct drm_i915_private *dev_priv)
3791 {
3792         struct intel_uncore *uncore = &dev_priv->uncore;
3793         u32 enable_mask;
3794
3795         intel_uncore_write(uncore, EMR, i965_error_mask(dev_priv));
3796
3797         /* Unmask the interrupts that we always want on. */
3798         dev_priv->irq_mask =
3799                 ~(I915_ASLE_INTERRUPT |
3800                   I915_DISPLAY_PORT_INTERRUPT |
3801                   I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
3802                   I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
3803                   I915_MASTER_ERROR_INTERRUPT);
3804
3805         enable_mask =
3806                 I915_ASLE_INTERRUPT |
3807                 I915_DISPLAY_PORT_INTERRUPT |
3808                 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
3809                 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
3810                 I915_MASTER_ERROR_INTERRUPT |
3811                 I915_USER_INTERRUPT;
3812
3813         if (IS_G4X(dev_priv))
3814                 enable_mask |= I915_BSD_USER_INTERRUPT;
3815
3816         GEN3_IRQ_INIT(uncore, GEN2_, dev_priv->irq_mask, enable_mask);
3817
3818         /* Interrupt setup is already guaranteed to be single-threaded; this is
3819          * just to make the assert_spin_locked check happy. */
3820         spin_lock_irq(&dev_priv->irq_lock);
3821         i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
3822         i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
3823         i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
3824         spin_unlock_irq(&dev_priv->irq_lock);
3825
3826         i915_enable_asle_pipestat(dev_priv);
3827 }
3828
3829 static void i915_hpd_irq_setup(struct drm_i915_private *dev_priv)
3830 {
3831         u32 hotplug_en;
3832
3833         lockdep_assert_held(&dev_priv->irq_lock);
3834
3835         /* Note HDMI and DP share hotplug bits */
3836         /* enable bits are the same for all generations */
3837         hotplug_en = intel_hpd_enabled_irqs(dev_priv, hpd_mask_i915);
3838         /* Programming the CRT detection parameters tends to generate
3839          * a spurious hotplug event about three seconds later. So just
3840          * do it once.
3841          */
3842         if (IS_G4X(dev_priv))
3843                 hotplug_en |= CRT_HOTPLUG_ACTIVATION_PERIOD_64;
3844         hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50;
3845
3846         /* Ignore TV since it's buggy */
3847         i915_hotplug_interrupt_update_locked(dev_priv,
3848                                              HOTPLUG_INT_EN_MASK |
3849                                              CRT_HOTPLUG_VOLTAGE_COMPARE_MASK |
3850                                              CRT_HOTPLUG_ACTIVATION_PERIOD_64,
3851                                              hotplug_en);
3852 }
3853
3854 static irqreturn_t i965_irq_handler(int irq, void *arg)
3855 {
3856         struct drm_i915_private *dev_priv = arg;
3857         irqreturn_t ret = IRQ_NONE;
3858
3859         if (!intel_irqs_enabled(dev_priv))
3860                 return IRQ_NONE;
3861
3862         /* IRQs are synced during runtime_suspend, so we don't require a wakeref */
3863         disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
3864
3865         do {
3866                 u32 pipe_stats[I915_MAX_PIPES] = {};
3867                 u32 eir = 0, eir_stuck = 0;
3868                 u32 hotplug_status = 0;
3869                 u32 iir;
3870
3871                 iir = intel_uncore_read(&dev_priv->uncore, GEN2_IIR);
3872                 if (iir == 0)
3873                         break;
3874
3875                 ret = IRQ_HANDLED;
3876
3877                 if (iir & I915_DISPLAY_PORT_INTERRUPT)
3878                         hotplug_status = i9xx_hpd_irq_ack(dev_priv);
3879
3880                 /* Call regardless, as some status bits might not be
3881                  * signalled in iir */
3882                 i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats);
3883
3884                 if (iir & I915_MASTER_ERROR_INTERRUPT)
3885                         i9xx_error_irq_ack(dev_priv, &eir, &eir_stuck);
3886
3887                 intel_uncore_write(&dev_priv->uncore, GEN2_IIR, iir);
3888
3889                 if (iir & I915_USER_INTERRUPT)
3890                         intel_engine_cs_irq(to_gt(dev_priv)->engine[RCS0],
3891                                             iir);
3892
3893                 if (iir & I915_BSD_USER_INTERRUPT)
3894                         intel_engine_cs_irq(to_gt(dev_priv)->engine[VCS0],
3895                                             iir >> 25);
3896
3897                 if (iir & I915_MASTER_ERROR_INTERRUPT)
3898                         i9xx_error_irq_handler(dev_priv, eir, eir_stuck);
3899
3900                 if (hotplug_status)
3901                         i9xx_hpd_irq_handler(dev_priv, hotplug_status);
3902
3903                 i965_pipestat_irq_handler(dev_priv, iir, pipe_stats);
3904         } while (0);
3905
3906         pmu_irq_stats(dev_priv, ret);
3907
3908         enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
3909
3910         return ret;
3911 }
3912
3913 struct intel_hotplug_funcs {
3914         void (*hpd_irq_setup)(struct drm_i915_private *i915);
3915 };
3916
3917 #define HPD_FUNCS(platform)                                      \
3918 static const struct intel_hotplug_funcs platform##_hpd_funcs = { \
3919         .hpd_irq_setup = platform##_hpd_irq_setup,               \
3920 }
3921
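     /*
      * Each invocation below defines a one-entry vtable, e.g.
      * HPD_FUNCS(i915) expands to:
      *
      *   static const struct intel_hotplug_funcs i915_hpd_funcs = {
      *           .hpd_irq_setup = i915_hpd_irq_setup,
      *   };
      */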
3922 HPD_FUNCS(i915);
3923 HPD_FUNCS(dg1);
3924 HPD_FUNCS(gen11);
3925 HPD_FUNCS(bxt);
3926 HPD_FUNCS(icp);
3927 HPD_FUNCS(spt);
3928 HPD_FUNCS(ilk);
3929 #undef HPD_FUNCS
3930
3931 void intel_hpd_irq_setup(struct drm_i915_private *i915)
3932 {
3933         if (i915->display_irqs_enabled && i915->display.funcs.hotplug)
3934                 i915->display.funcs.hotplug->hpd_irq_setup(i915);
3935 }
3936
3937 /**
3938  * intel_irq_init - initializes irq support
3939  * @dev_priv: i915 device instance
3940  *
3941  * This function initializes all the irq support including work items, timers
3942  * and all the vtables. It does not set up the interrupt itself, though.
3943  */
3944 void intel_irq_init(struct drm_i915_private *dev_priv)
3945 {
3946         int i;
3947
3948         INIT_WORK(&dev_priv->l3_parity.error_work, ivb_parity_work);
3949         for (i = 0; i < MAX_L3_SLICES; ++i)
3950                 dev_priv->l3_parity.remap_info[i] = NULL;
3951
3952         /* pre-gen11 the guc irq bits are in the upper 16 bits of the pm reg */
3953         if (HAS_GT_UC(dev_priv) && GRAPHICS_VER(dev_priv) < 11)
3954                 to_gt(dev_priv)->pm_guc_events = GUC_INTR_GUC2HOST << 16;
3955
3956         if (!HAS_DISPLAY(dev_priv))
3957                 return;
3958
3959         intel_hpd_init_pins(dev_priv);
3960
3961         intel_hpd_init_early(dev_priv);
3962
3963         dev_priv->drm.vblank_disable_immediate = true;
3964
3965         /* Most platforms treat the display irq block as an always-on
3966          * power domain. vlv/chv can disable it at runtime and need
3967          * special care to avoid writing any of the display block registers
3968          * outside of the power domain. We defer setting up the display irqs
3969          * in this case to the runtime pm.
3970          */
3971         dev_priv->display_irqs_enabled = true;
3972         if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
3973                 dev_priv->display_irqs_enabled = false;
3974
3975         if (HAS_GMCH(dev_priv)) {
3976                 if (I915_HAS_HOTPLUG(dev_priv))
3977                         dev_priv->display.funcs.hotplug = &i915_hpd_funcs;
3978         } else {
3979                 if (HAS_PCH_DG2(dev_priv))
3980                         dev_priv->display.funcs.hotplug = &icp_hpd_funcs;
3981                 else if (HAS_PCH_DG1(dev_priv))
3982                         dev_priv->display.funcs.hotplug = &dg1_hpd_funcs;
3983                 else if (DISPLAY_VER(dev_priv) >= 11)
3984                         dev_priv->display.funcs.hotplug = &gen11_hpd_funcs;
3985                 else if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv))
3986                         dev_priv->display.funcs.hotplug = &bxt_hpd_funcs;
3987                 else if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
3988                         dev_priv->display.funcs.hotplug = &icp_hpd_funcs;
3989                 else if (INTEL_PCH_TYPE(dev_priv) >= PCH_SPT)
3990                         dev_priv->display.funcs.hotplug = &spt_hpd_funcs;
3991                 else
3992                         dev_priv->display.funcs.hotplug = &ilk_hpd_funcs;
3993         }
3994 }
3995
3996 /**
3997  * intel_irq_fini - deinitializes IRQ support
3998  * @i915: i915 device instance
3999  *
4000  * This function deinitializes all the IRQ support.
4001  */
4002 void intel_irq_fini(struct drm_i915_private *i915)
4003 {
4004         int i;
4005
4006         for (i = 0; i < MAX_L3_SLICES; ++i)
4007                 kfree(i915->l3_parity.remap_info[i]);
4008 }
4009
4010 static irq_handler_t intel_irq_handler(struct drm_i915_private *dev_priv)
4011 {
4012         if (HAS_GMCH(dev_priv)) {
4013                 if (IS_CHERRYVIEW(dev_priv))
4014                         return cherryview_irq_handler;
4015                 else if (IS_VALLEYVIEW(dev_priv))
4016                         return valleyview_irq_handler;
4017                 else if (GRAPHICS_VER(dev_priv) == 4)
4018                         return i965_irq_handler;
4019                 else if (GRAPHICS_VER(dev_priv) == 3)
4020                         return i915_irq_handler;
4021                 else
4022                         return i8xx_irq_handler;
4023         } else {
4024                 if (GRAPHICS_VER_FULL(dev_priv) >= IP_VER(12, 10))
4025                         return dg1_irq_handler;
4026                 else if (GRAPHICS_VER(dev_priv) >= 11)
4027                         return gen11_irq_handler;
4028                 else if (GRAPHICS_VER(dev_priv) >= 8)
4029                         return gen8_irq_handler;
4030                 else
4031                         return ilk_irq_handler;
4032         }
4033 }
4034
4035 static void intel_irq_reset(struct drm_i915_private *dev_priv)
4036 {
4037         if (HAS_GMCH(dev_priv)) {
4038                 if (IS_CHERRYVIEW(dev_priv))
4039                         cherryview_irq_reset(dev_priv);
4040                 else if (IS_VALLEYVIEW(dev_priv))
4041                         valleyview_irq_reset(dev_priv);
4042                 else if (GRAPHICS_VER(dev_priv) == 4)
4043                         i965_irq_reset(dev_priv);
4044                 else if (GRAPHICS_VER(dev_priv) == 3)
4045                         i915_irq_reset(dev_priv);
4046                 else
4047                         i8xx_irq_reset(dev_priv);
4048         } else {
4049                 if (GRAPHICS_VER_FULL(dev_priv) >= IP_VER(12, 10))
4050                         dg1_irq_reset(dev_priv);
4051                 else if (GRAPHICS_VER(dev_priv) >= 11)
4052                         gen11_irq_reset(dev_priv);
4053                 else if (GRAPHICS_VER(dev_priv) >= 8)
4054                         gen8_irq_reset(dev_priv);
4055                 else
4056                         ilk_irq_reset(dev_priv);
4057         }
4058 }
4059
4060 static void intel_irq_postinstall(struct drm_i915_private *dev_priv)
4061 {
4062         if (HAS_GMCH(dev_priv)) {
4063                 if (IS_CHERRYVIEW(dev_priv))
4064                         cherryview_irq_postinstall(dev_priv);
4065                 else if (IS_VALLEYVIEW(dev_priv))
4066                         valleyview_irq_postinstall(dev_priv);
4067                 else if (GRAPHICS_VER(dev_priv) == 4)
4068                         i965_irq_postinstall(dev_priv);
4069                 else if (GRAPHICS_VER(dev_priv) == 3)
4070                         i915_irq_postinstall(dev_priv);
4071                 else
4072                         i8xx_irq_postinstall(dev_priv);
4073         } else {
4074                 if (GRAPHICS_VER_FULL(dev_priv) >= IP_VER(12, 10))
4075                         dg1_irq_postinstall(dev_priv);
4076                 else if (GRAPHICS_VER(dev_priv) >= 11)
4077                         gen11_irq_postinstall(dev_priv);
4078                 else if (GRAPHICS_VER(dev_priv) >= 8)
4079                         gen8_irq_postinstall(dev_priv);
4080                 else
4081                         ilk_irq_postinstall(dev_priv);
4082         }
4083 }
4084
4085 /**
4086  * intel_irq_install - enables the hardware interrupt
4087  * @dev_priv: i915 device instance
4088  *
4089  * This function enables the hardware interrupt handling, but leaves the hotplug
4090  * handling still disabled. It is called after intel_irq_init().
4091  *
4092  * In the driver load and resume code we need working interrupts in a few places
4093  * but don't want to deal with the hassle of concurrent probe and hotplug
4094  * workers. Hence the split into this two-stage approach.
4095  */
4096 int intel_irq_install(struct drm_i915_private *dev_priv)
4097 {
4098         int irq = to_pci_dev(dev_priv->drm.dev)->irq;
4099         int ret;
4100
4101         /*
4102          * We enable some interrupt sources in our postinstall hooks, so mark
4103          * interrupts as enabled _before_ actually enabling them to avoid
4104          * special cases in our ordering checks.
4105          */
4106         dev_priv->runtime_pm.irqs_enabled = true;
4107
4108         dev_priv->irq_enabled = true;
4109
4110         intel_irq_reset(dev_priv);
4111
4112         ret = request_irq(irq, intel_irq_handler(dev_priv),
4113                           IRQF_SHARED, DRIVER_NAME, dev_priv);
4114         if (ret < 0) {
4115                 dev_priv->irq_enabled = false;
4116                 return ret;
4117         }
4118
4119         intel_irq_postinstall(dev_priv);
4120
4121         return ret;
4122 }
4123
4124 /**
4125  * intel_irq_uninstall - finalizes all irq handling
4126  * @dev_priv: i915 device instance
4127  *
4128  * This stops interrupt and hotplug handling and unregisters and frees all
4129  * resources acquired in the init functions.
4130  */
4131 void intel_irq_uninstall(struct drm_i915_private *dev_priv)
4132 {
4133         int irq = to_pci_dev(dev_priv->drm.dev)->irq;
4134
4135         /*
4136          * FIXME we can get called twice during driver probe
4137          * error handling as well as during driver remove due to
4138          * intel_modeset_driver_remove() calling us out of sequence.
4139          * Would be nice if it didn't do that...
4140          */
4141         if (!dev_priv->irq_enabled)
4142                 return;
4143
4144         dev_priv->irq_enabled = false;
4145
4146         intel_irq_reset(dev_priv);
4147
4148         free_irq(irq, dev_priv);
4149
4150         intel_hpd_cancel_work(dev_priv);
4151         dev_priv->runtime_pm.irqs_enabled = false;
4152 }
4153
4154 /**
4155  * intel_runtime_pm_disable_interrupts - runtime interrupt disabling
4156  * @dev_priv: i915 device instance
4157  *
4158  * This function is used to disable interrupts at runtime, both in the runtime
4159  * pm and the system suspend/resume code.
4160  */
4161 void intel_runtime_pm_disable_interrupts(struct drm_i915_private *dev_priv)
4162 {
4163         intel_irq_reset(dev_priv);
4164         dev_priv->runtime_pm.irqs_enabled = false;
4165         intel_synchronize_irq(dev_priv);
4166 }
4167
4168 /**
4169  * intel_runtime_pm_enable_interrupts - runtime interrupt enabling
4170  * @dev_priv: i915 device instance
4171  *
4172  * This function is used to enable interrupts at runtime, both in the runtime
4173  * pm and the system suspend/resume code.
4174  */
4175 void intel_runtime_pm_enable_interrupts(struct drm_i915_private *dev_priv)
4176 {
4177         dev_priv->runtime_pm.irqs_enabled = true;
4178         intel_irq_reset(dev_priv);
4179         intel_irq_postinstall(dev_priv);
4180 }
4181
4182 bool intel_irqs_enabled(struct drm_i915_private *dev_priv)
4183 {
4184         return dev_priv->runtime_pm.irqs_enabled;
4185 }
4186
4187 void intel_synchronize_irq(struct drm_i915_private *i915)
4188 {
4189         synchronize_irq(to_pci_dev(i915->drm.dev)->irq);
4190 }
4191
4192 void intel_synchronize_hardirq(struct drm_i915_private *i915)
4193 {
4194         synchronize_hardirq(to_pci_dev(i915->drm.dev)->irq);
4195 }