/* i915_irq.c -- IRQ support for the I915 -*- linux-c -*-
 */
/*
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/slab.h>
#include <linux/sysrq.h>

#include <drm/drm_drv.h>

#include "display/intel_display_irq.h"
#include "display/intel_hotplug.h"
#include "display/intel_hotplug_irq.h"
#include "display/intel_lpe_audio.h"
#include "display/intel_psr_regs.h"

#include "gt/intel_breadcrumbs.h"
#include "gt/intel_gt.h"
#include "gt/intel_gt_irq.h"
#include "gt/intel_gt_pm_irq.h"
#include "gt/intel_gt_regs.h"
#include "gt/intel_rps.h"

#include "i915_driver.h"
#include "i915_drv.h"
#include "i915_irq.h"
#include "i915_reg.h"

/**
 * DOC: interrupt handling
 *
 * These functions provide the basic support for enabling and disabling
 * interrupt handling. There's a lot more functionality in i915_irq.c and
 * related files, but that will be described in separate chapters.
 */

/*
 * Interrupt statistic for PMU. Increments the counter only if the
 * interrupt originated from the GPU, so interrupts from a device which
 * shares the interrupt line are not accounted for.
 */
static inline void pmu_irq_stats(struct drm_i915_private *i915,
                                 irqreturn_t res)
{
        if (unlikely(res != IRQ_HANDLED))
                return;

        /*
         * A clever compiler translates that into INC. A not-so-clever one
         * should at least prevent store tearing.
         */
        WRITE_ONCE(i915->pmu.irq_count, i915->pmu.irq_count + 1);
}

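/*
 * Reset a gen2-style interrupt register block: mask everything via IMR,
 * disable via IER, then clear any pending IIR bits. IIR is written and
 * flushed twice since it can queue up two events per bit (see below).
 */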
void gen2_irq_reset(struct intel_uncore *uncore, struct i915_irq_regs regs)
{
        intel_uncore_write(uncore, regs.imr, 0xffffffff);
        intel_uncore_posting_read(uncore, regs.imr);

        intel_uncore_write(uncore, regs.ier, 0);

        /* IIR can theoretically queue up two events. Be paranoid. */
        intel_uncore_write(uncore, regs.iir, 0xffffffff);
        intel_uncore_posting_read(uncore, regs.iir);
        intel_uncore_write(uncore, regs.iir, 0xffffffff);
        intel_uncore_posting_read(uncore, regs.iir);
}

/*
 * We should clear IMR at preinstall/uninstall, and just check at postinstall.
 */
void gen2_assert_iir_is_zero(struct intel_uncore *uncore, i915_reg_t reg)
{
        u32 val = intel_uncore_read(uncore, reg);

        if (val == 0)
                return;

        drm_WARN(&uncore->i915->drm, 1,
                 "Interrupt register 0x%x is not zero: 0x%08x\n",
                 i915_mmio_reg_offset(reg), val);
        intel_uncore_write(uncore, reg, 0xffffffff);
        intel_uncore_posting_read(uncore, reg);
        intel_uncore_write(uncore, reg, 0xffffffff);
        intel_uncore_posting_read(uncore, reg);
}

void gen2_irq_init(struct intel_uncore *uncore, struct i915_irq_regs regs,
                   u32 imr_val, u32 ier_val)
{
        gen2_assert_iir_is_zero(uncore, regs.iir);

        intel_uncore_write(uncore, regs.ier, ier_val);
        intel_uncore_write(uncore, regs.imr, imr_val);
        intel_uncore_posting_read(uncore, regs.imr);
}

/**
 * ivb_parity_work - Workqueue called when a parity error interrupt
 * occurred.
 * @work: workqueue struct
 *
 * Doesn't actually do anything except notify userspace. As a consequence of
 * this event, userspace should try to remap the bad rows, since the same
 * row is statistically more likely to go bad again.
 */
static void ivb_parity_work(struct work_struct *work)
{
        struct drm_i915_private *dev_priv =
                container_of(work, typeof(*dev_priv), l3_parity.error_work);
        struct intel_gt *gt = to_gt(dev_priv);
        u32 error_status, row, bank, subbank;
        char *parity_event[6];
        u32 misccpctl;
        u8 slice = 0;

        /* We must turn off DOP level clock gating to access the L3 registers.
         * In order to prevent a get/put style interface, acquire struct mutex
         * any time we access those registers.
         */
        mutex_lock(&dev_priv->drm.struct_mutex);

        /* If we've screwed up tracking, just let the interrupt fire again */
        if (drm_WARN_ON(&dev_priv->drm, !dev_priv->l3_parity.which_slice))
                goto out;

        misccpctl = intel_uncore_rmw(&dev_priv->uncore, GEN7_MISCCPCTL,
                                     GEN7_DOP_CLOCK_GATE_ENABLE, 0);
        intel_uncore_posting_read(&dev_priv->uncore, GEN7_MISCCPCTL);

        while ((slice = ffs(dev_priv->l3_parity.which_slice)) != 0) {
                i915_reg_t reg;

                slice--;
                if (drm_WARN_ON_ONCE(&dev_priv->drm,
                                     slice >= NUM_L3_SLICES(dev_priv)))
                        break;

                dev_priv->l3_parity.which_slice &= ~(1<<slice);

                reg = GEN7_L3CDERRST1(slice);

                error_status = intel_uncore_read(&dev_priv->uncore, reg);
                row = GEN7_PARITY_ERROR_ROW(error_status);
                bank = GEN7_PARITY_ERROR_BANK(error_status);
                subbank = GEN7_PARITY_ERROR_SUBBANK(error_status);

                intel_uncore_write(&dev_priv->uncore, reg, GEN7_PARITY_ERROR_VALID | GEN7_L3CDERRST1_ENABLE);
                intel_uncore_posting_read(&dev_priv->uncore, reg);

                parity_event[0] = I915_L3_PARITY_UEVENT "=1";
                parity_event[1] = kasprintf(GFP_KERNEL, "ROW=%d", row);
                parity_event[2] = kasprintf(GFP_KERNEL, "BANK=%d", bank);
                parity_event[3] = kasprintf(GFP_KERNEL, "SUBBANK=%d", subbank);
                parity_event[4] = kasprintf(GFP_KERNEL, "SLICE=%d", slice);
                parity_event[5] = NULL;

                kobject_uevent_env(&dev_priv->drm.primary->kdev->kobj,
                                   KOBJ_CHANGE, parity_event);

                drm_dbg(&dev_priv->drm,
                        "Parity error: Slice = %d, Row = %d, Bank = %d, Sub bank = %d.\n",
                        slice, row, bank, subbank);

                kfree(parity_event[4]);
                kfree(parity_event[3]);
                kfree(parity_event[2]);
                kfree(parity_event[1]);
        }

        intel_uncore_write(&dev_priv->uncore, GEN7_MISCCPCTL, misccpctl);

out:
        drm_WARN_ON(&dev_priv->drm, dev_priv->l3_parity.which_slice);
        spin_lock_irq(gt->irq_lock);
        gen5_gt_enable_irq(gt, GT_PARITY_ERROR(dev_priv));
        spin_unlock_irq(gt->irq_lock);

        mutex_unlock(&dev_priv->drm.struct_mutex);
}

static irqreturn_t valleyview_irq_handler(int irq, void *arg)
{
        struct drm_i915_private *dev_priv = arg;
        irqreturn_t ret = IRQ_NONE;

        if (!intel_irqs_enabled(dev_priv))
                return IRQ_NONE;

        /* IRQs are synced during runtime_suspend, we don't require a wakeref */
        disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);

        do {
                u32 iir, gt_iir, pm_iir;
                u32 pipe_stats[I915_MAX_PIPES] = {};
                u32 hotplug_status = 0;
                u32 ier = 0;

                gt_iir = intel_uncore_read(&dev_priv->uncore, GTIIR);
                pm_iir = intel_uncore_read(&dev_priv->uncore, GEN6_PMIIR);
                iir = intel_uncore_read(&dev_priv->uncore, VLV_IIR);

                if (gt_iir == 0 && pm_iir == 0 && iir == 0)
                        break;

                ret = IRQ_HANDLED;

                /*
                 * Theory on interrupt generation, based on empirical evidence:
                 *
                 * x = ((VLV_IIR & VLV_IER) ||
                 *      (((GT_IIR & GT_IER) || (GEN6_PMIIR & GEN6_PMIER)) &&
                 *       (VLV_MASTER_IER & MASTER_INTERRUPT_ENABLE)));
                 *
                 * A CPU interrupt will only be raised when 'x' has a 0->1 edge.
                 * Hence we clear MASTER_INTERRUPT_ENABLE and VLV_IER to
                 * guarantee the CPU interrupt will be raised again even if we
                 * don't end up clearing all the VLV_IIR, GT_IIR, GEN6_PMIIR
                 * bits this time around.
                 */
                intel_uncore_write(&dev_priv->uncore, VLV_MASTER_IER, 0);
                ier = intel_uncore_rmw(&dev_priv->uncore, VLV_IER, ~0, 0);

                if (gt_iir)
                        intel_uncore_write(&dev_priv->uncore, GTIIR, gt_iir);
                if (pm_iir)
                        intel_uncore_write(&dev_priv->uncore, GEN6_PMIIR, pm_iir);

                if (iir & I915_DISPLAY_PORT_INTERRUPT)
                        hotplug_status = i9xx_hpd_irq_ack(dev_priv);

                /* Call regardless, as some status bits might not be
                 * signalled in IIR */
                i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats);

                if (iir & (I915_LPE_PIPE_A_INTERRUPT |
                           I915_LPE_PIPE_B_INTERRUPT))
                        intel_lpe_audio_irq_handler(dev_priv);

                /*
                 * VLV_IIR is single buffered, and reflects the level
                 * from PIPESTAT/PORT_HOTPLUG_STAT, hence clear it last.
                 */
                if (iir)
                        intel_uncore_write(&dev_priv->uncore, VLV_IIR, iir);

                intel_uncore_write(&dev_priv->uncore, VLV_IER, ier);
                intel_uncore_write(&dev_priv->uncore, VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE);

                if (gt_iir)
                        gen6_gt_irq_handler(to_gt(dev_priv), gt_iir);
                if (pm_iir)
                        gen6_rps_irq_handler(&to_gt(dev_priv)->rps, pm_iir);

                if (hotplug_status)
                        i9xx_hpd_irq_handler(dev_priv, hotplug_status);

                valleyview_pipestat_irq_handler(dev_priv, pipe_stats);
        } while (0);

        pmu_irq_stats(dev_priv, ret);

        enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);

        return ret;
}

static irqreturn_t cherryview_irq_handler(int irq, void *arg)
{
        struct drm_i915_private *dev_priv = arg;
        irqreturn_t ret = IRQ_NONE;

        if (!intel_irqs_enabled(dev_priv))
                return IRQ_NONE;

        /* IRQs are synced during runtime_suspend, we don't require a wakeref */
        disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);

        do {
                u32 master_ctl, iir;
                u32 pipe_stats[I915_MAX_PIPES] = {};
                u32 hotplug_status = 0;
                u32 ier = 0;

                master_ctl = intel_uncore_read(&dev_priv->uncore, GEN8_MASTER_IRQ) & ~GEN8_MASTER_IRQ_CONTROL;
                iir = intel_uncore_read(&dev_priv->uncore, VLV_IIR);

                if (master_ctl == 0 && iir == 0)
                        break;

                ret = IRQ_HANDLED;

                /*
                 * Theory on interrupt generation, based on empirical evidence:
                 *
                 * x = ((VLV_IIR & VLV_IER) ||
                 *      ((GEN8_MASTER_IRQ & ~GEN8_MASTER_IRQ_CONTROL) &&
                 *       (GEN8_MASTER_IRQ & GEN8_MASTER_IRQ_CONTROL)));
                 *
                 * A CPU interrupt will only be raised when 'x' has a 0->1 edge.
                 * Hence we clear GEN8_MASTER_IRQ_CONTROL and VLV_IER to
                 * guarantee the CPU interrupt will be raised again even if we
                 * don't end up clearing all the VLV_IIR and GEN8_MASTER_IRQ_CONTROL
                 * bits this time around.
                 */
                intel_uncore_write(&dev_priv->uncore, GEN8_MASTER_IRQ, 0);
                ier = intel_uncore_rmw(&dev_priv->uncore, VLV_IER, ~0, 0);

                gen8_gt_irq_handler(to_gt(dev_priv), master_ctl);

                if (iir & I915_DISPLAY_PORT_INTERRUPT)
                        hotplug_status = i9xx_hpd_irq_ack(dev_priv);

                /* Call regardless, as some status bits might not be
                 * signalled in IIR */
                i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats);

                if (iir & (I915_LPE_PIPE_A_INTERRUPT |
                           I915_LPE_PIPE_B_INTERRUPT |
                           I915_LPE_PIPE_C_INTERRUPT))
                        intel_lpe_audio_irq_handler(dev_priv);

                /*
                 * VLV_IIR is single buffered, and reflects the level
                 * from PIPESTAT/PORT_HOTPLUG_STAT, hence clear it last.
                 */
                if (iir)
                        intel_uncore_write(&dev_priv->uncore, VLV_IIR, iir);

                intel_uncore_write(&dev_priv->uncore, VLV_IER, ier);
                intel_uncore_write(&dev_priv->uncore, GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);

                if (hotplug_status)
                        i9xx_hpd_irq_handler(dev_priv, hotplug_status);

                valleyview_pipestat_irq_handler(dev_priv, pipe_stats);
        } while (0);

        pmu_irq_stats(dev_priv, ret);

        enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);

        return ret;
}

/*
 * To handle irqs with the minimum potential races with fresh interrupts, we:
 * 1 - Disable Master Interrupt Control.
 * 2 - Find the source(s) of the interrupt.
 * 3 - Clear the Interrupt Identity bits (IIR).
 * 4 - Process the interrupt(s) that had bits set in the IIRs.
 * 5 - Re-enable Master Interrupt Control.
 */
static irqreturn_t ilk_irq_handler(int irq, void *arg)
{
        struct drm_i915_private *i915 = arg;
        void __iomem * const regs = intel_uncore_regs(&i915->uncore);
        u32 de_iir, gt_iir, de_ier, sde_ier = 0;
        irqreturn_t ret = IRQ_NONE;

        if (unlikely(!intel_irqs_enabled(i915)))
                return IRQ_NONE;

        /* IRQs are synced during runtime_suspend, we don't require a wakeref */
        disable_rpm_wakeref_asserts(&i915->runtime_pm);

        /* disable master interrupt before clearing iir  */
        de_ier = raw_reg_read(regs, DEIER);
        raw_reg_write(regs, DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);

        /* Disable south interrupts. We'll only write to SDEIIR once, so further
         * interrupts will be stored on its back queue, and then we'll be
         * able to process them after we restore SDEIER (as soon as we restore
         * it, we'll get an interrupt if SDEIIR still has something to process
         * due to its back queue). */
        if (!HAS_PCH_NOP(i915)) {
                sde_ier = raw_reg_read(regs, SDEIER);
                raw_reg_write(regs, SDEIER, 0);
        }

        /* Find, clear, then process each source of interrupt */

        gt_iir = raw_reg_read(regs, GTIIR);
        if (gt_iir) {
                raw_reg_write(regs, GTIIR, gt_iir);
                if (GRAPHICS_VER(i915) >= 6)
                        gen6_gt_irq_handler(to_gt(i915), gt_iir);
                else
                        gen5_gt_irq_handler(to_gt(i915), gt_iir);
                ret = IRQ_HANDLED;
        }

        de_iir = raw_reg_read(regs, DEIIR);
        if (de_iir) {
                raw_reg_write(regs, DEIIR, de_iir);
                if (DISPLAY_VER(i915) >= 7)
                        ivb_display_irq_handler(i915, de_iir);
                else
                        ilk_display_irq_handler(i915, de_iir);
                ret = IRQ_HANDLED;
        }

        if (GRAPHICS_VER(i915) >= 6) {
                u32 pm_iir = raw_reg_read(regs, GEN6_PMIIR);
                if (pm_iir) {
                        raw_reg_write(regs, GEN6_PMIIR, pm_iir);
                        gen6_rps_irq_handler(&to_gt(i915)->rps, pm_iir);
                        ret = IRQ_HANDLED;
                }
        }

        raw_reg_write(regs, DEIER, de_ier);
        if (sde_ier)
                raw_reg_write(regs, SDEIER, sde_ier);

        pmu_irq_stats(i915, ret);

        /* IRQs are synced during runtime_suspend, we don't require a wakeref */
        enable_rpm_wakeref_asserts(&i915->runtime_pm);

        return ret;
}

static inline u32 gen8_master_intr_disable(void __iomem * const regs)
{
        raw_reg_write(regs, GEN8_MASTER_IRQ, 0);

        /*
         * Now with master disabled, get a sample of level indications
         * for this interrupt. Indications will be cleared on related acks.
         * New indications can and will light up during processing,
         * and will generate a new interrupt after the master is re-enabled.
         */
        return raw_reg_read(regs, GEN8_MASTER_IRQ);
}

static inline void gen8_master_intr_enable(void __iomem * const regs)
{
        raw_reg_write(regs, GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
}

static irqreturn_t gen8_irq_handler(int irq, void *arg)
{
        struct drm_i915_private *dev_priv = arg;
        void __iomem * const regs = intel_uncore_regs(&dev_priv->uncore);
        u32 master_ctl;

        if (!intel_irqs_enabled(dev_priv))
                return IRQ_NONE;

        master_ctl = gen8_master_intr_disable(regs);
        if (!master_ctl) {
                gen8_master_intr_enable(regs);
                return IRQ_NONE;
        }

        /* Find, queue (onto bottom-halves), then clear each source */
        gen8_gt_irq_handler(to_gt(dev_priv), master_ctl);

        /* IRQs are synced during runtime_suspend, we don't require a wakeref */
        if (master_ctl & ~GEN8_GT_IRQS) {
                disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
                gen8_de_irq_handler(dev_priv, master_ctl);
                enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
        }

        gen8_master_intr_enable(regs);

        pmu_irq_stats(dev_priv, IRQ_HANDLED);

        return IRQ_HANDLED;
}

static inline u32 gen11_master_intr_disable(void __iomem * const regs)
{
        raw_reg_write(regs, GEN11_GFX_MSTR_IRQ, 0);

        /*
         * Now with master disabled, get a sample of level indications
         * for this interrupt. Indications will be cleared on related acks.
         * New indications can and will light up during processing,
         * and will generate a new interrupt after the master is re-enabled.
         */
        return raw_reg_read(regs, GEN11_GFX_MSTR_IRQ);
}

static inline void gen11_master_intr_enable(void __iomem * const regs)
{
        raw_reg_write(regs, GEN11_GFX_MSTR_IRQ, GEN11_MASTER_IRQ);
}

static irqreturn_t gen11_irq_handler(int irq, void *arg)
{
        struct drm_i915_private *i915 = arg;
        void __iomem * const regs = intel_uncore_regs(&i915->uncore);
        struct intel_gt *gt = to_gt(i915);
        u32 master_ctl;
        u32 gu_misc_iir;

        if (!intel_irqs_enabled(i915))
                return IRQ_NONE;

        master_ctl = gen11_master_intr_disable(regs);
        if (!master_ctl) {
                gen11_master_intr_enable(regs);
                return IRQ_NONE;
        }

        /* Find, queue (onto bottom-halves), then clear each source */
        gen11_gt_irq_handler(gt, master_ctl);

        /* IRQs are synced during runtime_suspend, we don't require a wakeref */
        if (master_ctl & GEN11_DISPLAY_IRQ)
                gen11_display_irq_handler(i915);

        gu_misc_iir = gen11_gu_misc_irq_ack(i915, master_ctl);

        gen11_master_intr_enable(regs);

        gen11_gu_misc_irq_handler(i915, gu_misc_iir);

        pmu_irq_stats(i915, IRQ_HANDLED);

        return IRQ_HANDLED;
}

static inline u32 dg1_master_intr_disable(void __iomem * const regs)
{
        u32 val;

        /* First disable interrupts */
        raw_reg_write(regs, DG1_MSTR_TILE_INTR, 0);

        /* Get the indication levels and ack the master unit */
        val = raw_reg_read(regs, DG1_MSTR_TILE_INTR);
        if (unlikely(!val))
                return 0;

        raw_reg_write(regs, DG1_MSTR_TILE_INTR, val);

        return val;
}

static inline void dg1_master_intr_enable(void __iomem * const regs)
{
        raw_reg_write(regs, DG1_MSTR_TILE_INTR, DG1_MSTR_IRQ);
}

static irqreturn_t dg1_irq_handler(int irq, void *arg)
{
        struct drm_i915_private * const i915 = arg;
        struct intel_gt *gt = to_gt(i915);
        void __iomem * const regs = intel_uncore_regs(gt->uncore);
        u32 master_tile_ctl, master_ctl;
        u32 gu_misc_iir;

        if (!intel_irqs_enabled(i915))
                return IRQ_NONE;

        master_tile_ctl = dg1_master_intr_disable(regs);
        if (!master_tile_ctl) {
                dg1_master_intr_enable(regs);
                return IRQ_NONE;
        }

        /* FIXME: we only support tile 0 for now. */
        if (master_tile_ctl & DG1_MSTR_TILE(0)) {
                master_ctl = raw_reg_read(regs, GEN11_GFX_MSTR_IRQ);
                raw_reg_write(regs, GEN11_GFX_MSTR_IRQ, master_ctl);
        } else {
                drm_err(&i915->drm, "Tile not supported: 0x%08x\n",
                        master_tile_ctl);
                dg1_master_intr_enable(regs);
                return IRQ_NONE;
        }

        gen11_gt_irq_handler(gt, master_ctl);

        if (master_ctl & GEN11_DISPLAY_IRQ)
                gen11_display_irq_handler(i915);

        gu_misc_iir = gen11_gu_misc_irq_ack(i915, master_ctl);

        dg1_master_intr_enable(regs);

        gen11_gu_misc_irq_handler(i915, gu_misc_iir);

        pmu_irq_stats(i915, IRQ_HANDLED);

        return IRQ_HANDLED;
}

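/*
 * Reset the south display engine (PCH) interrupts. This is a no-op on
 * PCH_NOP hardware; on CPT/LPT the sticky SERR_INT bits are also cleared.
 */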
static void ibx_irq_reset(struct drm_i915_private *dev_priv)
{
        struct intel_uncore *uncore = &dev_priv->uncore;

        if (HAS_PCH_NOP(dev_priv))
                return;

        gen2_irq_reset(uncore, SDE_IRQ_REGS);

        if (HAS_PCH_CPT(dev_priv) || HAS_PCH_LPT(dev_priv))
                intel_uncore_write(&dev_priv->uncore, SERR_INT, 0xffffffff);
}

/* drm_dma.h hooks */
static void ilk_irq_reset(struct drm_i915_private *dev_priv)
{
        struct intel_uncore *uncore = &dev_priv->uncore;

        gen2_irq_reset(uncore, DE_IRQ_REGS);
        dev_priv->irq_mask = ~0u;

        if (GRAPHICS_VER(dev_priv) == 7)
                intel_uncore_write(uncore, GEN7_ERR_INT, 0xffffffff);

        if (IS_HASWELL(dev_priv)) {
                intel_uncore_write(uncore, EDP_PSR_IMR, 0xffffffff);
                intel_uncore_write(uncore, EDP_PSR_IIR, 0xffffffff);
        }

        gen5_gt_irq_reset(to_gt(dev_priv));

        ibx_irq_reset(dev_priv);
}

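/*
 * Disable the VLV master interrupt, then reset the GT interrupts and,
 * under the irq_lock, the display interrupt state.
 */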
static void valleyview_irq_reset(struct drm_i915_private *dev_priv)
{
        intel_uncore_write(&dev_priv->uncore, VLV_MASTER_IER, 0);
        intel_uncore_posting_read(&dev_priv->uncore, VLV_MASTER_IER);

        gen5_gt_irq_reset(to_gt(dev_priv));

        spin_lock_irq(&dev_priv->irq_lock);
        vlv_display_irq_reset(dev_priv);
        spin_unlock_irq(&dev_priv->irq_lock);
}

static void gen8_irq_reset(struct drm_i915_private *dev_priv)
{
        struct intel_uncore *uncore = &dev_priv->uncore;

        gen8_master_intr_disable(intel_uncore_regs(uncore));

        gen8_gt_irq_reset(to_gt(dev_priv));
        gen8_display_irq_reset(dev_priv);
        gen2_irq_reset(uncore, GEN8_PCU_IRQ_REGS);

        if (HAS_PCH_SPLIT(dev_priv))
                ibx_irq_reset(dev_priv);
}

static void gen11_irq_reset(struct drm_i915_private *dev_priv)
{
        struct intel_gt *gt = to_gt(dev_priv);
        struct intel_uncore *uncore = gt->uncore;

        gen11_master_intr_disable(intel_uncore_regs(&dev_priv->uncore));

        gen11_gt_irq_reset(gt);
        gen11_display_irq_reset(dev_priv);

        gen2_irq_reset(uncore, GEN11_GU_MISC_IRQ_REGS);
        gen2_irq_reset(uncore, GEN8_PCU_IRQ_REGS);
}

static void dg1_irq_reset(struct drm_i915_private *dev_priv)
{
        struct intel_uncore *uncore = &dev_priv->uncore;
        struct intel_gt *gt;
        unsigned int i;

        dg1_master_intr_disable(intel_uncore_regs(&dev_priv->uncore));

        for_each_gt(gt, dev_priv, i)
                gen11_gt_irq_reset(gt);

        gen11_display_irq_reset(dev_priv);

        gen2_irq_reset(uncore, GEN11_GU_MISC_IRQ_REGS);
        gen2_irq_reset(uncore, GEN8_PCU_IRQ_REGS);

        intel_uncore_write(uncore, GEN11_GFX_MSTR_IRQ, ~0);
}

static void cherryview_irq_reset(struct drm_i915_private *dev_priv)
{
        struct intel_uncore *uncore = &dev_priv->uncore;

        intel_uncore_write(uncore, GEN8_MASTER_IRQ, 0);
        intel_uncore_posting_read(&dev_priv->uncore, GEN8_MASTER_IRQ);

        gen8_gt_irq_reset(to_gt(dev_priv));

        gen2_irq_reset(uncore, GEN8_PCU_IRQ_REGS);

        spin_lock_irq(&dev_priv->irq_lock);
        vlv_display_irq_reset(dev_priv);
        spin_unlock_irq(&dev_priv->irq_lock);
}

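/*
 * Postinstall hooks: reprogram the interrupt masks and enables after a
 * reset, and re-enable the top-level master interrupt where the hardware
 * has one.
 */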
static void ilk_irq_postinstall(struct drm_i915_private *dev_priv)
{
        gen5_gt_irq_postinstall(to_gt(dev_priv));

        ilk_de_irq_postinstall(dev_priv);
}

static void valleyview_irq_postinstall(struct drm_i915_private *dev_priv)
{
        gen5_gt_irq_postinstall(to_gt(dev_priv));

        spin_lock_irq(&dev_priv->irq_lock);
        vlv_display_irq_postinstall(dev_priv);
        spin_unlock_irq(&dev_priv->irq_lock);

        intel_uncore_write(&dev_priv->uncore, VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE);
        intel_uncore_posting_read(&dev_priv->uncore, VLV_MASTER_IER);
}

static void gen8_irq_postinstall(struct drm_i915_private *dev_priv)
{
        gen8_gt_irq_postinstall(to_gt(dev_priv));
        gen8_de_irq_postinstall(dev_priv);

        gen8_master_intr_enable(intel_uncore_regs(&dev_priv->uncore));
}

static void gen11_irq_postinstall(struct drm_i915_private *dev_priv)
{
        struct intel_gt *gt = to_gt(dev_priv);
        struct intel_uncore *uncore = gt->uncore;
        u32 gu_misc_masked = GEN11_GU_MISC_GSE;

        gen11_gt_irq_postinstall(gt);
        gen11_de_irq_postinstall(dev_priv);

        gen2_irq_init(uncore, GEN11_GU_MISC_IRQ_REGS, ~gu_misc_masked, gu_misc_masked);

        gen11_master_intr_enable(intel_uncore_regs(uncore));
        intel_uncore_posting_read(&dev_priv->uncore, GEN11_GFX_MSTR_IRQ);
}

static void dg1_irq_postinstall(struct drm_i915_private *dev_priv)
{
        struct intel_uncore *uncore = &dev_priv->uncore;
        u32 gu_misc_masked = GEN11_GU_MISC_GSE;
        struct intel_gt *gt;
        unsigned int i;

        for_each_gt(gt, dev_priv, i)
                gen11_gt_irq_postinstall(gt);

        gen2_irq_init(uncore, GEN11_GU_MISC_IRQ_REGS, ~gu_misc_masked, gu_misc_masked);

        dg1_de_irq_postinstall(dev_priv);

        dg1_master_intr_enable(intel_uncore_regs(uncore));
        intel_uncore_posting_read(uncore, DG1_MSTR_TILE_INTR);
}

static void cherryview_irq_postinstall(struct drm_i915_private *dev_priv)
{
        gen8_gt_irq_postinstall(to_gt(dev_priv));

        spin_lock_irq(&dev_priv->irq_lock);
        vlv_display_irq_postinstall(dev_priv);
        spin_unlock_irq(&dev_priv->irq_lock);

        intel_uncore_write(&dev_priv->uncore, GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
        intel_uncore_posting_read(&dev_priv->uncore, GEN8_MASTER_IRQ);
}

static u32 i9xx_error_mask(struct drm_i915_private *i915)
{
        /*
         * On gen2/3 FBC generates (seemingly spurious)
         * display INVALID_GTT/INVALID_GTT_PTE table errors.
         *
         * Also gen3 bspec has this to say:
         * "DISPA_INVALID_GTT_PTE
         *  [DevNapa] : Reserved. This bit does not reflect the page
         *              table error for the display plane A."
         *
         * Unfortunately we can't mask off individual PGTBL_ER bits,
         * so we just have to mask off all page table errors via EMR.
         */
        if (HAS_FBC(i915))
                return ~I915_ERROR_MEMORY_REFRESH;
        else
                return ~(I915_ERROR_PAGE_TABLE |
                         I915_ERROR_MEMORY_REFRESH);
}

static void i9xx_error_irq_ack(struct drm_i915_private *dev_priv,
                               u32 *eir, u32 *eir_stuck)
{
        u32 emr;

        *eir = intel_uncore_read(&dev_priv->uncore, EIR);
        intel_uncore_write(&dev_priv->uncore, EIR, *eir);

        *eir_stuck = intel_uncore_read(&dev_priv->uncore, EIR);
        if (*eir_stuck == 0)
                return;

        /*
         * Toggle all EMR bits to make sure we get an edge
         * in the ISR master error bit if we don't clear
         * all the EIR bits. Otherwise the edge triggered
         * IIR on i965/g4x wouldn't notice that an interrupt
         * is still pending. Also some EIR bits can't be
         * cleared except by handling the underlying error
         * (or by a GPU reset) so we mask any bit that
         * remains set.
         */
        emr = intel_uncore_read(&dev_priv->uncore, EMR);
        intel_uncore_write(&dev_priv->uncore, EMR, 0xffffffff);
        intel_uncore_write(&dev_priv->uncore, EMR, emr | *eir_stuck);
}

static void i9xx_error_irq_handler(struct drm_i915_private *dev_priv,
                                   u32 eir, u32 eir_stuck)
{
        drm_dbg(&dev_priv->drm, "Master Error, EIR 0x%08x\n", eir);

        if (eir_stuck)
                drm_dbg(&dev_priv->drm, "EIR stuck: 0x%08x, masked\n",
                        eir_stuck);

        drm_dbg(&dev_priv->drm, "PGTBL_ER: 0x%08x\n",
                intel_uncore_read(&dev_priv->uncore, PGTBL_ER));
}

static void i915_irq_reset(struct drm_i915_private *dev_priv)
{
        struct intel_uncore *uncore = &dev_priv->uncore;

        i9xx_display_irq_reset(dev_priv);

        gen2_irq_reset(uncore, GEN2_IRQ_REGS);
        dev_priv->irq_mask = ~0u;
}

static void i915_irq_postinstall(struct drm_i915_private *dev_priv)
{
        struct intel_uncore *uncore = &dev_priv->uncore;
        u32 enable_mask;

        intel_uncore_write(uncore, EMR, i9xx_error_mask(dev_priv));

        dev_priv->irq_mask =
                ~(I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
                  I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
                  I915_MASTER_ERROR_INTERRUPT);

        enable_mask =
                I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
                I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
                I915_MASTER_ERROR_INTERRUPT |
                I915_USER_INTERRUPT;

        if (DISPLAY_VER(dev_priv) >= 3) {
                dev_priv->irq_mask &= ~I915_ASLE_INTERRUPT;
                enable_mask |= I915_ASLE_INTERRUPT;
        }

        if (I915_HAS_HOTPLUG(dev_priv)) {
                dev_priv->irq_mask &= ~I915_DISPLAY_PORT_INTERRUPT;
                enable_mask |= I915_DISPLAY_PORT_INTERRUPT;
        }

        gen2_irq_init(uncore, GEN2_IRQ_REGS, dev_priv->irq_mask, enable_mask);

        /* Interrupt setup is already guaranteed to be single-threaded; this is
         * just to make the assert_spin_locked check happy. */
        spin_lock_irq(&dev_priv->irq_lock);
        i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
        i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
        spin_unlock_irq(&dev_priv->irq_lock);

        i915_enable_asle_pipestat(dev_priv);
}

static irqreturn_t i915_irq_handler(int irq, void *arg)
{
        struct drm_i915_private *dev_priv = arg;
        irqreturn_t ret = IRQ_NONE;

        if (!intel_irqs_enabled(dev_priv))
                return IRQ_NONE;

        /* IRQs are synced during runtime_suspend, we don't require a wakeref */
        disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);

        do {
                u32 pipe_stats[I915_MAX_PIPES] = {};
                u32 eir = 0, eir_stuck = 0;
                u32 hotplug_status = 0;
                u32 iir;

                iir = intel_uncore_read(&dev_priv->uncore, GEN2_IIR);
                if (iir == 0)
                        break;

                ret = IRQ_HANDLED;

                if (I915_HAS_HOTPLUG(dev_priv) &&
                    iir & I915_DISPLAY_PORT_INTERRUPT)
                        hotplug_status = i9xx_hpd_irq_ack(dev_priv);

                /* Call regardless, as some status bits might not be
                 * signalled in IIR */
                i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats);

                if (iir & I915_MASTER_ERROR_INTERRUPT)
                        i9xx_error_irq_ack(dev_priv, &eir, &eir_stuck);

                intel_uncore_write(&dev_priv->uncore, GEN2_IIR, iir);

                if (iir & I915_USER_INTERRUPT)
                        intel_engine_cs_irq(to_gt(dev_priv)->engine[RCS0], iir);

                if (iir & I915_MASTER_ERROR_INTERRUPT)
                        i9xx_error_irq_handler(dev_priv, eir, eir_stuck);

                if (hotplug_status)
                        i9xx_hpd_irq_handler(dev_priv, hotplug_status);

                i915_pipestat_irq_handler(dev_priv, iir, pipe_stats);
        } while (0);

        pmu_irq_stats(dev_priv, ret);

        enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);

        return ret;
}

static void i965_irq_reset(struct drm_i915_private *dev_priv)
{
        struct intel_uncore *uncore = &dev_priv->uncore;

        i9xx_display_irq_reset(dev_priv);

        gen2_irq_reset(uncore, GEN2_IRQ_REGS);
        dev_priv->irq_mask = ~0u;
}

static u32 i965_error_mask(struct drm_i915_private *i915)
{
        /*
         * Enable some error detection, note the instruction error mask
         * bit is reserved, so we leave it masked.
         *
         * i965 FBC no longer generates spurious GTT errors,
         * so we can always enable the page table errors.
         */
        if (IS_G4X(i915))
                return ~(GM45_ERROR_PAGE_TABLE |
                         GM45_ERROR_MEM_PRIV |
                         GM45_ERROR_CP_PRIV |
                         I915_ERROR_MEMORY_REFRESH);
        else
                return ~(I915_ERROR_PAGE_TABLE |
                         I915_ERROR_MEMORY_REFRESH);
}

static void i965_irq_postinstall(struct drm_i915_private *dev_priv)
{
        struct intel_uncore *uncore = &dev_priv->uncore;
        u32 enable_mask;

        intel_uncore_write(uncore, EMR, i965_error_mask(dev_priv));

        dev_priv->irq_mask =
                ~(I915_ASLE_INTERRUPT |
                  I915_DISPLAY_PORT_INTERRUPT |
                  I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
                  I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
                  I915_MASTER_ERROR_INTERRUPT);

        enable_mask =
                I915_ASLE_INTERRUPT |
                I915_DISPLAY_PORT_INTERRUPT |
                I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
                I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
                I915_MASTER_ERROR_INTERRUPT |
                I915_USER_INTERRUPT;

        if (IS_G4X(dev_priv))
                enable_mask |= I915_BSD_USER_INTERRUPT;

        gen2_irq_init(uncore, GEN2_IRQ_REGS, dev_priv->irq_mask, enable_mask);

        /* Interrupt setup is already guaranteed to be single-threaded; this is
         * just to make the assert_spin_locked check happy. */
        spin_lock_irq(&dev_priv->irq_lock);
        i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
        i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
        i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
        spin_unlock_irq(&dev_priv->irq_lock);

        i915_enable_asle_pipestat(dev_priv);
}

static irqreturn_t i965_irq_handler(int irq, void *arg)
{
        struct drm_i915_private *dev_priv = arg;
        irqreturn_t ret = IRQ_NONE;

        if (!intel_irqs_enabled(dev_priv))
                return IRQ_NONE;

        /* IRQs are synced during runtime_suspend, we don't require a wakeref */
        disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);

        do {
                u32 pipe_stats[I915_MAX_PIPES] = {};
                u32 eir = 0, eir_stuck = 0;
                u32 hotplug_status = 0;
                u32 iir;

                iir = intel_uncore_read(&dev_priv->uncore, GEN2_IIR);
                if (iir == 0)
                        break;

                ret = IRQ_HANDLED;

                if (iir & I915_DISPLAY_PORT_INTERRUPT)
                        hotplug_status = i9xx_hpd_irq_ack(dev_priv);

                /* Call regardless, as some status bits might not be
                 * signalled in IIR */
                i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats);

                if (iir & I915_MASTER_ERROR_INTERRUPT)
                        i9xx_error_irq_ack(dev_priv, &eir, &eir_stuck);

                intel_uncore_write(&dev_priv->uncore, GEN2_IIR, iir);

                if (iir & I915_USER_INTERRUPT)
                        intel_engine_cs_irq(to_gt(dev_priv)->engine[RCS0],
                                            iir);

                if (iir & I915_BSD_USER_INTERRUPT)
                        intel_engine_cs_irq(to_gt(dev_priv)->engine[VCS0],
                                            iir >> 25);

                if (iir & I915_MASTER_ERROR_INTERRUPT)
                        i9xx_error_irq_handler(dev_priv, eir, eir_stuck);

                if (hotplug_status)
                        i9xx_hpd_irq_handler(dev_priv, hotplug_status);

                i965_pipestat_irq_handler(dev_priv, iir, pipe_stats);
        } while (0);

        pmu_irq_stats(dev_priv, ret);

        enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);

        return ret;
}

/**
 * intel_irq_init - initializes irq support
 * @dev_priv: i915 device instance
 *
 * This function initializes all the irq support including work items, timers
 * and all the vtables. It does not set up the interrupt itself though.
 */
void intel_irq_init(struct drm_i915_private *dev_priv)
{
        int i;

        INIT_WORK(&dev_priv->l3_parity.error_work, ivb_parity_work);
        for (i = 0; i < MAX_L3_SLICES; ++i)
                dev_priv->l3_parity.remap_info[i] = NULL;

        /* pre-gen11 the guc irqs bits are in the upper 16 bits of the pm reg */
        if (HAS_GT_UC(dev_priv) && GRAPHICS_VER(dev_priv) < 11)
                to_gt(dev_priv)->pm_guc_events = GUC_INTR_GUC2HOST << 16;
}

/**
 * intel_irq_fini - deinitializes IRQ support
 * @i915: i915 device instance
 *
 * This function deinitializes all the IRQ support.
 */
void intel_irq_fini(struct drm_i915_private *i915)
{
        int i;

        for (i = 0; i < MAX_L3_SLICES; ++i)
                kfree(i915->l3_parity.remap_info[i]);
}

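/*
 * Select the top-level interrupt handler for the platform: GMCH platforms
 * (CHV/VLV/gen4 and earlier) use the i9xx-style handlers, while the rest
 * dispatch by graphics IP version (dg1/gen11/gen8/ilk).
 */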
static irq_handler_t intel_irq_handler(struct drm_i915_private *dev_priv)
{
        if (HAS_GMCH(dev_priv)) {
                if (IS_CHERRYVIEW(dev_priv))
                        return cherryview_irq_handler;
                else if (IS_VALLEYVIEW(dev_priv))
                        return valleyview_irq_handler;
                else if (GRAPHICS_VER(dev_priv) == 4)
                        return i965_irq_handler;
                else
                        return i915_irq_handler;
        } else {
                if (GRAPHICS_VER_FULL(dev_priv) >= IP_VER(12, 10))
                        return dg1_irq_handler;
                else if (GRAPHICS_VER(dev_priv) >= 11)
                        return gen11_irq_handler;
                else if (GRAPHICS_VER(dev_priv) >= 8)
                        return gen8_irq_handler;
                else
                        return ilk_irq_handler;
        }
}

static void intel_irq_reset(struct drm_i915_private *dev_priv)
{
        if (HAS_GMCH(dev_priv)) {
                if (IS_CHERRYVIEW(dev_priv))
                        cherryview_irq_reset(dev_priv);
                else if (IS_VALLEYVIEW(dev_priv))
                        valleyview_irq_reset(dev_priv);
                else if (GRAPHICS_VER(dev_priv) == 4)
                        i965_irq_reset(dev_priv);
                else
                        i915_irq_reset(dev_priv);
        } else {
                if (GRAPHICS_VER_FULL(dev_priv) >= IP_VER(12, 10))
                        dg1_irq_reset(dev_priv);
                else if (GRAPHICS_VER(dev_priv) >= 11)
                        gen11_irq_reset(dev_priv);
                else if (GRAPHICS_VER(dev_priv) >= 8)
                        gen8_irq_reset(dev_priv);
                else
                        ilk_irq_reset(dev_priv);
        }
}

static void intel_irq_postinstall(struct drm_i915_private *dev_priv)
{
        if (HAS_GMCH(dev_priv)) {
                if (IS_CHERRYVIEW(dev_priv))
                        cherryview_irq_postinstall(dev_priv);
                else if (IS_VALLEYVIEW(dev_priv))
                        valleyview_irq_postinstall(dev_priv);
                else if (GRAPHICS_VER(dev_priv) == 4)
                        i965_irq_postinstall(dev_priv);
                else
                        i915_irq_postinstall(dev_priv);
        } else {
                if (GRAPHICS_VER_FULL(dev_priv) >= IP_VER(12, 10))
                        dg1_irq_postinstall(dev_priv);
                else if (GRAPHICS_VER(dev_priv) >= 11)
                        gen11_irq_postinstall(dev_priv);
                else if (GRAPHICS_VER(dev_priv) >= 8)
                        gen8_irq_postinstall(dev_priv);
                else
                        ilk_irq_postinstall(dev_priv);
        }
}

/**
 * intel_irq_install - enables the hardware interrupt
 * @dev_priv: i915 device instance
 *
 * This function enables the hardware interrupt handling, but leaves the hotplug
 * handling still disabled. It is called after intel_irq_init().
 *
 * In the driver load and resume code we need working interrupts in a few places
 * but don't want to deal with the hassle of concurrent probe and hotplug
 * workers. Hence the split into this two-stage approach.
 */
int intel_irq_install(struct drm_i915_private *dev_priv)
{
        int irq = to_pci_dev(dev_priv->drm.dev)->irq;
        int ret;

        /*
         * We enable some interrupt sources in our postinstall hooks, so mark
         * interrupts as enabled _before_ actually enabling them to avoid
         * special cases in our ordering checks.
         */
        dev_priv->irqs_enabled = true;

        intel_irq_reset(dev_priv);

        ret = request_irq(irq, intel_irq_handler(dev_priv),
                          IRQF_SHARED, DRIVER_NAME, dev_priv);
        if (ret < 0) {
                dev_priv->irqs_enabled = false;
                return ret;
        }

        intel_irq_postinstall(dev_priv);

        return ret;
}

/**
 * intel_irq_uninstall - finalizes all irq handling
 * @dev_priv: i915 device instance
 *
 * This stops interrupt and hotplug handling, and unregisters and frees all
 * resources acquired in the init functions.
 */
void intel_irq_uninstall(struct drm_i915_private *dev_priv)
{
        int irq = to_pci_dev(dev_priv->drm.dev)->irq;

        if (drm_WARN_ON(&dev_priv->drm, !dev_priv->irqs_enabled))
                return;

        intel_irq_reset(dev_priv);

        free_irq(irq, dev_priv);

        intel_hpd_cancel_work(dev_priv);
        dev_priv->irqs_enabled = false;
}

/**
 * intel_irq_suspend - Suspend interrupts
 * @i915: i915 device instance
 *
 * This function is used to disable interrupts at runtime.
 */
void intel_irq_suspend(struct drm_i915_private *i915)
{
        intel_irq_reset(i915);
        i915->irqs_enabled = false;
        intel_synchronize_irq(i915);
}

/**
 * intel_irq_resume - Resume interrupts
 * @i915: i915 device instance
 *
 * This function is used to enable interrupts at runtime.
 */
void intel_irq_resume(struct drm_i915_private *i915)
{
        i915->irqs_enabled = true;
        intel_irq_reset(i915);
        intel_irq_postinstall(i915);
}

bool intel_irqs_enabled(struct drm_i915_private *dev_priv)
{
        return dev_priv->irqs_enabled;
}

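/* Wait for any in-flight invocation of our interrupt handler to finish. */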
void intel_synchronize_irq(struct drm_i915_private *i915)
{
        synchronize_irq(to_pci_dev(i915->drm.dev)->irq);
}

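/*
 * Like intel_synchronize_irq(), but only waits for the hard IRQ part of
 * the handler, without waiting for any threaded handlers on the line.
 */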
void intel_synchronize_hardirq(struct drm_i915_private *i915)
{
        synchronize_hardirq(to_pci_dev(i915->drm.dev)->irq);
}