// SPDX-License-Identifier: GPL-2.0+
/* Copyright (C) 2014-2018 Broadcom */

/**
 * DOC: Interrupt management for the V3D engine
 *
 * When we take a binning or rendering flush done interrupt, we need
 * to signal the fence for that job so that the scheduler can queue up
 * the next one and unblock any waiters.
 *
 * When we take the binner out of memory interrupt, we need to
 * allocate some new memory and pass it to the binner so that the
 * current job can make progress.
 */

#include "v3d_drv.h"
#include "v3d_regs.h"

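/* Set of per-core interrupts we handle: binner out-of-memory, bin and
 * render flush-done, and GMP violations.
 */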
#define V3D_CORE_IRQS ((u32)(V3D_INT_OUTOMEM |  \
                             V3D_INT_FLDONE |   \
                             V3D_INT_FRDONE |   \
                             V3D_INT_GMPV))

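/* Set of hub-level interrupts we handle: MMU write violations, invalid
 * page-table entries, and cap-exceeded faults.
 */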
#define V3D_HUB_IRQS ((u32)(V3D_HUB_INT_MMU_WRV |       \
                            V3D_HUB_INT_MMU_PTI |       \
                            V3D_HUB_INT_MMU_CAP))

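/* Worker scheduled from the OOM interrupt: allocates a new chunk of
 * binner overflow memory and points the binner at it so the current
 * job can make progress.
 */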
static void
v3d_overflow_mem_work(struct work_struct *work)
{
        struct v3d_dev *v3d =
                container_of(work, struct v3d_dev, overflow_mem_work);
        struct drm_device *dev = &v3d->drm;
        struct v3d_bo *bo = v3d_bo_create(dev, NULL /* XXX: GMP */, 256 * 1024);
        unsigned long irqflags;

        if (IS_ERR(bo)) {
                DRM_ERROR("Couldn't allocate binner overflow mem\n");
                return;
        }

        /* If we lost the race and our work task came in after the bin
         * job completed and exited, drop the allocation.  This can
         * happen because the HW signals OOM before it's fully out of
         * memory, so the binner might just barely complete.
         *
         * If we lose the race and our work task comes in after a new
         * bin job got scheduled, that's fine.  We'll just give it
         * some binner pool anyway.
         */
        spin_lock_irqsave(&v3d->job_lock, irqflags);
        if (!v3d->bin_job) {
                spin_unlock_irqrestore(&v3d->job_lock, irqflags);
                goto out;
        }

        drm_gem_object_get(&bo->base);
        list_add_tail(&bo->unref_head, &v3d->bin_job->unref_list);
        spin_unlock_irqrestore(&v3d->job_lock, irqflags);

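        /* Point the binner at the new overflow pool: base address and
         * size.
         */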
        V3D_CORE_WRITE(0, V3D_PTB_BPOA, bo->node.start << PAGE_SHIFT);
        V3D_CORE_WRITE(0, V3D_PTB_BPOS, bo->base.size);

out:
        drm_gem_object_put_unlocked(&bo->base);
}

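/* Per-core interrupt handler: signals job completion fences, kicks off
 * overflow memory allocation on binner OOM, and reports GMP violations.
 */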
static irqreturn_t
v3d_irq(int irq, void *arg)
{
        struct v3d_dev *v3d = arg;
        u32 intsts;
        irqreturn_t status = IRQ_NONE;

        intsts = V3D_CORE_READ(0, V3D_CTL_INT_STS);

        /* Acknowledge the interrupts we're handling here. */
        V3D_CORE_WRITE(0, V3D_CTL_INT_CLR, intsts);

        if (intsts & V3D_INT_OUTOMEM) {
                /* Note that the OOM status is edge signaled, so the
                 * interrupt won't happen again until we actually
                 * add more memory.
                 */
                schedule_work(&v3d->overflow_mem_work);
                status = IRQ_HANDLED;
        }

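        /* Signal the done fence of the finished bin/render job so the
         * scheduler can queue up the next one and unblock any waiters.
         */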
        if (intsts & V3D_INT_FLDONE) {
                v3d->queue[V3D_BIN].finished_seqno++;
                dma_fence_signal(v3d->bin_job->bin.done_fence);
                status = IRQ_HANDLED;
        }

        if (intsts & V3D_INT_FRDONE) {
                v3d->queue[V3D_RENDER].finished_seqno++;
                dma_fence_signal(v3d->render_job->render.done_fence);
                status = IRQ_HANDLED;
        }

        /* We shouldn't be triggering these if we have GMP in
         * always-allowed mode.
         */
        if (intsts & V3D_INT_GMPV)
                dev_err(v3d->dev, "GMP violation\n");

        return status;
}

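/* Hub interrupt handler: decodes and reports MMU faults. */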
static irqreturn_t
v3d_hub_irq(int irq, void *arg)
{
        struct v3d_dev *v3d = arg;
        u32 intsts;
        irqreturn_t status = IRQ_NONE;

        intsts = V3D_READ(V3D_HUB_INT_STS);

        /* Acknowledge the interrupts we're handling here. */
        V3D_WRITE(V3D_HUB_INT_CLR, intsts);

        if (intsts & (V3D_HUB_INT_MMU_WRV |
                      V3D_HUB_INT_MMU_PTI |
                      V3D_HUB_INT_MMU_CAP)) {
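                /* The MMU fault registers report the offending AXI
                 * client ID and the violating address, which the
                 * register stores shifted down by 8 bits.
                 */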
                u32 axi_id = V3D_READ(V3D_MMU_VIO_ID);
                u64 vio_addr = (u64)V3D_READ(V3D_MMU_VIO_ADDR) << 8;

                dev_err(v3d->dev, "MMU error from client %d at 0x%08llx%s%s%s\n",
                        axi_id, (long long)vio_addr,
                        ((intsts & V3D_HUB_INT_MMU_WRV) ?
                         ", write violation" : ""),
                        ((intsts & V3D_HUB_INT_MMU_PTI) ?
                         ", pte invalid" : ""),
                        ((intsts & V3D_HUB_INT_MMU_CAP) ?
                         ", cap exceeded" : ""));
                status = IRQ_HANDLED;
        }

        return status;
}

void
v3d_irq_init(struct v3d_dev *v3d)
{
        int ret, core;

        INIT_WORK(&v3d->overflow_mem_work, v3d_overflow_mem_work);

        /* Clear any pending interrupts someone might have left around
         * for us.
         */
        for (core = 0; core < v3d->cores; core++)
                V3D_CORE_WRITE(core, V3D_CTL_INT_CLR, V3D_CORE_IRQS);
        V3D_WRITE(V3D_HUB_INT_CLR, V3D_HUB_IRQS);

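        /* The hub IRQ is platform IRQ 0 and the core 0 IRQ is platform
         * IRQ 1; both lines may be shared with other devices.
         */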
        ret = devm_request_irq(v3d->dev, platform_get_irq(v3d->pdev, 0),
                               v3d_hub_irq, IRQF_SHARED,
                               "v3d_hub", v3d);
        if (ret)
                dev_err(v3d->dev, "hub IRQ setup failed: %d\n", ret);

        ret = devm_request_irq(v3d->dev, platform_get_irq(v3d->pdev, 1),
                               v3d_irq, IRQF_SHARED,
                               "v3d_core0", v3d);
        if (ret)
                dev_err(v3d->dev, "core0 IRQ setup failed: %d\n", ret);

        v3d_irq_enable(v3d);
}

void
v3d_irq_enable(struct v3d_dev *v3d)
{
        int core;

        /* Enable our set of interrupts, masking out any others. */
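        /* Writing a bit to MSK_SET masks (disables) that interrupt
         * source, while writing it to MSK_CLR unmasks it.
         */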
        for (core = 0; core < v3d->cores; core++) {
                V3D_CORE_WRITE(core, V3D_CTL_INT_MSK_SET, ~V3D_CORE_IRQS);
                V3D_CORE_WRITE(core, V3D_CTL_INT_MSK_CLR, V3D_CORE_IRQS);
        }

        V3D_WRITE(V3D_HUB_INT_MSK_SET, ~V3D_HUB_IRQS);
        V3D_WRITE(V3D_HUB_INT_MSK_CLR, V3D_HUB_IRQS);
}

void
v3d_irq_disable(struct v3d_dev *v3d)
{
        int core;

        /* Disable all interrupts. */
        for (core = 0; core < v3d->cores; core++)
                V3D_CORE_WRITE(core, V3D_CTL_INT_MSK_SET, ~0);
        V3D_WRITE(V3D_HUB_INT_MSK_SET, ~0);

        /* Clear any pending interrupts we might have left. */
        for (core = 0; core < v3d->cores; core++)
                V3D_CORE_WRITE(core, V3D_CTL_INT_CLR, V3D_CORE_IRQS);
        V3D_WRITE(V3D_HUB_INT_CLR, V3D_HUB_IRQS);

        cancel_work_sync(&v3d->overflow_mem_work);
}

/** Reinitializes interrupt registers when a GPU reset is performed. */
void v3d_irq_reset(struct v3d_dev *v3d)
{
        v3d_irq_enable(v3d);
}