// SPDX-License-Identifier: GPL-2.0+
/* Copyright (C) 2014-2018 Broadcom */

/**
 * DOC: Interrupt management for the V3D engine
 *
 * When we take a bin, render, or TFU done interrupt, we need to
 * signal the fence for that job so that the scheduler can queue up
 * the next one and unblock any waiters.
 *
 * When we take the binner out of memory interrupt, we need to
 * allocate some new memory and pass it to the binner so that the
 * current job can make progress.
 */

#include "v3d_drv.h"
#include "v3d_regs.h"
#include "v3d_trace.h"

#define V3D_CORE_IRQS ((u32)(V3D_INT_OUTOMEM |	\
			     V3D_INT_FLDONE |	\
			     V3D_INT_FRDONE |	\
			     V3D_INT_GMPV))

#define V3D_HUB_IRQS ((u32)(V3D_HUB_INT_MMU_WRV |	\
			    V3D_HUB_INT_MMU_PTI |	\
			    V3D_HUB_INT_MMU_CAP |	\
			    V3D_HUB_INT_TFUC))

static irqreturn_t
v3d_hub_irq(int irq, void *arg);
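
/* Deferred work scheduled from the binner out-of-memory interrupt:
 * allocates a fresh 256KB overflow BO and points the binner pool at it
 * so the current bin job can keep making progress.
 */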
static void
v3d_overflow_mem_work(struct work_struct *work)
{
	struct v3d_dev *v3d =
		container_of(work, struct v3d_dev, overflow_mem_work);
	struct drm_device *dev = &v3d->drm;
	struct v3d_bo *bo = v3d_bo_create(dev, NULL /* XXX: GMP */, 256 * 1024);
	struct drm_gem_object *obj;
	unsigned long irqflags;

	if (IS_ERR(bo)) {
		DRM_ERROR("Couldn't allocate binner overflow mem\n");
		return;
	}
	obj = &bo->base.base;

	/* We lost a race, and our work task came in after the bin job
	 * completed and exited.  This can happen because the HW
	 * signals OOM before it's fully OOM, so the binner might just
	 * barely complete.
	 *
	 * If we lose the race and our work task comes in after a new
	 * bin job got scheduled, that's fine.  We'll just give them
	 * some binner pool anyway.
	 */
	spin_lock_irqsave(&v3d->job_lock, irqflags);
	if (!v3d->bin_job) {
		spin_unlock_irqrestore(&v3d->job_lock, irqflags);
		goto out;
	}

	drm_gem_object_get(obj);
	list_add_tail(&bo->unref_head, &v3d->bin_job->unref_list);
	spin_unlock_irqrestore(&v3d->job_lock, irqflags);

	V3D_CORE_WRITE(0, V3D_PTB_BPOA, bo->node.start << PAGE_SHIFT);
	V3D_CORE_WRITE(0, V3D_PTB_BPOS, obj->size);

out:
	drm_gem_object_put_unlocked(obj);
}
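
/* Handler for the per-core interrupt line: binner out-of-memory, bin
 * (BCL) done, render (RCL) done, and GMP violations.
 */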
static irqreturn_t
v3d_irq(int irq, void *arg)
{
	struct v3d_dev *v3d = arg;
	u32 intsts;
	irqreturn_t status = IRQ_NONE;

	intsts = V3D_CORE_READ(0, V3D_CTL_INT_STS);

	/* Acknowledge the interrupts we're handling here. */
	V3D_CORE_WRITE(0, V3D_CTL_INT_CLR, intsts);

	if (intsts & V3D_INT_OUTOMEM) {
		/* Note that the OOM status is edge signaled, so the
		 * interrupt won't happen again until we actually
		 * add more memory.  Also, as of V3D 4.1, FLDONE won't
		 * be reported until any OOM state has been cleared.
		 */
		schedule_work(&v3d->overflow_mem_work);
		status = IRQ_HANDLED;
	}
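
	/* Bin (BCL) job finished: signal its fence so the scheduler can
	 * queue the next job and unblock any waiters.
	 */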
	if (intsts & V3D_INT_FLDONE) {
		struct v3d_fence *fence =
			to_v3d_fence(v3d->bin_job->bin.irq_fence);

		trace_v3d_bcl_irq(&v3d->drm, fence->seqno);
		dma_fence_signal(&fence->base);
		status = IRQ_HANDLED;
	}
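
	/* Render (RCL) job finished: signal its fence likewise. */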
	if (intsts & V3D_INT_FRDONE) {
		struct v3d_fence *fence =
			to_v3d_fence(v3d->render_job->render.irq_fence);

		trace_v3d_rcl_irq(&v3d->drm, fence->seqno);
		dma_fence_signal(&fence->base);
		status = IRQ_HANDLED;
	}

	/* We shouldn't be triggering these if we have GMP in
	 * always-allowed mode.
	 */
	if (intsts & V3D_INT_GMPV)
		dev_err(v3d->dev, "GMP violation\n");

	/* V3D 4.2 wires the hub and core IRQs together, so if we
	 * didn't see the common one then check hub for MMU IRQs.
	 */
	if (v3d->single_irq_line && status == IRQ_NONE)
		return v3d_hub_irq(irq, arg);

	return status;
}
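
/* Handler for the hub-level interrupts shared across cores: TFU job
 * completion and MMU errors.
 */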
static irqreturn_t
v3d_hub_irq(int irq, void *arg)
{
	struct v3d_dev *v3d = arg;
	u32 intsts;
	irqreturn_t status = IRQ_NONE;

	intsts = V3D_READ(V3D_HUB_INT_STS);

	/* Acknowledge the interrupts we're handling here. */
	V3D_WRITE(V3D_HUB_INT_CLR, intsts);

	if (intsts & V3D_HUB_INT_TFUC) {
		struct v3d_fence *fence =
			to_v3d_fence(v3d->tfu_job->irq_fence);

		trace_v3d_tfu_irq(&v3d->drm, fence->seqno);
		dma_fence_signal(&fence->base);
		status = IRQ_HANDLED;
	}

	if (intsts & (V3D_HUB_INT_MMU_WRV |
		      V3D_HUB_INT_MMU_PTI |
		      V3D_HUB_INT_MMU_CAP)) {
		u32 axi_id = V3D_READ(V3D_MMU_VIO_ID);
		u64 vio_addr = (u64)V3D_READ(V3D_MMU_VIO_ADDR) << 8;

		dev_err(v3d->dev, "MMU error from client %d at 0x%08llx%s%s%s\n",
			axi_id, (long long)vio_addr,
			((intsts & V3D_HUB_INT_MMU_WRV) ?
			 ", write violation" : ""),
			((intsts & V3D_HUB_INT_MMU_PTI) ?
			 ", pte invalid" : ""),
			((intsts & V3D_HUB_INT_MMU_CAP) ?
			 ", cap exceeded" : ""));
		status = IRQ_HANDLED;
	}

	return status;
}
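
/* Request the platform IRQ line(s) and install the handlers: a core-0
 * line plus a hub line when the platform exposes two interrupts, or a
 * single shared line otherwise.
 */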
int
v3d_irq_init(struct v3d_dev *v3d)
{
	int irq1, ret, core;

	INIT_WORK(&v3d->overflow_mem_work, v3d_overflow_mem_work);

	/* Clear any pending interrupts someone might have left around
	 * for us.
	 */
	for (core = 0; core < v3d->cores; core++)
		V3D_CORE_WRITE(core, V3D_CTL_INT_CLR, V3D_CORE_IRQS);
	V3D_WRITE(V3D_HUB_INT_CLR, V3D_HUB_IRQS);

	irq1 = platform_get_irq(v3d->pdev, 1);
	if (irq1 == -EPROBE_DEFER)
		return irq1;
	if (irq1 > 0) {
		ret = devm_request_irq(v3d->dev, irq1,
				       v3d_irq, IRQF_SHARED,
				       "v3d_core0", v3d);
		if (ret)
			goto fail;
		ret = devm_request_irq(v3d->dev, platform_get_irq(v3d->pdev, 0),
				       v3d_hub_irq, IRQF_SHARED,
				       "v3d_hub", v3d);
		if (ret)
			goto fail;
	} else {
		v3d->single_irq_line = true;

		ret = devm_request_irq(v3d->dev, platform_get_irq(v3d->pdev, 0),
				       v3d_irq, IRQF_SHARED,
				       "v3d", v3d);
		if (ret)
			goto fail;
	}

	v3d_irq_enable(v3d);
	return 0;

fail:
	if (ret != -EPROBE_DEFER)
		dev_err(v3d->dev, "IRQ setup failed: %d\n", ret);
	return ret;
}
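
/* Unmask the interrupts the handlers above service; called from
 * v3d_irq_init() and again from v3d_irq_reset() after a GPU reset.
 */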
void
v3d_irq_enable(struct v3d_dev *v3d)
{
	int core;

	/* Enable our set of interrupts, masking out any others. */
	for (core = 0; core < v3d->cores; core++) {
		V3D_CORE_WRITE(core, V3D_CTL_INT_MSK_SET, ~V3D_CORE_IRQS);
		V3D_CORE_WRITE(core, V3D_CTL_INT_MSK_CLR, V3D_CORE_IRQS);
	}

	V3D_WRITE(V3D_HUB_INT_MSK_SET, ~V3D_HUB_IRQS);
	V3D_WRITE(V3D_HUB_INT_MSK_CLR, V3D_HUB_IRQS);
}
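
/* Mask all interrupts, ack anything still pending, and make sure the
 * deferred overflow-memory work is not left running.
 */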
void
v3d_irq_disable(struct v3d_dev *v3d)
{
	int core;

	/* Disable all interrupts. */
	for (core = 0; core < v3d->cores; core++)
		V3D_CORE_WRITE(core, V3D_CTL_INT_MSK_SET, ~0);
	V3D_WRITE(V3D_HUB_INT_MSK_SET, ~0);

	/* Clear any pending interrupts we might have left. */
	for (core = 0; core < v3d->cores; core++)
		V3D_CORE_WRITE(core, V3D_CTL_INT_CLR, V3D_CORE_IRQS);
	V3D_WRITE(V3D_HUB_INT_CLR, V3D_HUB_IRQS);

	cancel_work_sync(&v3d->overflow_mem_work);
}

/** Reinitializes interrupt registers when a GPU reset is performed. */
void v3d_irq_reset(struct v3d_dev *v3d)
{
	v3d_irq_enable(v3d);
}