// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2016-2017 The Linux Foundation. All rights reserved.
 */

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/cpumask.h>
#include <linux/firmware/qcom/qcom_scm.h>
#include <linux/pm_opp.h>
#include <linux/nvmem-consumer.h>
#include <linux/slab.h>
#include "msm_gem.h"
#include "msm_mmu.h"
#include "a5xx_gpu.h"

extern bool hang_debug;
static void a5xx_dump(struct msm_gpu *gpu);

#define GPU_PAS_ID 13

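/*
 * Ask the CP to update the RPTR shadow in memory: emit a CP_WHERE_AM_I
 * packet pointing at this ring's shadow slot (only when the microcode
 * supports it).
 */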
static void update_shadow_rptr(struct msm_gpu *gpu, struct msm_ringbuffer *ring)
{
        struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
        struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu);

        if (a5xx_gpu->has_whereami) {
                OUT_PKT7(ring, CP_WHERE_AM_I, 2);
                OUT_RING(ring, lower_32_bits(shadowptr(a5xx_gpu, ring)));
                OUT_RING(ring, upper_32_bits(shadowptr(a5xx_gpu, ring)));
        }
}

void a5xx_flush(struct msm_gpu *gpu, struct msm_ringbuffer *ring,
                bool sync)
{
        struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
        struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu);
        uint32_t wptr;
        unsigned long flags;

        /*
         * Most flush operations need to issue a WHERE_AM_I opcode to sync up
         * the rptr shadow
         */
        if (sync)
                update_shadow_rptr(gpu, ring);

        spin_lock_irqsave(&ring->preempt_lock, flags);

        /* Copy the shadow to the actual register */
        ring->cur = ring->next;

        /* Make sure to wrap wptr if we need to */
        wptr = get_wptr(ring);

        spin_unlock_irqrestore(&ring->preempt_lock, flags);

        /* Make sure everything is posted before making a decision */
        mb();

        /* Update HW if this is the current ring and we are not in preempt */
        if (a5xx_gpu->cur_ring == ring && !a5xx_in_preempt(a5xx_gpu))
                gpu_write(gpu, REG_A5XX_CP_RB_WPTR, wptr);
}

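/*
 * "sudo" submit path (CONFIG_DRM_MSM_GPU_SUDO): copy the command stream
 * contents directly into the ringbuffer instead of launching them as
 * indirect buffers, then idle the GPU and retire the submit by hand.
 */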
static void a5xx_submit_in_rb(struct msm_gpu *gpu, struct msm_gem_submit *submit)
{
        struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
        struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu);
        struct msm_ringbuffer *ring = submit->ring;
        struct drm_gem_object *obj;
        uint32_t *ptr, dwords;
        unsigned int i;

        for (i = 0; i < submit->nr_cmds; i++) {
                switch (submit->cmd[i].type) {
                case MSM_SUBMIT_CMD_IB_TARGET_BUF:
                        break;
                case MSM_SUBMIT_CMD_CTX_RESTORE_BUF:
                        if (ring->cur_ctx_seqno == submit->queue->ctx->seqno)
                                break;
                        fallthrough;
                case MSM_SUBMIT_CMD_BUF:
                        /* copy commands into RB: */
                        obj = submit->bos[submit->cmd[i].idx].obj;
                        dwords = submit->cmd[i].size;

                        ptr = msm_gem_get_vaddr(obj);

                        /* _get_vaddr() shouldn't fail at this point,
                         * since we've already mapped it once in
                         * submit_reloc()
                         */
                        if (WARN_ON(IS_ERR_OR_NULL(ptr)))
                                return;

                        for (i = 0; i < dwords; i++) {
                                /* normally the OUT_PKTn() would wait
                                 * for space for the packet.  But since
                                 * we just OUT_RING() the whole thing,
                                 * need to call adreno_wait_ring()
                                 * ourself:
                                 */
                                adreno_wait_ring(ring, 1);
                                OUT_RING(ring, ptr[i]);
                        }

                        msm_gem_put_vaddr(obj);

                        break;
                }
        }

        a5xx_gpu->last_seqno[ring->id] = submit->seqno;
        a5xx_flush(gpu, ring, true);
        a5xx_preempt_trigger(gpu);

        /* we might not necessarily have a cmd from userspace to
         * trigger an event to know that submit has completed, so
         * do this manually:
         */
        a5xx_idle(gpu, ring);
        ring->memptrs->fence = submit->seqno;
        msm_gpu_retire(gpu);
}

static void a5xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
{
        struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
        struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu);
        struct msm_ringbuffer *ring = submit->ring;
        unsigned int i, ibs = 0;

        if (IS_ENABLED(CONFIG_DRM_MSM_GPU_SUDO) && submit->in_rb) {
                ring->cur_ctx_seqno = 0;
                a5xx_submit_in_rb(gpu, submit);
                return;
        }

        OUT_PKT7(ring, CP_PREEMPT_ENABLE_GLOBAL, 1);
        OUT_RING(ring, 0x02);

        /* Turn off protected mode to write to special registers */
        OUT_PKT7(ring, CP_SET_PROTECTED_MODE, 1);
        OUT_RING(ring, 0);

        /* Set the save preemption record for the ring/command */
        OUT_PKT4(ring, REG_A5XX_CP_CONTEXT_SWITCH_SAVE_ADDR_LO, 2);
        OUT_RING(ring, lower_32_bits(a5xx_gpu->preempt_iova[submit->ring->id]));
        OUT_RING(ring, upper_32_bits(a5xx_gpu->preempt_iova[submit->ring->id]));

        /* Turn back on protected mode */
        OUT_PKT7(ring, CP_SET_PROTECTED_MODE, 1);
        OUT_RING(ring, 1);

        /*
         * Disable local preemption by default because it requires
         * user-space to be aware of it and provide additional handling
         * to restore rendering state or do various flushes on switch.
         */
        OUT_PKT7(ring, CP_PREEMPT_ENABLE_LOCAL, 1);
        OUT_RING(ring, 0x0);

        /* Allow CP_CONTEXT_SWITCH_YIELD packets in the IB2 */
        OUT_PKT7(ring, CP_YIELD_ENABLE, 1);
        OUT_RING(ring, 0x02);

        /* Submit the commands */
        for (i = 0; i < submit->nr_cmds; i++) {
                switch (submit->cmd[i].type) {
                case MSM_SUBMIT_CMD_IB_TARGET_BUF:
                        break;
                case MSM_SUBMIT_CMD_CTX_RESTORE_BUF:
                        if (ring->cur_ctx_seqno == submit->queue->ctx->seqno)
                                break;
                        fallthrough;
                case MSM_SUBMIT_CMD_BUF:
                        OUT_PKT7(ring, CP_INDIRECT_BUFFER_PFE, 3);
                        OUT_RING(ring, lower_32_bits(submit->cmd[i].iova));
                        OUT_RING(ring, upper_32_bits(submit->cmd[i].iova));
                        OUT_RING(ring, submit->cmd[i].size);
                        ibs++;
                        break;
                }

                /*
                 * Periodically update shadow-wptr if needed, so that we
                 * can see partial progress of submits with large # of
                 * cmds.. otherwise we could needlessly stall waiting for
                 * ringbuffer state, simply due to looking at a shadow
                 * rptr value that has not been updated
                 */
                if ((ibs % 32) == 0)
                        update_shadow_rptr(gpu, ring);
        }

        /*
         * Write the render mode to NULL (0) to indicate to the CP that the IBs
         * are done rendering - otherwise a lucky preemption would start
         * replaying from the last checkpoint
         */
        OUT_PKT7(ring, CP_SET_RENDER_MODE, 5);
        OUT_RING(ring, 0);
        OUT_RING(ring, 0);
        OUT_RING(ring, 0);
        OUT_RING(ring, 0);
        OUT_RING(ring, 0);

        /* Turn off IB level preemptions */
        OUT_PKT7(ring, CP_YIELD_ENABLE, 1);
        OUT_RING(ring, 0x01);

        /* Write the fence to the scratch register */
        OUT_PKT4(ring, REG_A5XX_CP_SCRATCH_REG(2), 1);
        OUT_RING(ring, submit->seqno);
        a5xx_gpu->last_seqno[ring->id] = submit->seqno;

        /*
         * Execute a CACHE_FLUSH_TS event. This will ensure that the
         * timestamp is written to the memory and then triggers the interrupt
         */
        OUT_PKT7(ring, CP_EVENT_WRITE, 4);
        OUT_RING(ring, CP_EVENT_WRITE_0_EVENT(CACHE_FLUSH_TS) |
                CP_EVENT_WRITE_0_IRQ);
        OUT_RING(ring, lower_32_bits(rbmemptr(ring, fence)));
        OUT_RING(ring, upper_32_bits(rbmemptr(ring, fence)));
        OUT_RING(ring, submit->seqno);

        /* Yield the floor on command completion */
        OUT_PKT7(ring, CP_CONTEXT_SWITCH_YIELD, 4);
        /*
         * If dword[2:1] are non zero, they specify an address for the CP to
         * write the value of dword[3] to on preemption complete. Write 0 to
         * skip the write
         */
        OUT_RING(ring, 0x00);
        OUT_RING(ring, 0x00);
        /* Data value - not used if the address above is 0 */
        OUT_RING(ring, 0x01);
        /* Set bit 0 to trigger an interrupt on preempt complete */
        OUT_RING(ring, 0x01);

        /* A WHERE_AM_I packet is not needed after a YIELD */
        a5xx_flush(gpu, ring, false);

        /* Check to see if we need to start preemption */
        a5xx_preempt_trigger(gpu);
}

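/*
 * Hardware clock gating (HWCG) setup: per-SKU tables of register/value
 * pairs programmed by a5xx_set_hwcg().
 */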
static const struct adreno_five_hwcg_regs {
        u32 offset;
        u32 value;
} a5xx_hwcg[] = {
        {REG_A5XX_RBBM_CLOCK_CNTL_SP0, 0x02222222},
        {REG_A5XX_RBBM_CLOCK_CNTL_SP1, 0x02222222},
        {REG_A5XX_RBBM_CLOCK_CNTL_SP2, 0x02222222},
        {REG_A5XX_RBBM_CLOCK_CNTL_SP3, 0x02222222},
        {REG_A5XX_RBBM_CLOCK_CNTL2_SP0, 0x02222220},
        {REG_A5XX_RBBM_CLOCK_CNTL2_SP1, 0x02222220},
        {REG_A5XX_RBBM_CLOCK_CNTL2_SP2, 0x02222220},
        {REG_A5XX_RBBM_CLOCK_CNTL2_SP3, 0x02222220},
        {REG_A5XX_RBBM_CLOCK_HYST_SP0, 0x0000F3CF},
        {REG_A5XX_RBBM_CLOCK_HYST_SP1, 0x0000F3CF},
        {REG_A5XX_RBBM_CLOCK_HYST_SP2, 0x0000F3CF},
        {REG_A5XX_RBBM_CLOCK_HYST_SP3, 0x0000F3CF},
        {REG_A5XX_RBBM_CLOCK_DELAY_SP0, 0x00000080},
        {REG_A5XX_RBBM_CLOCK_DELAY_SP1, 0x00000080},
        {REG_A5XX_RBBM_CLOCK_DELAY_SP2, 0x00000080},
        {REG_A5XX_RBBM_CLOCK_DELAY_SP3, 0x00000080},
        {REG_A5XX_RBBM_CLOCK_CNTL_TP0, 0x22222222},
        {REG_A5XX_RBBM_CLOCK_CNTL_TP1, 0x22222222},
        {REG_A5XX_RBBM_CLOCK_CNTL_TP2, 0x22222222},
        {REG_A5XX_RBBM_CLOCK_CNTL_TP3, 0x22222222},
        {REG_A5XX_RBBM_CLOCK_CNTL2_TP0, 0x22222222},
        {REG_A5XX_RBBM_CLOCK_CNTL2_TP1, 0x22222222},
        {REG_A5XX_RBBM_CLOCK_CNTL2_TP2, 0x22222222},
        {REG_A5XX_RBBM_CLOCK_CNTL2_TP3, 0x22222222},
        {REG_A5XX_RBBM_CLOCK_CNTL3_TP0, 0x00002222},
        {REG_A5XX_RBBM_CLOCK_CNTL3_TP1, 0x00002222},
        {REG_A5XX_RBBM_CLOCK_CNTL3_TP2, 0x00002222},
        {REG_A5XX_RBBM_CLOCK_CNTL3_TP3, 0x00002222},
        {REG_A5XX_RBBM_CLOCK_HYST_TP0, 0x77777777},
        {REG_A5XX_RBBM_CLOCK_HYST_TP1, 0x77777777},
        {REG_A5XX_RBBM_CLOCK_HYST_TP2, 0x77777777},
        {REG_A5XX_RBBM_CLOCK_HYST_TP3, 0x77777777},
        {REG_A5XX_RBBM_CLOCK_HYST2_TP0, 0x77777777},
        {REG_A5XX_RBBM_CLOCK_HYST2_TP1, 0x77777777},
        {REG_A5XX_RBBM_CLOCK_HYST2_TP2, 0x77777777},
        {REG_A5XX_RBBM_CLOCK_HYST2_TP3, 0x77777777},
        {REG_A5XX_RBBM_CLOCK_HYST3_TP0, 0x00007777},
        {REG_A5XX_RBBM_CLOCK_HYST3_TP1, 0x00007777},
        {REG_A5XX_RBBM_CLOCK_HYST3_TP2, 0x00007777},
        {REG_A5XX_RBBM_CLOCK_HYST3_TP3, 0x00007777},
        {REG_A5XX_RBBM_CLOCK_DELAY_TP0, 0x11111111},
        {REG_A5XX_RBBM_CLOCK_DELAY_TP1, 0x11111111},
        {REG_A5XX_RBBM_CLOCK_DELAY_TP2, 0x11111111},
        {REG_A5XX_RBBM_CLOCK_DELAY_TP3, 0x11111111},
        {REG_A5XX_RBBM_CLOCK_DELAY2_TP0, 0x11111111},
        {REG_A5XX_RBBM_CLOCK_DELAY2_TP1, 0x11111111},
        {REG_A5XX_RBBM_CLOCK_DELAY2_TP2, 0x11111111},
        {REG_A5XX_RBBM_CLOCK_DELAY2_TP3, 0x11111111},
        {REG_A5XX_RBBM_CLOCK_DELAY3_TP0, 0x00001111},
        {REG_A5XX_RBBM_CLOCK_DELAY3_TP1, 0x00001111},
        {REG_A5XX_RBBM_CLOCK_DELAY3_TP2, 0x00001111},
        {REG_A5XX_RBBM_CLOCK_DELAY3_TP3, 0x00001111},
        {REG_A5XX_RBBM_CLOCK_CNTL_UCHE, 0x22222222},
        {REG_A5XX_RBBM_CLOCK_CNTL2_UCHE, 0x22222222},
        {REG_A5XX_RBBM_CLOCK_CNTL3_UCHE, 0x22222222},
        {REG_A5XX_RBBM_CLOCK_CNTL4_UCHE, 0x00222222},
        {REG_A5XX_RBBM_CLOCK_HYST_UCHE, 0x00444444},
        {REG_A5XX_RBBM_CLOCK_DELAY_UCHE, 0x00000002},
        {REG_A5XX_RBBM_CLOCK_CNTL_RB0, 0x22222222},
        {REG_A5XX_RBBM_CLOCK_CNTL_RB1, 0x22222222},
        {REG_A5XX_RBBM_CLOCK_CNTL_RB2, 0x22222222},
        {REG_A5XX_RBBM_CLOCK_CNTL_RB3, 0x22222222},
        {REG_A5XX_RBBM_CLOCK_CNTL2_RB0, 0x00222222},
        {REG_A5XX_RBBM_CLOCK_CNTL2_RB1, 0x00222222},
        {REG_A5XX_RBBM_CLOCK_CNTL2_RB2, 0x00222222},
        {REG_A5XX_RBBM_CLOCK_CNTL2_RB3, 0x00222222},
        {REG_A5XX_RBBM_CLOCK_CNTL_CCU0, 0x00022220},
        {REG_A5XX_RBBM_CLOCK_CNTL_CCU1, 0x00022220},
        {REG_A5XX_RBBM_CLOCK_CNTL_CCU2, 0x00022220},
        {REG_A5XX_RBBM_CLOCK_CNTL_CCU3, 0x00022220},
        {REG_A5XX_RBBM_CLOCK_CNTL_RAC, 0x05522222},
        {REG_A5XX_RBBM_CLOCK_CNTL2_RAC, 0x00505555},
        {REG_A5XX_RBBM_CLOCK_HYST_RB_CCU0, 0x04040404},
        {REG_A5XX_RBBM_CLOCK_HYST_RB_CCU1, 0x04040404},
        {REG_A5XX_RBBM_CLOCK_HYST_RB_CCU2, 0x04040404},
        {REG_A5XX_RBBM_CLOCK_HYST_RB_CCU3, 0x04040404},
        {REG_A5XX_RBBM_CLOCK_HYST_RAC, 0x07444044},
        {REG_A5XX_RBBM_CLOCK_DELAY_RB_CCU_L1_0, 0x00000002},
        {REG_A5XX_RBBM_CLOCK_DELAY_RB_CCU_L1_1, 0x00000002},
        {REG_A5XX_RBBM_CLOCK_DELAY_RB_CCU_L1_2, 0x00000002},
        {REG_A5XX_RBBM_CLOCK_DELAY_RB_CCU_L1_3, 0x00000002},
        {REG_A5XX_RBBM_CLOCK_DELAY_RAC, 0x00010011},
        {REG_A5XX_RBBM_CLOCK_CNTL_TSE_RAS_RBBM, 0x04222222},
        {REG_A5XX_RBBM_CLOCK_MODE_GPC, 0x02222222},
        {REG_A5XX_RBBM_CLOCK_MODE_VFD, 0x00002222},
        {REG_A5XX_RBBM_CLOCK_HYST_TSE_RAS_RBBM, 0x00000000},
        {REG_A5XX_RBBM_CLOCK_HYST_GPC, 0x04104004},
        {REG_A5XX_RBBM_CLOCK_HYST_VFD, 0x00000000},
        {REG_A5XX_RBBM_CLOCK_DELAY_HLSQ, 0x00000000},
        {REG_A5XX_RBBM_CLOCK_DELAY_TSE_RAS_RBBM, 0x00004000},
        {REG_A5XX_RBBM_CLOCK_DELAY_GPC, 0x00000200},
        {REG_A5XX_RBBM_CLOCK_DELAY_VFD, 0x00002222}
}, a50x_hwcg[] = {
        {REG_A5XX_RBBM_CLOCK_CNTL_SP0, 0x02222222},
        {REG_A5XX_RBBM_CLOCK_CNTL2_SP0, 0x02222220},
        {REG_A5XX_RBBM_CLOCK_HYST_SP0, 0x0000F3CF},
        {REG_A5XX_RBBM_CLOCK_DELAY_SP0, 0x00000080},
        {REG_A5XX_RBBM_CLOCK_CNTL_TP0, 0x22222222},
        {REG_A5XX_RBBM_CLOCK_CNTL2_TP0, 0x22222222},
        {REG_A5XX_RBBM_CLOCK_CNTL3_TP0, 0x00002222},
        {REG_A5XX_RBBM_CLOCK_HYST_TP0, 0x77777777},
        {REG_A5XX_RBBM_CLOCK_HYST2_TP0, 0x77777777},
        {REG_A5XX_RBBM_CLOCK_HYST3_TP0, 0x00007777},
        {REG_A5XX_RBBM_CLOCK_DELAY_TP0, 0x11111111},
        {REG_A5XX_RBBM_CLOCK_DELAY2_TP0, 0x11111111},
        {REG_A5XX_RBBM_CLOCK_DELAY3_TP0, 0x00001111},
        {REG_A5XX_RBBM_CLOCK_CNTL2_UCHE, 0x22222222},
        {REG_A5XX_RBBM_CLOCK_CNTL3_UCHE, 0x22222222},
        {REG_A5XX_RBBM_CLOCK_CNTL4_UCHE, 0x00222222},
        {REG_A5XX_RBBM_CLOCK_CNTL_UCHE, 0x22222222},
        {REG_A5XX_RBBM_CLOCK_HYST_UCHE, 0x00FFFFF4},
        {REG_A5XX_RBBM_CLOCK_DELAY_UCHE, 0x00000002},
        {REG_A5XX_RBBM_CLOCK_CNTL_RB0, 0x22222222},
        {REG_A5XX_RBBM_CLOCK_CNTL2_RB0, 0x00222222},
        {REG_A5XX_RBBM_CLOCK_CNTL_CCU0, 0x00022220},
        {REG_A5XX_RBBM_CLOCK_CNTL_RAC, 0x05522222},
        {REG_A5XX_RBBM_CLOCK_CNTL2_RAC, 0x00505555},
        {REG_A5XX_RBBM_CLOCK_HYST_RB_CCU0, 0x04040404},
        {REG_A5XX_RBBM_CLOCK_HYST_RAC, 0x07444044},
        {REG_A5XX_RBBM_CLOCK_DELAY_RB_CCU_L1_0, 0x00000002},
        {REG_A5XX_RBBM_CLOCK_DELAY_RAC, 0x00010011},
        {REG_A5XX_RBBM_CLOCK_CNTL_TSE_RAS_RBBM, 0x04222222},
        {REG_A5XX_RBBM_CLOCK_MODE_GPC, 0x02222222},
        {REG_A5XX_RBBM_CLOCK_MODE_VFD, 0x00002222},
        {REG_A5XX_RBBM_CLOCK_HYST_TSE_RAS_RBBM, 0x00000000},
        {REG_A5XX_RBBM_CLOCK_HYST_GPC, 0x04104004},
        {REG_A5XX_RBBM_CLOCK_HYST_VFD, 0x00000000},
        {REG_A5XX_RBBM_CLOCK_DELAY_HLSQ, 0x00000000},
        {REG_A5XX_RBBM_CLOCK_DELAY_TSE_RAS_RBBM, 0x00004000},
        {REG_A5XX_RBBM_CLOCK_DELAY_GPC, 0x00000200},
        {REG_A5XX_RBBM_CLOCK_DELAY_VFD, 0x00002222},
}, a512_hwcg[] = {
        {REG_A5XX_RBBM_CLOCK_CNTL_SP0, 0x02222222},
        {REG_A5XX_RBBM_CLOCK_CNTL_SP1, 0x02222222},
        {REG_A5XX_RBBM_CLOCK_CNTL2_SP0, 0x02222220},
        {REG_A5XX_RBBM_CLOCK_CNTL2_SP1, 0x02222220},
        {REG_A5XX_RBBM_CLOCK_HYST_SP0, 0x0000F3CF},
        {REG_A5XX_RBBM_CLOCK_HYST_SP1, 0x0000F3CF},
        {REG_A5XX_RBBM_CLOCK_DELAY_SP0, 0x00000080},
        {REG_A5XX_RBBM_CLOCK_DELAY_SP1, 0x00000080},
        {REG_A5XX_RBBM_CLOCK_CNTL_TP0, 0x22222222},
        {REG_A5XX_RBBM_CLOCK_CNTL_TP1, 0x22222222},
        {REG_A5XX_RBBM_CLOCK_CNTL2_TP0, 0x22222222},
        {REG_A5XX_RBBM_CLOCK_CNTL2_TP1, 0x22222222},
        {REG_A5XX_RBBM_CLOCK_CNTL3_TP0, 0x00002222},
        {REG_A5XX_RBBM_CLOCK_CNTL3_TP1, 0x00002222},
        {REG_A5XX_RBBM_CLOCK_HYST_TP0, 0x77777777},
        {REG_A5XX_RBBM_CLOCK_HYST_TP1, 0x77777777},
        {REG_A5XX_RBBM_CLOCK_HYST2_TP0, 0x77777777},
        {REG_A5XX_RBBM_CLOCK_HYST2_TP1, 0x77777777},
        {REG_A5XX_RBBM_CLOCK_HYST3_TP0, 0x00007777},
        {REG_A5XX_RBBM_CLOCK_HYST3_TP1, 0x00007777},
        {REG_A5XX_RBBM_CLOCK_DELAY_TP0, 0x11111111},
        {REG_A5XX_RBBM_CLOCK_DELAY_TP1, 0x11111111},
        {REG_A5XX_RBBM_CLOCK_DELAY2_TP0, 0x11111111},
        {REG_A5XX_RBBM_CLOCK_DELAY2_TP1, 0x11111111},
        {REG_A5XX_RBBM_CLOCK_DELAY3_TP0, 0x00001111},
        {REG_A5XX_RBBM_CLOCK_DELAY3_TP1, 0x00001111},
        {REG_A5XX_RBBM_CLOCK_CNTL_UCHE, 0x22222222},
        {REG_A5XX_RBBM_CLOCK_CNTL2_UCHE, 0x22222222},
        {REG_A5XX_RBBM_CLOCK_CNTL3_UCHE, 0x22222222},
        {REG_A5XX_RBBM_CLOCK_CNTL4_UCHE, 0x00222222},
        {REG_A5XX_RBBM_CLOCK_HYST_UCHE, 0x00444444},
        {REG_A5XX_RBBM_CLOCK_DELAY_UCHE, 0x00000002},
        {REG_A5XX_RBBM_CLOCK_CNTL_RB0, 0x22222222},
        {REG_A5XX_RBBM_CLOCK_CNTL_RB1, 0x22222222},
        {REG_A5XX_RBBM_CLOCK_CNTL2_RB0, 0x00222222},
        {REG_A5XX_RBBM_CLOCK_CNTL2_RB1, 0x00222222},
        {REG_A5XX_RBBM_CLOCK_CNTL_CCU0, 0x00022220},
        {REG_A5XX_RBBM_CLOCK_CNTL_CCU1, 0x00022220},
        {REG_A5XX_RBBM_CLOCK_CNTL_RAC, 0x05522222},
        {REG_A5XX_RBBM_CLOCK_CNTL2_RAC, 0x00505555},
        {REG_A5XX_RBBM_CLOCK_HYST_RB_CCU0, 0x04040404},
        {REG_A5XX_RBBM_CLOCK_HYST_RB_CCU1, 0x04040404},
        {REG_A5XX_RBBM_CLOCK_HYST_RAC, 0x07444044},
        {REG_A5XX_RBBM_CLOCK_DELAY_RB_CCU_L1_0, 0x00000002},
        {REG_A5XX_RBBM_CLOCK_DELAY_RB_CCU_L1_1, 0x00000002},
        {REG_A5XX_RBBM_CLOCK_DELAY_RAC, 0x00010011},
        {REG_A5XX_RBBM_CLOCK_CNTL_TSE_RAS_RBBM, 0x04222222},
        {REG_A5XX_RBBM_CLOCK_MODE_GPC, 0x02222222},
        {REG_A5XX_RBBM_CLOCK_MODE_VFD, 0x00002222},
        {REG_A5XX_RBBM_CLOCK_HYST_TSE_RAS_RBBM, 0x00000000},
        {REG_A5XX_RBBM_CLOCK_HYST_GPC, 0x04104004},
        {REG_A5XX_RBBM_CLOCK_HYST_VFD, 0x00000000},
        {REG_A5XX_RBBM_CLOCK_DELAY_HLSQ, 0x00000000},
        {REG_A5XX_RBBM_CLOCK_DELAY_TSE_RAS_RBBM, 0x00004000},
        {REG_A5XX_RBBM_CLOCK_DELAY_GPC, 0x00000200},
        {REG_A5XX_RBBM_CLOCK_DELAY_VFD, 0x00002222},
};

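/* Enable or disable hardware clock gating using the table for this SKU */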
void a5xx_set_hwcg(struct msm_gpu *gpu, bool state)
{
        struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
        const struct adreno_five_hwcg_regs *regs;
        unsigned int i, sz;

        if (adreno_is_a505(adreno_gpu) || adreno_is_a506(adreno_gpu) ||
            adreno_is_a508(adreno_gpu)) {
                regs = a50x_hwcg;
                sz = ARRAY_SIZE(a50x_hwcg);
        } else if (adreno_is_a509(adreno_gpu) || adreno_is_a512(adreno_gpu)) {
                regs = a512_hwcg;
                sz = ARRAY_SIZE(a512_hwcg);
        } else {
                regs = a5xx_hwcg;
                sz = ARRAY_SIZE(a5xx_hwcg);
        }

        for (i = 0; i < sz; i++)
                gpu_write(gpu, regs[i].offset,
                          state ? regs[i].value : 0);

        if (adreno_is_a540(adreno_gpu)) {
                gpu_write(gpu, REG_A5XX_RBBM_CLOCK_DELAY_GPMU, state ? 0x00000770 : 0);
                gpu_write(gpu, REG_A5XX_RBBM_CLOCK_HYST_GPMU, state ? 0x00000004 : 0);
        }

        gpu_write(gpu, REG_A5XX_RBBM_CLOCK_CNTL, state ? 0xAAA8AA00 : 0);
        gpu_write(gpu, REG_A5XX_RBBM_ISDB_CNT, state ? 0x182 : 0x180);
}

static int a5xx_me_init(struct msm_gpu *gpu)
{
        struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
        struct msm_ringbuffer *ring = gpu->rb[0];

        OUT_PKT7(ring, CP_ME_INIT, 8);

        OUT_RING(ring, 0x0000002F);

        /* Enable multiple hardware contexts */
        OUT_RING(ring, 0x00000003);

        /* Enable error detection */
        OUT_RING(ring, 0x20000000);

        /* Don't enable header dump */
        OUT_RING(ring, 0x00000000);
        OUT_RING(ring, 0x00000000);

        /* Specify workarounds for various microcode issues */
        if (adreno_is_a505(adreno_gpu) || adreno_is_a506(adreno_gpu) ||
            adreno_is_a530(adreno_gpu)) {
                /* Workaround for token end syncs
                 * Force a WFI after every direct-render 3D mode draw and every
                 * 2D mode 3 draw
                 */
                OUT_RING(ring, 0x0000000B);
        } else if (adreno_is_a510(adreno_gpu)) {
                /* Workaround for token and syncs */
                OUT_RING(ring, 0x00000001);
        } else {
                /* No workarounds enabled */
                OUT_RING(ring, 0x00000000);
        }

        OUT_RING(ring, 0x00000000);
        OUT_RING(ring, 0x00000000);

        a5xx_flush(gpu, ring, true);
        return a5xx_idle(gpu, ring) ? 0 : -EINVAL;
}

static int a5xx_preempt_start(struct msm_gpu *gpu)
{
        struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
        struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu);
        struct msm_ringbuffer *ring = gpu->rb[0];

        if (gpu->nr_rings == 1)
                return 0;

        /* Turn off protected mode to write to special registers */
        OUT_PKT7(ring, CP_SET_PROTECTED_MODE, 1);
        OUT_RING(ring, 0);

        /* Set the save preemption record for the ring/command */
        OUT_PKT4(ring, REG_A5XX_CP_CONTEXT_SWITCH_SAVE_ADDR_LO, 2);
        OUT_RING(ring, lower_32_bits(a5xx_gpu->preempt_iova[ring->id]));
        OUT_RING(ring, upper_32_bits(a5xx_gpu->preempt_iova[ring->id]));

        /* Turn back on protected mode */
        OUT_PKT7(ring, CP_SET_PROTECTED_MODE, 1);
        OUT_RING(ring, 1);

        OUT_PKT7(ring, CP_PREEMPT_ENABLE_GLOBAL, 1);
        OUT_RING(ring, 0x00);

        OUT_PKT7(ring, CP_PREEMPT_ENABLE_LOCAL, 1);
        OUT_RING(ring, 0x01);

        OUT_PKT7(ring, CP_YIELD_ENABLE, 1);
        OUT_RING(ring, 0x01);

        /* Yield the floor on command completion */
        OUT_PKT7(ring, CP_CONTEXT_SWITCH_YIELD, 4);
        OUT_RING(ring, 0x00);
        OUT_RING(ring, 0x00);
        OUT_RING(ring, 0x01);
        OUT_RING(ring, 0x01);

        /* A WHERE_AM_I packet is not needed after a YIELD is issued */
        a5xx_flush(gpu, ring, false);

        return a5xx_idle(gpu, ring) ? 0 : -EINVAL;
}

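/* Check whether the PFP microcode is new enough to support CP_WHERE_AM_I */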
static void a5xx_ucode_check_version(struct a5xx_gpu *a5xx_gpu,
                struct drm_gem_object *obj)
{
        u32 *buf = msm_gem_get_vaddr(obj);

        if (IS_ERR(buf))
                return;

        /*
         * If the lowest nibble is 0xa that is an indication that this microcode
         * has been patched. The actual version is in dword [3] but we only care
         * about the patchlevel which is the lowest nibble of dword [3]
         */
        if (((buf[0] & 0xf) == 0xa) && (buf[2] & 0xf) >= 1)
                a5xx_gpu->has_whereami = true;

        msm_gem_put_vaddr(obj);
}

static int a5xx_ucode_load(struct msm_gpu *gpu)
{
        struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
        struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu);
        int ret;

        if (!a5xx_gpu->pm4_bo) {
                a5xx_gpu->pm4_bo = adreno_fw_create_bo(gpu,
                        adreno_gpu->fw[ADRENO_FW_PM4], &a5xx_gpu->pm4_iova);

                if (IS_ERR(a5xx_gpu->pm4_bo)) {
                        ret = PTR_ERR(a5xx_gpu->pm4_bo);
                        a5xx_gpu->pm4_bo = NULL;
                        DRM_DEV_ERROR(gpu->dev->dev, "could not allocate PM4: %d\n",
                                ret);
                        return ret;
                }

                msm_gem_object_set_name(a5xx_gpu->pm4_bo, "pm4fw");
        }

        if (!a5xx_gpu->pfp_bo) {
                a5xx_gpu->pfp_bo = adreno_fw_create_bo(gpu,
                        adreno_gpu->fw[ADRENO_FW_PFP], &a5xx_gpu->pfp_iova);

                if (IS_ERR(a5xx_gpu->pfp_bo)) {
                        ret = PTR_ERR(a5xx_gpu->pfp_bo);
                        a5xx_gpu->pfp_bo = NULL;
                        DRM_DEV_ERROR(gpu->dev->dev, "could not allocate PFP: %d\n",
                                ret);
                        return ret;
                }

                msm_gem_object_set_name(a5xx_gpu->pfp_bo, "pfpfw");
                a5xx_ucode_check_version(a5xx_gpu, a5xx_gpu->pfp_bo);
        }

        if (a5xx_gpu->has_whereami) {
                if (!a5xx_gpu->shadow_bo) {
                        a5xx_gpu->shadow = msm_gem_kernel_new(gpu->dev,
                                sizeof(u32) * gpu->nr_rings,
                                MSM_BO_WC | MSM_BO_MAP_PRIV,
                                gpu->aspace, &a5xx_gpu->shadow_bo,
                                &a5xx_gpu->shadow_iova);

                        if (IS_ERR(a5xx_gpu->shadow))
                                return PTR_ERR(a5xx_gpu->shadow);

                        msm_gem_object_set_name(a5xx_gpu->shadow_bo, "shadow");
                }
        } else if (gpu->nr_rings > 1) {
                /* Disable preemption if WHERE_AM_I isn't available */
                a5xx_preempt_fini(gpu);
                gpu->nr_rings = 1;
        }

        return 0;
}

#define SCM_GPU_ZAP_SHADER_RESUME 0

static int a5xx_zap_shader_resume(struct msm_gpu *gpu)
{
        struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
        int ret;

        /*
         * Adreno 506 has the CPZ retention feature and doesn't require the
         * zap shader to be resumed
         */
        if (adreno_is_a506(adreno_gpu))
                return 0;

        ret = qcom_scm_set_remote_state(SCM_GPU_ZAP_SHADER_RESUME, GPU_PAS_ID);
        if (ret)
                DRM_ERROR("%s: zap-shader resume failed: %d\n",
                        gpu->name, ret);

        return ret;
}

static int a5xx_zap_shader_init(struct msm_gpu *gpu)
{
        static bool loaded;
        int ret;

        /*
         * If the zap shader is already loaded into memory we just need to kick
         * the remote processor to reinitialize it
         */
        if (loaded)
                return a5xx_zap_shader_resume(gpu);

        ret = adreno_zap_shader_load(gpu, GPU_PAS_ID);

        loaded = !ret;
        return ret;
}

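/* RBBM interrupt sources that a5xx_irq() knows how to handle */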
#define A5XX_INT_MASK (A5XX_RBBM_INT_0_MASK_RBBM_AHB_ERROR | \
          A5XX_RBBM_INT_0_MASK_RBBM_TRANSFER_TIMEOUT | \
          A5XX_RBBM_INT_0_MASK_RBBM_ME_MS_TIMEOUT | \
          A5XX_RBBM_INT_0_MASK_RBBM_PFP_MS_TIMEOUT | \
          A5XX_RBBM_INT_0_MASK_RBBM_ETS_MS_TIMEOUT | \
          A5XX_RBBM_INT_0_MASK_RBBM_ATB_ASYNC_OVERFLOW | \
          A5XX_RBBM_INT_0_MASK_CP_HW_ERROR | \
          A5XX_RBBM_INT_0_MASK_MISC_HANG_DETECT | \
          A5XX_RBBM_INT_0_MASK_CP_SW | \
          A5XX_RBBM_INT_0_MASK_CP_CACHE_FLUSH_TS | \
          A5XX_RBBM_INT_0_MASK_UCHE_OOB_ACCESS | \
          A5XX_RBBM_INT_0_MASK_GPMU_VOLTAGE_DROOP)

static int a5xx_hw_init(struct msm_gpu *gpu)
{
        struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
        struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu);
        u32 hbb;
        int ret;

        gpu_write(gpu, REG_A5XX_VBIF_ROUND_ROBIN_QOS_ARB, 0x00000003);

        if (adreno_is_a509(adreno_gpu) || adreno_is_a512(adreno_gpu) ||
            adreno_is_a540(adreno_gpu))
                gpu_write(gpu, REG_A5XX_VBIF_GATE_OFF_WRREQ_EN, 0x00000009);

        /* Make all blocks contribute to the GPU BUSY perf counter */
        gpu_write(gpu, REG_A5XX_RBBM_PERFCTR_GPU_BUSY_MASKED, 0xFFFFFFFF);

        /* Enable RBBM error reporting bits */
        gpu_write(gpu, REG_A5XX_RBBM_AHB_CNTL0, 0x00000001);

        if (adreno_gpu->info->quirks & ADRENO_QUIRK_FAULT_DETECT_MASK) {
                /*
                 * Mask out the activity signals from RB1-3 to avoid false
                 * positives
                 */

                gpu_write(gpu, REG_A5XX_RBBM_INTERFACE_HANG_MASK_CNTL11,
                        0xF0000000);
                gpu_write(gpu, REG_A5XX_RBBM_INTERFACE_HANG_MASK_CNTL12,
                        0xFFFFFFFF);
                gpu_write(gpu, REG_A5XX_RBBM_INTERFACE_HANG_MASK_CNTL13,
                        0xFFFFFFFF);
                gpu_write(gpu, REG_A5XX_RBBM_INTERFACE_HANG_MASK_CNTL14,
                        0xFFFFFFFF);
                gpu_write(gpu, REG_A5XX_RBBM_INTERFACE_HANG_MASK_CNTL15,
                        0xFFFFFFFF);
                gpu_write(gpu, REG_A5XX_RBBM_INTERFACE_HANG_MASK_CNTL16,
                        0xFFFFFFFF);
                gpu_write(gpu, REG_A5XX_RBBM_INTERFACE_HANG_MASK_CNTL17,
                        0xFFFFFFFF);
                gpu_write(gpu, REG_A5XX_RBBM_INTERFACE_HANG_MASK_CNTL18,
                        0xFFFFFFFF);
        }

        /* Enable fault detection */
        gpu_write(gpu, REG_A5XX_RBBM_INTERFACE_HANG_INT_CNTL,
                (1 << 30) | 0xFFFF);

        /* Turn on performance counters */
        gpu_write(gpu, REG_A5XX_RBBM_PERFCTR_CNTL, 0x01);

        /* Select CP0 to always count cycles */
        gpu_write(gpu, REG_A5XX_CP_PERFCTR_CP_SEL_0, PERF_CP_ALWAYS_COUNT);

        /* Select RBBM0 to countable 6 to get the busy status for devfreq */
        gpu_write(gpu, REG_A5XX_RBBM_PERFCTR_RBBM_SEL_0, 6);

        /* Increase VFD cache access so LRZ and other data gets evicted less */
        gpu_write(gpu, REG_A5XX_UCHE_CACHE_WAYS, 0x02);

        /* Disable L2 bypass in the UCHE */
        gpu_write(gpu, REG_A5XX_UCHE_TRAP_BASE_LO, lower_32_bits(adreno_gpu->uche_trap_base));
        gpu_write(gpu, REG_A5XX_UCHE_TRAP_BASE_HI, upper_32_bits(adreno_gpu->uche_trap_base));
        gpu_write(gpu, REG_A5XX_UCHE_WRITE_THRU_BASE_LO, lower_32_bits(adreno_gpu->uche_trap_base));
        gpu_write(gpu, REG_A5XX_UCHE_WRITE_THRU_BASE_HI, upper_32_bits(adreno_gpu->uche_trap_base));

        /* Set the GMEM VA range (0 to gpu->gmem) */
        gpu_write(gpu, REG_A5XX_UCHE_GMEM_RANGE_MIN_LO, 0x00100000);
        gpu_write(gpu, REG_A5XX_UCHE_GMEM_RANGE_MIN_HI, 0x00000000);
        gpu_write(gpu, REG_A5XX_UCHE_GMEM_RANGE_MAX_LO,
                0x00100000 + adreno_gpu->info->gmem - 1);
        gpu_write(gpu, REG_A5XX_UCHE_GMEM_RANGE_MAX_HI, 0x00000000);

        if (adreno_is_a505(adreno_gpu) || adreno_is_a506(adreno_gpu) ||
            adreno_is_a508(adreno_gpu) || adreno_is_a510(adreno_gpu)) {
                gpu_write(gpu, REG_A5XX_CP_MEQ_THRESHOLDS, 0x20);
                if (adreno_is_a505(adreno_gpu) || adreno_is_a506(adreno_gpu) ||
                        adreno_is_a508(adreno_gpu))
                        gpu_write(gpu, REG_A5XX_CP_MERCIU_SIZE, 0x400);
                else
                        gpu_write(gpu, REG_A5XX_CP_MERCIU_SIZE, 0x20);
                gpu_write(gpu, REG_A5XX_CP_ROQ_THRESHOLDS_2, 0x40000030);
                gpu_write(gpu, REG_A5XX_CP_ROQ_THRESHOLDS_1, 0x20100D0A);
        } else {
                gpu_write(gpu, REG_A5XX_CP_MEQ_THRESHOLDS, 0x40);
                if (adreno_is_a530(adreno_gpu))
                        gpu_write(gpu, REG_A5XX_CP_MERCIU_SIZE, 0x40);
                else
                        gpu_write(gpu, REG_A5XX_CP_MERCIU_SIZE, 0x400);
                gpu_write(gpu, REG_A5XX_CP_ROQ_THRESHOLDS_2, 0x80000060);
                gpu_write(gpu, REG_A5XX_CP_ROQ_THRESHOLDS_1, 0x40201B16);
        }

        if (adreno_is_a505(adreno_gpu) || adreno_is_a506(adreno_gpu) ||
            adreno_is_a508(adreno_gpu))
                gpu_write(gpu, REG_A5XX_PC_DBG_ECO_CNTL,
                          (0x100 << 11 | 0x100 << 22));
        else if (adreno_is_a509(adreno_gpu) || adreno_is_a510(adreno_gpu) ||
                 adreno_is_a512(adreno_gpu))
                gpu_write(gpu, REG_A5XX_PC_DBG_ECO_CNTL,
                          (0x200 << 11 | 0x200 << 22));
        else
                gpu_write(gpu, REG_A5XX_PC_DBG_ECO_CNTL,
                          (0x400 << 11 | 0x300 << 22));

        if (adreno_gpu->info->quirks & ADRENO_QUIRK_TWO_PASS_USE_WFI)
                gpu_rmw(gpu, REG_A5XX_PC_DBG_ECO_CNTL, 0, (1 << 8));

        /*
         * Disable the RB sampler datapath DP2 clock gating optimization
         * for 1-SP GPUs, as it is enabled by default.
         */
        if (adreno_is_a505(adreno_gpu) || adreno_is_a506(adreno_gpu) ||
            adreno_is_a508(adreno_gpu) || adreno_is_a509(adreno_gpu) ||
            adreno_is_a512(adreno_gpu))
                gpu_rmw(gpu, REG_A5XX_RB_DBG_ECO_CNTL, 0, (1 << 9));

        /* Disable UCHE global filter as SP can invalidate/flush independently */
        gpu_write(gpu, REG_A5XX_UCHE_MODE_CNTL, BIT(29));

        /* Enable USE_RETENTION_FLOPS */
        gpu_write(gpu, REG_A5XX_CP_CHICKEN_DBG, 0x02000000);

        /* Enable ME/PFP split notification */
        gpu_write(gpu, REG_A5XX_RBBM_AHB_CNTL1, 0xA6FFFFFF);

        /*
         * In A5x, the CCU can send the context_done event for a particular
         * context to UCHE (and ultimately the CP) while a valid transaction
         * for that context is still inside the CCU. The CP may then program
         * config registers, causing the pending transaction inside the CCU
         * to be interpreted differently and leading to a GPU fault. This bug
         * is fixed in the latest A510 revision; to enable the fix, bit[11]
         * of RB_DBG_ECO_CNTL must be cleared (the default is 1, i.e. fix
         * disabled). On older A510 revisions this bit is unused.
         */
        if (adreno_is_a510(adreno_gpu))
                gpu_rmw(gpu, REG_A5XX_RB_DBG_ECO_CNTL, (1 << 11), 0);

        /* Enable HWCG */
        a5xx_set_hwcg(gpu, true);

        gpu_write(gpu, REG_A5XX_RBBM_AHB_CNTL2, 0x0000003F);

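        /*
         * The UBWC highest-bank-bit fields below are encoded relative to
         * bank bit 13, hence the subtraction.
         */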
        BUG_ON(adreno_gpu->ubwc_config.highest_bank_bit < 13);
        hbb = adreno_gpu->ubwc_config.highest_bank_bit - 13;

        gpu_write(gpu, REG_A5XX_TPL1_MODE_CNTL, hbb << 7);
        gpu_write(gpu, REG_A5XX_RB_MODE_CNTL, hbb << 1);

        if (adreno_is_a509(adreno_gpu) || adreno_is_a512(adreno_gpu) ||
            adreno_is_a540(adreno_gpu))
                gpu_write(gpu, REG_A5XX_UCHE_DBG_ECO_CNTL_2, hbb);

        /* Disable All flat shading optimization (ALLFLATOPTDIS) */
        gpu_rmw(gpu, REG_A5XX_VPC_DBG_ECO_CNTL, 0, (1 << 10));

        /* Protect registers from the CP */
        gpu_write(gpu, REG_A5XX_CP_PROTECT_CNTL, 0x00000007);

        /* RBBM */
        gpu_write(gpu, REG_A5XX_CP_PROTECT(0), ADRENO_PROTECT_RW(0x04, 4));
        gpu_write(gpu, REG_A5XX_CP_PROTECT(1), ADRENO_PROTECT_RW(0x08, 8));
        gpu_write(gpu, REG_A5XX_CP_PROTECT(2), ADRENO_PROTECT_RW(0x10, 16));
        gpu_write(gpu, REG_A5XX_CP_PROTECT(3), ADRENO_PROTECT_RW(0x20, 32));
        gpu_write(gpu, REG_A5XX_CP_PROTECT(4), ADRENO_PROTECT_RW(0x40, 64));
        gpu_write(gpu, REG_A5XX_CP_PROTECT(5), ADRENO_PROTECT_RW(0x80, 64));

        /* Content protect */
        gpu_write(gpu, REG_A5XX_CP_PROTECT(6),
                ADRENO_PROTECT_RW(REG_A5XX_RBBM_SECVID_TSB_TRUSTED_BASE_LO,
                        16));
        gpu_write(gpu, REG_A5XX_CP_PROTECT(7),
                ADRENO_PROTECT_RW(REG_A5XX_RBBM_SECVID_TRUST_CNTL, 2));

        /* CP */
        gpu_write(gpu, REG_A5XX_CP_PROTECT(8), ADRENO_PROTECT_RW(0x800, 64));
        gpu_write(gpu, REG_A5XX_CP_PROTECT(9), ADRENO_PROTECT_RW(0x840, 8));
        gpu_write(gpu, REG_A5XX_CP_PROTECT(10), ADRENO_PROTECT_RW(0x880, 32));
        gpu_write(gpu, REG_A5XX_CP_PROTECT(11), ADRENO_PROTECT_RW(0xAA0, 1));

        /* RB */
        gpu_write(gpu, REG_A5XX_CP_PROTECT(12), ADRENO_PROTECT_RW(0xCC0, 1));
        gpu_write(gpu, REG_A5XX_CP_PROTECT(13), ADRENO_PROTECT_RW(0xCF0, 2));

        /* VPC */
        gpu_write(gpu, REG_A5XX_CP_PROTECT(14), ADRENO_PROTECT_RW(0xE68, 8));
        gpu_write(gpu, REG_A5XX_CP_PROTECT(15), ADRENO_PROTECT_RW(0xE70, 16));

        /* UCHE */
        gpu_write(gpu, REG_A5XX_CP_PROTECT(16), ADRENO_PROTECT_RW(0xE80, 16));

        /* SMMU */
        gpu_write(gpu, REG_A5XX_CP_PROTECT(17),
                        ADRENO_PROTECT_RW(0x10000, 0x8000));

        gpu_write(gpu, REG_A5XX_RBBM_SECVID_TSB_CNTL, 0);
        /*
         * Disable the trusted memory range - we don't actually support secure
         * memory rendering at this point in time and we don't want to block off
         * part of the virtual memory space.
         */
        gpu_write64(gpu, REG_A5XX_RBBM_SECVID_TSB_TRUSTED_BASE_LO, 0x00000000);
        gpu_write(gpu, REG_A5XX_RBBM_SECVID_TSB_TRUSTED_SIZE, 0x00000000);

        /* Put the GPU into 64 bit by default */
        gpu_write(gpu, REG_A5XX_CP_ADDR_MODE_CNTL, 0x1);
        gpu_write(gpu, REG_A5XX_VSC_ADDR_MODE_CNTL, 0x1);
        gpu_write(gpu, REG_A5XX_GRAS_ADDR_MODE_CNTL, 0x1);
        gpu_write(gpu, REG_A5XX_RB_ADDR_MODE_CNTL, 0x1);
        gpu_write(gpu, REG_A5XX_PC_ADDR_MODE_CNTL, 0x1);
        gpu_write(gpu, REG_A5XX_HLSQ_ADDR_MODE_CNTL, 0x1);
        gpu_write(gpu, REG_A5XX_VFD_ADDR_MODE_CNTL, 0x1);
        gpu_write(gpu, REG_A5XX_VPC_ADDR_MODE_CNTL, 0x1);
        gpu_write(gpu, REG_A5XX_UCHE_ADDR_MODE_CNTL, 0x1);
        gpu_write(gpu, REG_A5XX_SP_ADDR_MODE_CNTL, 0x1);
        gpu_write(gpu, REG_A5XX_TPL1_ADDR_MODE_CNTL, 0x1);
        gpu_write(gpu, REG_A5XX_RBBM_SECVID_TSB_ADDR_MODE_CNTL, 0x1);

        /*
         * A VPC corner case with local memory load/kill leads to corrupt
         * internal state. The normal disable does not work for all A5x chips,
         * so use the following settings to disable it.
         */
        if (adreno_gpu->info->quirks & ADRENO_QUIRK_LMLOADKILL_DISABLE) {
                gpu_rmw(gpu, REG_A5XX_VPC_DBG_ECO_CNTL, 0, BIT(23));
                gpu_rmw(gpu, REG_A5XX_HLSQ_DBG_ECO_CNTL, BIT(18), 0);
        }

        ret = adreno_hw_init(gpu);
        if (ret)
                return ret;

        if (adreno_is_a530(adreno_gpu) || adreno_is_a540(adreno_gpu))
                a5xx_gpmu_ucode_init(gpu);

        gpu_write64(gpu, REG_A5XX_CP_ME_INSTR_BASE_LO, a5xx_gpu->pm4_iova);
        gpu_write64(gpu, REG_A5XX_CP_PFP_INSTR_BASE_LO, a5xx_gpu->pfp_iova);

        /* Set the ringbuffer address */
        gpu_write64(gpu, REG_A5XX_CP_RB_BASE, gpu->rb[0]->iova);

        /*
         * If the microcode supports the WHERE_AM_I opcode then we can use that
         * in lieu of the RPTR shadow and enable preemption. Otherwise, we
         * can't safely use the RPTR shadow or preemption. In either case, the
         * RPTR shadow should be disabled in hardware.
         */
        gpu_write(gpu, REG_A5XX_CP_RB_CNTL,
                MSM_GPU_RB_CNTL_DEFAULT | AXXX_CP_RB_CNTL_NO_UPDATE);

        /* Configure the RPTR shadow if needed: */
        if (a5xx_gpu->shadow_bo) {
                gpu_write64(gpu, REG_A5XX_CP_RB_RPTR_ADDR,
                            shadowptr(a5xx_gpu, gpu->rb[0]));
        }

        a5xx_preempt_hw_init(gpu);

        /* Enable the RBBM interrupts that we handle */
        gpu_write(gpu, REG_A5XX_RBBM_INT_0_MASK, A5XX_INT_MASK);

        /* Clear ME_HALT to start the micro engine */
        gpu_write(gpu, REG_A5XX_CP_PFP_ME_CNTL, 0);
        ret = a5xx_me_init(gpu);
        if (ret)
                return ret;

        ret = a5xx_power_init(gpu);
        if (ret)
                return ret;

        /*
         * Send a pipeline event stat to get misbehaving counters to start
         * ticking correctly
         */
        if (adreno_is_a530(adreno_gpu)) {
                OUT_PKT7(gpu->rb[0], CP_EVENT_WRITE, 1);
                OUT_RING(gpu->rb[0], CP_EVENT_WRITE_0_EVENT(STAT_EVENT));

                a5xx_flush(gpu, gpu->rb[0], true);
                if (!a5xx_idle(gpu, gpu->rb[0]))
                        return -EINVAL;
        }

        /*
         * If the chip we are using supports loading one, try to load a zap
         * shader into the secure world. If successful we can use the CP to
         * switch out of secure mode; if not, we have no recourse but to try
         * to switch ourselves out manually. If we guessed wrong, access to
         * the RBBM_SECVID_TRUST_CNTL register will be blocked and a
         * permissions violation will soon follow.
         */
        ret = a5xx_zap_shader_init(gpu);
        if (!ret) {
                OUT_PKT7(gpu->rb[0], CP_SET_SECURE_MODE, 1);
                OUT_RING(gpu->rb[0], 0x00000000);

                a5xx_flush(gpu, gpu->rb[0], true);
                if (!a5xx_idle(gpu, gpu->rb[0]))
                        return -EINVAL;
        } else if (ret == -ENODEV) {
                /*
                 * This device does not use zap shader (but print a warning
                 * just in case someone got their dt wrong.. hopefully they
                 * have a debug UART to realize the error of their ways...
                 * if you mess this up you are about to crash horribly)
                 */
                dev_warn_once(gpu->dev->dev,
                        "Zap shader not enabled - using SECVID_TRUST_CNTL instead\n");
                gpu_write(gpu, REG_A5XX_RBBM_SECVID_TRUST_CNTL, 0x0);
        } else {
                return ret;
        }

        /* Last step - yield the ringbuffer */
        a5xx_preempt_start(gpu);

        return 0;
}

static void a5xx_recover(struct msm_gpu *gpu)
{
        int i;

        adreno_dump_info(gpu);

        for (i = 0; i < 8; i++) {
                printk("CP_SCRATCH_REG%d: %u\n", i,
                        gpu_read(gpu, REG_A5XX_CP_SCRATCH_REG(i)));
        }

        if (hang_debug)
                a5xx_dump(gpu);

        gpu_write(gpu, REG_A5XX_RBBM_SW_RESET_CMD, 1);
        gpu_read(gpu, REG_A5XX_RBBM_SW_RESET_CMD);
        gpu_write(gpu, REG_A5XX_RBBM_SW_RESET_CMD, 0);
        adreno_recover(gpu);
}

static void a5xx_destroy(struct msm_gpu *gpu)
{
        struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
        struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu);

        DBG("%s", gpu->name);

        a5xx_preempt_fini(gpu);

        if (a5xx_gpu->pm4_bo) {
                msm_gem_unpin_iova(a5xx_gpu->pm4_bo, gpu->aspace);
                drm_gem_object_put(a5xx_gpu->pm4_bo);
        }

        if (a5xx_gpu->pfp_bo) {
                msm_gem_unpin_iova(a5xx_gpu->pfp_bo, gpu->aspace);
                drm_gem_object_put(a5xx_gpu->pfp_bo);
        }

        if (a5xx_gpu->gpmu_bo) {
                msm_gem_unpin_iova(a5xx_gpu->gpmu_bo, gpu->aspace);
                drm_gem_object_put(a5xx_gpu->gpmu_bo);
        }

        if (a5xx_gpu->shadow_bo) {
                msm_gem_unpin_iova(a5xx_gpu->shadow_bo, gpu->aspace);
                drm_gem_object_put(a5xx_gpu->shadow_bo);
        }

        adreno_gpu_cleanup(adreno_gpu);
        kfree(a5xx_gpu);
}

static inline bool _a5xx_check_idle(struct msm_gpu *gpu)
{
        if (gpu_read(gpu, REG_A5XX_RBBM_STATUS) & ~A5XX_RBBM_STATUS_HI_BUSY)
                return false;

        /*
         * Nearly every abnormality ends up pausing the GPU and triggering a
         * fault so we can safely just watch for this one interrupt to fire
         */
        return !(gpu_read(gpu, REG_A5XX_RBBM_INT_0_STATUS) &
                A5XX_RBBM_INT_0_MASK_MISC_HANG_DETECT);
}

bool a5xx_idle(struct msm_gpu *gpu, struct msm_ringbuffer *ring)
{
        struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
        struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu);

        if (ring != a5xx_gpu->cur_ring) {
                WARN(1, "Tried to idle a non-current ringbuffer\n");
                return false;
        }

        /* wait for CP to drain ringbuffer: */
        if (!adreno_idle(gpu, ring))
                return false;

        if (spin_until(_a5xx_check_idle(gpu))) {
                DRM_ERROR("%s: %ps: timeout waiting for GPU to idle: status %8.8X irq %8.8X rptr/wptr %d/%d\n",
                        gpu->name, __builtin_return_address(0),
                        gpu_read(gpu, REG_A5XX_RBBM_STATUS),
                        gpu_read(gpu, REG_A5XX_RBBM_INT_0_STATUS),
                        gpu_read(gpu, REG_A5XX_CP_RB_RPTR),
                        gpu_read(gpu, REG_A5XX_CP_RB_WPTR));
                return false;
        }

        return true;
}

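/*
 * SMMU pagefault callback: report the faulting GPU block (from fsynr1) and
 * the CP scratch registers along with the fault to the common handler.
 */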
1106 static int a5xx_fault_handler(void *arg, unsigned long iova, int flags, void *data)
1107 {
1108         struct msm_gpu *gpu = arg;
1109         struct adreno_smmu_fault_info *info = data;
1110         char block[12] = "unknown";
1111         u32 scratch[] = {
1112                         gpu_read(gpu, REG_A5XX_CP_SCRATCH_REG(4)),
1113                         gpu_read(gpu, REG_A5XX_CP_SCRATCH_REG(5)),
1114                         gpu_read(gpu, REG_A5XX_CP_SCRATCH_REG(6)),
1115                         gpu_read(gpu, REG_A5XX_CP_SCRATCH_REG(7)),
1116         };
1117
1118         if (info)
1119                 snprintf(block, sizeof(block), "%x", info->fsynr1);
1120
1121         return adreno_fault_handler(gpu, iova, flags, info, block, scratch);
1122 }
1123
1124 static void a5xx_cp_err_irq(struct msm_gpu *gpu)
1125 {
1126         u32 status = gpu_read(gpu, REG_A5XX_CP_INTERRUPT_STATUS);
1127
1128         if (status & A5XX_CP_INT_CP_OPCODE_ERROR) {
1129                 u32 val;
1130
1131                 gpu_write(gpu, REG_A5XX_CP_PFP_STAT_ADDR, 0);
1132
1133                 /*
1134                  * REG_A5XX_CP_PFP_STAT_DATA is indexed, and we want index 1 so
1135                  * read it twice
1136                  */
1137
1138                 gpu_read(gpu, REG_A5XX_CP_PFP_STAT_DATA);
1139                 val = gpu_read(gpu, REG_A5XX_CP_PFP_STAT_DATA);
1140
1141                 dev_err_ratelimited(gpu->dev->dev, "CP | opcode error | possible opcode=0x%8.8X\n",
1142                         val);
1143         }
1144
1145         if (status & A5XX_CP_INT_CP_HW_FAULT_ERROR)
1146                 dev_err_ratelimited(gpu->dev->dev, "CP | HW fault | status=0x%8.8X\n",
1147                         gpu_read(gpu, REG_A5XX_CP_HW_FAULT));
1148
1149         if (status & A5XX_CP_INT_CP_DMA_ERROR)
1150                 dev_err_ratelimited(gpu->dev->dev, "CP | DMA error\n");
1151
1152         if (status & A5XX_CP_INT_CP_REGISTER_PROTECTION_ERROR) {
1153                 u32 val = gpu_read(gpu, REG_A5XX_CP_PROTECT_STATUS);
1154
1155                 dev_err_ratelimited(gpu->dev->dev,
1156                         "CP | protected mode error | %s | addr=0x%8.8X | status=0x%8.8X\n",
1157                         val & (1 << 24) ? "WRITE" : "READ",
1158                         (val & 0xFFFFF) >> 2, val);
1159         }
1160
1161         if (status & A5XX_CP_INT_CP_AHB_ERROR) {
1162                 u32 status = gpu_read(gpu, REG_A5XX_CP_AHB_FAULT);
1163                 const char *access[16] = { "reserved", "reserved",
1164                         "timestamp lo", "timestamp hi", "pfp read", "pfp write",
1165                         "", "", "me read", "me write", "", "", "crashdump read",
1166                         "crashdump write" };
1167
1168                 dev_err_ratelimited(gpu->dev->dev,
1169                         "CP | AHB error | addr=%X access=%s error=%d | status=0x%8.8X\n",
1170                         status & 0xFFFFF, access[(status >> 24) & 0xF],
1171                         (status & (1 << 31)), status);
1172         }
1173 }
1174
1175 static void a5xx_rbbm_err_irq(struct msm_gpu *gpu, u32 status)
1176 {
1177         if (status & A5XX_RBBM_INT_0_MASK_RBBM_AHB_ERROR) {
1178                 u32 val = gpu_read(gpu, REG_A5XX_RBBM_AHB_ERROR_STATUS);
1179
1180                 dev_err_ratelimited(gpu->dev->dev,
1181                         "RBBM | AHB bus error | %s | addr=0x%X | ports=0x%X:0x%X\n",
1182                         val & (1 << 28) ? "WRITE" : "READ",
1183                         (val & 0xFFFFF) >> 2, (val >> 20) & 0x3,
1184                         (val >> 24) & 0xF);
1185
1186                 /* Clear the error */
1187                 gpu_write(gpu, REG_A5XX_RBBM_AHB_CMD, (1 << 4));
1188
1189                 /* Clear the interrupt */
1190                 gpu_write(gpu, REG_A5XX_RBBM_INT_CLEAR_CMD,
1191                         A5XX_RBBM_INT_0_MASK_RBBM_AHB_ERROR);
1192         }
1193
1194         if (status & A5XX_RBBM_INT_0_MASK_RBBM_TRANSFER_TIMEOUT)
1195                 dev_err_ratelimited(gpu->dev->dev, "RBBM | AHB transfer timeout\n");
1196
1197         if (status & A5XX_RBBM_INT_0_MASK_RBBM_ME_MS_TIMEOUT)
1198                 dev_err_ratelimited(gpu->dev->dev, "RBBM | ME master split | status=0x%X\n",
1199                         gpu_read(gpu, REG_A5XX_RBBM_AHB_ME_SPLIT_STATUS));
1200
1201         if (status & A5XX_RBBM_INT_0_MASK_RBBM_PFP_MS_TIMEOUT)
1202                 dev_err_ratelimited(gpu->dev->dev, "RBBM | PFP master split | status=0x%X\n",
1203                         gpu_read(gpu, REG_A5XX_RBBM_AHB_PFP_SPLIT_STATUS));
1204
1205         if (status & A5XX_RBBM_INT_0_MASK_RBBM_ETS_MS_TIMEOUT)
1206                 dev_err_ratelimited(gpu->dev->dev, "RBBM | ETS master split | status=0x%X\n",
1207                         gpu_read(gpu, REG_A5XX_RBBM_AHB_ETS_SPLIT_STATUS));
1208
1209         if (status & A5XX_RBBM_INT_0_MASK_RBBM_ATB_ASYNC_OVERFLOW)
1210                 dev_err_ratelimited(gpu->dev->dev, "RBBM | ATB ASYNC overflow\n");
1211
1212         if (status & A5XX_RBBM_INT_0_MASK_RBBM_ATB_BUS_OVERFLOW)
1213                 dev_err_ratelimited(gpu->dev->dev, "RBBM | ATB bus overflow\n");
1214 }
1215
1216 static void a5xx_uche_err_irq(struct msm_gpu *gpu)
1217 {
1218         uint64_t addr = (uint64_t) gpu_read(gpu, REG_A5XX_UCHE_TRAP_LOG_HI);
1219
1220         addr |= gpu_read(gpu, REG_A5XX_UCHE_TRAP_LOG_LO);
1221
1222         dev_err_ratelimited(gpu->dev->dev, "UCHE | Out of bounds access | addr=0x%llX\n",
1223                 addr);
1224 }
1225
1226 static void a5xx_gpmu_err_irq(struct msm_gpu *gpu)
1227 {
1228         dev_err_ratelimited(gpu->dev->dev, "GPMU | voltage droop\n");
1229 }
1230
1231 static void a5xx_fault_detect_irq(struct msm_gpu *gpu)
1232 {
1233         struct drm_device *dev = gpu->dev;
1234         struct msm_ringbuffer *ring = gpu->funcs->active_ring(gpu);
1235
1236         /*
1237          * If stalled on SMMU fault, we could trip the GPU's hang detection,
1238          * but the fault handler will trigger the devcore dump, and we want
1239          * to otherwise resume normally rather than killing the submit, so
1240          * just bail.
1241          */
1242         if (gpu_read(gpu, REG_A5XX_RBBM_STATUS3) & BIT(24))
1243                 return;
1244
1245         DRM_DEV_ERROR(dev->dev, "gpu fault ring %d fence %x status %8.8X rb %4.4x/%4.4x ib1 %16.16llX/%4.4x ib2 %16.16llX/%4.4x\n",
1246                 ring ? ring->id : -1, ring ? ring->fctx->last_fence : 0,
1247                 gpu_read(gpu, REG_A5XX_RBBM_STATUS),
1248                 gpu_read(gpu, REG_A5XX_CP_RB_RPTR),
1249                 gpu_read(gpu, REG_A5XX_CP_RB_WPTR),
1250                 gpu_read64(gpu, REG_A5XX_CP_IB1_BASE),
1251                 gpu_read(gpu, REG_A5XX_CP_IB1_BUFSZ),
1252                 gpu_read64(gpu, REG_A5XX_CP_IB2_BASE),
1253                 gpu_read(gpu, REG_A5XX_CP_IB2_BUFSZ));
1254
1255         /* Turn off the hangcheck timer to keep it from bothering us */
1256         del_timer(&gpu->hangcheck_timer);
1257
1258         kthread_queue_work(gpu->worker, &gpu->recover_work);
1259 }
1260
1261 #define RBBM_ERROR_MASK \
1262         (A5XX_RBBM_INT_0_MASK_RBBM_AHB_ERROR | \
1263         A5XX_RBBM_INT_0_MASK_RBBM_TRANSFER_TIMEOUT | \
1264         A5XX_RBBM_INT_0_MASK_RBBM_ME_MS_TIMEOUT | \
1265         A5XX_RBBM_INT_0_MASK_RBBM_PFP_MS_TIMEOUT | \
1266         A5XX_RBBM_INT_0_MASK_RBBM_ETS_MS_TIMEOUT | \
1267         A5XX_RBBM_INT_0_MASK_RBBM_ATB_ASYNC_OVERFLOW)
1268
1269 static irqreturn_t a5xx_irq(struct msm_gpu *gpu)
1270 {
1271         struct msm_drm_private *priv = gpu->dev->dev_private;
1272         u32 status = gpu_read(gpu, REG_A5XX_RBBM_INT_0_STATUS);
1273
1274         /*
1275          * Clear all the interrupts except RBBM_AHB_ERROR - if we clear it
1276          * before the source is cleared the interrupt will storm.
1277          */
1278         gpu_write(gpu, REG_A5XX_RBBM_INT_CLEAR_CMD,
1279                 status & ~A5XX_RBBM_INT_0_MASK_RBBM_AHB_ERROR);
1280
1281         if (priv->disable_err_irq) {
1282                 status &= A5XX_RBBM_INT_0_MASK_CP_CACHE_FLUSH_TS |
1283                           A5XX_RBBM_INT_0_MASK_CP_SW;
1284         }
1285
1286         /* Pass status to a5xx_rbbm_err_irq because we've already cleared it */
1287         if (status & RBBM_ERROR_MASK)
1288                 a5xx_rbbm_err_irq(gpu, status);
1289
1290         if (status & A5XX_RBBM_INT_0_MASK_CP_HW_ERROR)
1291                 a5xx_cp_err_irq(gpu);
1292
1293         if (status & A5XX_RBBM_INT_0_MASK_MISC_HANG_DETECT)
1294                 a5xx_fault_detect_irq(gpu);
1295
1296         if (status & A5XX_RBBM_INT_0_MASK_UCHE_OOB_ACCESS)
1297                 a5xx_uche_err_irq(gpu);
1298
1299         if (status & A5XX_RBBM_INT_0_MASK_GPMU_VOLTAGE_DROOP)
1300                 a5xx_gpmu_err_irq(gpu);
1301
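        /*
         * CACHE_FLUSH_TS fires once the CP_EVENT_WRITE issued at the end of a
         * submit has written its timestamp, so this is the point where
         * completed fences are retired and a pending preemption switch can be
         * evaluated.
         */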
1302         if (status & A5XX_RBBM_INT_0_MASK_CP_CACHE_FLUSH_TS) {
1303                 a5xx_preempt_trigger(gpu);
1304                 msm_gpu_retire(gpu);
1305         }
1306
1307         if (status & A5XX_RBBM_INT_0_MASK_CP_SW)
1308                 a5xx_preempt_irq(gpu);
1309
1310         return IRQ_HANDLED;
1311 }
1312
1313 static const u32 a5xx_registers[] = {
1314         0x0000, 0x0002, 0x0004, 0x0020, 0x0022, 0x0026, 0x0029, 0x002B,
1315         0x002E, 0x0035, 0x0038, 0x0042, 0x0044, 0x0044, 0x0047, 0x0095,
1316         0x0097, 0x00BB, 0x03A0, 0x0464, 0x0469, 0x046F, 0x04D2, 0x04D3,
1317         0x04E0, 0x0533, 0x0540, 0x0555, 0x0800, 0x081A, 0x081F, 0x0841,
1318         0x0860, 0x0860, 0x0880, 0x08A0, 0x0B00, 0x0B12, 0x0B15, 0x0B28,
1319         0x0B78, 0x0B7F, 0x0BB0, 0x0BBD, 0x0BC0, 0x0BC6, 0x0BD0, 0x0C53,
1320         0x0C60, 0x0C61, 0x0C80, 0x0C82, 0x0C84, 0x0C85, 0x0C90, 0x0C98,
1321         0x0CA0, 0x0CA0, 0x0CB0, 0x0CB2, 0x2180, 0x2185, 0x2580, 0x2585,
1322         0x0CC1, 0x0CC1, 0x0CC4, 0x0CC7, 0x0CCC, 0x0CCC, 0x0CD0, 0x0CD8,
1323         0x0CE0, 0x0CE5, 0x0CE8, 0x0CE8, 0x0CEC, 0x0CF1, 0x0CFB, 0x0D0E,
1324         0x2100, 0x211E, 0x2140, 0x2145, 0x2500, 0x251E, 0x2540, 0x2545,
1325         0x0D10, 0x0D17, 0x0D20, 0x0D23, 0x0D30, 0x0D30, 0x20C0, 0x20C0,
1326         0x24C0, 0x24C0, 0x0E40, 0x0E43, 0x0E4A, 0x0E4A, 0x0E50, 0x0E57,
1327         0x0E60, 0x0E7C, 0x0E80, 0x0E8E, 0x0E90, 0x0E96, 0x0EA0, 0x0EA8,
1328         0x0EB0, 0x0EB2, 0xE140, 0xE147, 0xE150, 0xE187, 0xE1A0, 0xE1A9,
1329         0xE1B0, 0xE1B6, 0xE1C0, 0xE1C7, 0xE1D0, 0xE1D1, 0xE200, 0xE201,
1330         0xE210, 0xE21C, 0xE240, 0xE268, 0xE000, 0xE006, 0xE010, 0xE09A,
1331         0xE0A0, 0xE0A4, 0xE0AA, 0xE0EB, 0xE100, 0xE105, 0xE380, 0xE38F,
1332         0xE3B0, 0xE3B0, 0xE400, 0xE405, 0xE408, 0xE4E9, 0xE4F0, 0xE4F0,
1333         0xE280, 0xE280, 0xE282, 0xE2A3, 0xE2A5, 0xE2C2, 0xE940, 0xE947,
1334         0xE950, 0xE987, 0xE9A0, 0xE9A9, 0xE9B0, 0xE9B6, 0xE9C0, 0xE9C7,
1335         0xE9D0, 0xE9D1, 0xEA00, 0xEA01, 0xEA10, 0xEA1C, 0xEA40, 0xEA68,
1336         0xE800, 0xE806, 0xE810, 0xE89A, 0xE8A0, 0xE8A4, 0xE8AA, 0xE8EB,
1337         0xE900, 0xE905, 0xEB80, 0xEB8F, 0xEBB0, 0xEBB0, 0xEC00, 0xEC05,
1338         0xEC08, 0xECE9, 0xECF0, 0xECF0, 0xEA80, 0xEA80, 0xEA82, 0xEAA3,
1339         0xEAA5, 0xEAC2, 0xA800, 0xA800, 0xA820, 0xA828, 0xA840, 0xA87D,
1340         0xA880, 0xA88D, 0xA890, 0xA8A3, 0xA8D0, 0xA8D8, 0xA8E0, 0xA8F5,
1341         0xAC60, 0xAC60, ~0,
1342 };
1343
1344 static void a5xx_dump(struct msm_gpu *gpu)
1345 {
1346         DRM_DEV_INFO(gpu->dev->dev, "status:   %08x\n",
1347                 gpu_read(gpu, REG_A5XX_RBBM_STATUS));
1348         adreno_dump(gpu);
1349 }
1350
1351 static int a5xx_pm_resume(struct msm_gpu *gpu)
1352 {
1353         struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
1354         int ret;
1355
1356         /* Turn on the core power */
1357         ret = msm_gpu_pm_resume(gpu);
1358         if (ret)
1359                 return ret;
1360
1361         /* Adreno 505, 506, 508, 509, 510 and 512 need manual RBBM suspend/resume control */
1362         if (!(adreno_is_a530(adreno_gpu) || adreno_is_a540(adreno_gpu))) {
1363                 /* Halt the sp_input_clk at HM level */
1364                 gpu_write(gpu, REG_A5XX_RBBM_CLOCK_CNTL, 0x00000055);
1365                 a5xx_set_hwcg(gpu, true);
1366                 /* Turn on sp_input_clk at HM level */
1367                 gpu_rmw(gpu, REG_A5XX_RBBM_CLOCK_CNTL, 0xff, 0);
1368                 return 0;
1369         }
1370
1371         /* Turn on the RBCCU domain first to limit the chances of voltage droop */
1372         gpu_write(gpu, REG_A5XX_GPMU_RBCCU_POWER_CNTL, 0x778000);
1373
1374         /* Wait 3 usecs before polling */
1375         udelay(3);
1376
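        /*
         * Bit 20 of GPMU_RBCCU_PWR_CLK_STATUS is assumed to be the GDSC
         * power-on acknowledgement, so poll it for up to 20 usecs.
         */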
1377         ret = spin_usecs(gpu, 20, REG_A5XX_GPMU_RBCCU_PWR_CLK_STATUS,
1378                 (1 << 20), (1 << 20));
1379         if (ret) {
1380                 DRM_ERROR("%s: timeout waiting for RBCCU GDSC enable: %X\n",
1381                         gpu->name,
1382                         gpu_read(gpu, REG_A5XX_GPMU_RBCCU_PWR_CLK_STATUS));
1383                 return ret;
1384         }
1385
1386         /* Turn on the SP domain */
1387         gpu_write(gpu, REG_A5XX_GPMU_SP_POWER_CNTL, 0x778000);
1388         ret = spin_usecs(gpu, 20, REG_A5XX_GPMU_SP_PWR_CLK_STATUS,
1389                 (1 << 20), (1 << 20));
1390         if (ret)
1391                 DRM_ERROR("%s: timeout waiting for SP GDSC enable\n",
1392                         gpu->name);
1393
1394         return ret;
1395 }
1396
1397 static int a5xx_pm_suspend(struct msm_gpu *gpu)
1398 {
1399         struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
1400         struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu);
1401         u32 mask = 0xf;
1402         int i, ret;
1403
1404         /* A505, A506, A508, A510 have 3 XIN ports in VBIF */
1405         if (adreno_is_a505(adreno_gpu) || adreno_is_a506(adreno_gpu) ||
1406             adreno_is_a508(adreno_gpu) || adreno_is_a510(adreno_gpu))
1407                 mask = 0x7;
1408
1409         /* Clear the VBIF pipe before shutting down */
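        /*
         * HALT_CTRL0 requests a halt on each XIN port; HALT_CTRL1 is assumed
         * to report the matching per-port acks, so wait until every requested
         * bit is acknowledged before dropping the request.
         */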
1410         gpu_write(gpu, REG_A5XX_VBIF_XIN_HALT_CTRL0, mask);
1411         spin_until((gpu_read(gpu, REG_A5XX_VBIF_XIN_HALT_CTRL1) &
1412                                 mask) == mask);
1413
1414         gpu_write(gpu, REG_A5XX_VBIF_XIN_HALT_CTRL0, 0);
1415
1416         /*
1417          * Reset the VBIF before power collapse to avoid issues with FIFO
1418          * entries on Adreno A510 and A530 (the others will tend to lock up)
1419          */
1420         if (adreno_is_a510(adreno_gpu) || adreno_is_a530(adreno_gpu)) {
1421                 gpu_write(gpu, REG_A5XX_RBBM_BLOCK_SW_RESET_CMD, 0x003C0000);
1422                 gpu_write(gpu, REG_A5XX_RBBM_BLOCK_SW_RESET_CMD, 0x00000000);
1423         }
1424
1425         ret = msm_gpu_pm_suspend(gpu);
1426         if (ret)
1427                 return ret;
1428
1429         if (a5xx_gpu->has_whereami)
1430                 for (i = 0; i < gpu->nr_rings; i++)
1431                         a5xx_gpu->shadow[i] = 0;
1432
1433         return 0;
1434 }
1435
1436 static int a5xx_get_timestamp(struct msm_gpu *gpu, uint64_t *value)
1437 {
1438         *value = gpu_read64(gpu, REG_A5XX_RBBM_ALWAYSON_COUNTER_LO);
1439
1440         return 0;
1441 }
1442
1443 struct a5xx_crashdumper {
1444         void *ptr;
1445         struct drm_gem_object *bo;
1446         u64 iova;
1447 };
1448
1449 struct a5xx_gpu_state {
1450         struct msm_gpu_state base;
1451         u32 *hlsqregs;
1452 };
1453
1454 static int a5xx_crashdumper_init(struct msm_gpu *gpu,
1455                 struct a5xx_crashdumper *dumper)
1456 {
1457         dumper->ptr = msm_gem_kernel_new(gpu->dev,
1458                 SZ_1M, MSM_BO_WC, gpu->aspace,
1459                 &dumper->bo, &dumper->iova);
1460
1461         if (!IS_ERR(dumper->ptr))
1462                 msm_gem_object_set_name(dumper->bo, "crashdump");
1463
1464         return PTR_ERR_OR_ZERO(dumper->ptr);
1465 }
1466
1467 static int a5xx_crashdumper_run(struct msm_gpu *gpu,
1468                 struct a5xx_crashdumper *dumper)
1469 {
1470         u32 val;
1471
1472         if (IS_ERR_OR_NULL(dumper->ptr))
1473                 return -EINVAL;
1474
1475         gpu_write64(gpu, REG_A5XX_CP_CRASH_SCRIPT_BASE_LO, dumper->iova);
1476
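        /*
         * Writing 1 to CRASH_DUMP_CNTL kicks off the script at the base
         * address programmed above; bit 2 is assumed to be the "dump
         * complete" status, polled every 100 usecs for up to 10 msecs.
         */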
1477         gpu_write(gpu, REG_A5XX_CP_CRASH_DUMP_CNTL, 1);
1478
1479         return gpu_poll_timeout(gpu, REG_A5XX_CP_CRASH_DUMP_CNTL, val,
1480                 val & 0x04, 100, 10000);
1481 }
1482
1483 /*
1484  * This is a list of the registers that need to be read through the HLSQ
1485  * aperture via the crashdumper.  They are not normally accessible from
1486  * the CPU on a secure platform.
1487  */
1488 static const struct {
1489         u32 type;
1490         u32 regoffset;
1491         u32 count;
1492 } a5xx_hlsq_aperture_regs[] = {
1493         { 0x35, 0xe00, 0x32 },   /* HLSQ non-context */
1494         { 0x31, 0x2080, 0x1 },   /* HLSQ 2D context 0 */
1495         { 0x33, 0x2480, 0x1 },   /* HLSQ 2D context 1 */
1496         { 0x32, 0xe780, 0x62 },  /* HLSQ 3D context 0 */
1497         { 0x34, 0xef80, 0x62 },  /* HLSQ 3D context 1 */
1498         { 0x3f, 0x0ec0, 0x40 },  /* SP non-context */
1499         { 0x3d, 0x2040, 0x1 },   /* SP 2D context 0 */
1500         { 0x3b, 0x2440, 0x1 },   /* SP 2D context 1 */
1501         { 0x3e, 0xe580, 0x170 }, /* SP 3D context 0 */
1502         { 0x3c, 0xed80, 0x170 }, /* SP 3D context 1 */
1503         { 0x3a, 0x0f00, 0x1c },  /* TP non-context */
1504         { 0x38, 0x2000, 0xa },   /* TP 2D context 0 */
1505         { 0x36, 0x2400, 0xa },   /* TP 2D context 1 */
1506         { 0x39, 0xe700, 0x80 },  /* TP 3D context 0 */
1507         { 0x37, 0xef00, 0x80 },  /* TP 3D context 1 */
1508 };
1509
1510 static void a5xx_gpu_state_get_hlsq_regs(struct msm_gpu *gpu,
1511                 struct a5xx_gpu_state *a5xx_state)
1512 {
1513         struct a5xx_crashdumper dumper = { 0 };
1514         u32 offset, count = 0;
1515         u64 *ptr;
1516         int i;
1517
1518         if (a5xx_crashdumper_init(gpu, &dumper))
1519                 return;
1520
1521         /* The script will be written at offset 0 */
1522         ptr = dumper.ptr;
1523
1524         /* Start writing the data at offset 256k */
1525         offset = dumper.iova + (256 * SZ_1K);
1526
1527         /* Count how many additional registers to get from the HLSQ aperture */
1528         for (i = 0; i < ARRAY_SIZE(a5xx_hlsq_aperture_regs); i++)
1529                 count += a5xx_hlsq_aperture_regs[i].count;
1530
1531         a5xx_state->hlsqregs = kcalloc(count, sizeof(u32), GFP_KERNEL);
1532         if (!a5xx_state->hlsqregs)
1533                 return;
1534
1535         /* Build the crashdump script */
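        /*
         * Each script entry below is a pair of 64-bit words.  The layout
         * assumed from the writes that follow: a register write carries the
         * value in the first qword and the register dword offset in bits
         * 63:44 of the second, with bit 21 flagging it as a write; a register
         * read carries the destination iova in the first qword and the
         * register offset plus a dword count in the second.
         */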
1536         for (i = 0; i < ARRAY_SIZE(a5xx_hlsq_aperture_regs); i++) {
1537                 u32 type = a5xx_hlsq_aperture_regs[i].type;
1538                 u32 c = a5xx_hlsq_aperture_regs[i].count;
1539
1540                 /* Write the register to select the desired bank */
1541                 *ptr++ = ((u64) type << 8);
1542                 *ptr++ = (((u64) REG_A5XX_HLSQ_DBG_READ_SEL) << 44) |
1543                         (1 << 21) | 1;
1544
1545                 *ptr++ = offset;
1546                 *ptr++ = (((u64) REG_A5XX_HLSQ_DBG_AHB_READ_APERTURE) << 44)
1547                         | c;
1548
1549                 offset += c * sizeof(u32);
1550         }
1551
1552         /* Write two zeros to close off the script */
1553         *ptr++ = 0;
1554         *ptr++ = 0;
1555
1556         if (a5xx_crashdumper_run(gpu, &dumper)) {
1557                 kfree(a5xx_state->hlsqregs);
1558                 msm_gem_kernel_put(dumper.bo, gpu->aspace);
1559                 return;
1560         }
1561
1562         /* Copy the data from the crashdumper to the state */
1563         memcpy(a5xx_state->hlsqregs, dumper.ptr + (256 * SZ_1K),
1564                 count * sizeof(u32));
1565
1566         msm_gem_kernel_put(dumper.bo, gpu->aspace);
1567 }
1568
1569 static struct msm_gpu_state *a5xx_gpu_state_get(struct msm_gpu *gpu)
1570 {
1571         struct a5xx_gpu_state *a5xx_state = kzalloc(sizeof(*a5xx_state),
1572                         GFP_KERNEL);
1573         bool stalled = !!(gpu_read(gpu, REG_A5XX_RBBM_STATUS3) & BIT(24));
1574
1575         if (!a5xx_state)
1576                 return ERR_PTR(-ENOMEM);
1577
1578         /* Temporarily disable hardware clock gating before reading the hw */
1579         a5xx_set_hwcg(gpu, false);
1580
1581         /* First get the generic state from the adreno core */
1582         adreno_gpu_state_get(gpu, &(a5xx_state->base));
1583
1584         a5xx_state->base.rbbm_status = gpu_read(gpu, REG_A5XX_RBBM_STATUS);
1585
1586         /*
1587          * Get the HLSQ regs with the help of the crashdumper, but only if
1588          * we are not stalled in an iommu fault (in which case the crashdumper
1589          * would not have access to memory)
1590          */
1591         if (!stalled)
1592                 a5xx_gpu_state_get_hlsq_regs(gpu, a5xx_state);
1593
1594         a5xx_set_hwcg(gpu, true);
1595
1596         return &a5xx_state->base;
1597 }
1598
1599 static void a5xx_gpu_state_destroy(struct kref *kref)
1600 {
1601         struct msm_gpu_state *state = container_of(kref,
1602                 struct msm_gpu_state, ref);
1603         struct a5xx_gpu_state *a5xx_state = container_of(state,
1604                 struct a5xx_gpu_state, base);
1605
1606         kfree(a5xx_state->hlsqregs);
1607
1608         adreno_gpu_state_destroy(state);
1609         kfree(a5xx_state);
1610 }
1611
1612 static int a5xx_gpu_state_put(struct msm_gpu_state *state)
1613 {
1614         if (IS_ERR_OR_NULL(state))
1615                 return 1;
1616
1617         return kref_put(&state->ref, a5xx_gpu_state_destroy);
1618 }
1619
1620
1621 #if defined(CONFIG_DEBUG_FS) || defined(CONFIG_DEV_COREDUMP)
1622 static void a5xx_show(struct msm_gpu *gpu, struct msm_gpu_state *state,
1623                       struct drm_printer *p)
1624 {
1625         int i, j;
1626         u32 pos = 0;
1627         struct a5xx_gpu_state *a5xx_state = container_of(state,
1628                 struct a5xx_gpu_state, base);
1629
1630         if (IS_ERR_OR_NULL(state))
1631                 return;
1632
1633         adreno_show(gpu, state, p);
1634
1635         /* Dump the additional a5xx HLSQ registers */
1636         if (!a5xx_state->hlsqregs)
1637                 return;
1638
1639         drm_printf(p, "registers-hlsq:\n");
1640
1641         for (i = 0; i < ARRAY_SIZE(a5xx_hlsq_aperture_regs); i++) {
1642                 u32 o = a5xx_hlsq_aperture_regs[i].regoffset;
1643                 u32 c = a5xx_hlsq_aperture_regs[i].count;
1644
1645                 for (j = 0; j < c; j++, pos++, o++) {
1646                         /*
1647                          * To keep the crashdump simple we pull the entire range
1648                          * for each register type but not all of the registers
1649                          * in the range are valid. Fortunately invalid registers
1650                          * stick out like a sore thumb with a value of
1651                          * 0xdeadbeef
1652                          */
1653                         if (a5xx_state->hlsqregs[pos] == 0xdeadbeef)
1654                                 continue;
1655
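                        /*
                         * 'o' is a dword register index; shift it by 2 so the
                         * offset is printed in bytes, presumably to match the
                         * main register dump above.
                         */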
1656                         drm_printf(p, "  - { offset: 0x%04x, value: 0x%08x }\n",
1657                                 o << 2, a5xx_state->hlsqregs[pos]);
1658                 }
1659         }
1660 }
1661 #endif
1662
1663 static struct msm_ringbuffer *a5xx_active_ring(struct msm_gpu *gpu)
1664 {
1665         struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
1666         struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu);
1667
1668         return a5xx_gpu->cur_ring;
1669 }
1670
1671 static u64 a5xx_gpu_busy(struct msm_gpu *gpu, unsigned long *out_sample_rate)
1672 {
1673         u64 busy_cycles;
1674
1675         busy_cycles = gpu_read64(gpu, REG_A5XX_RBBM_PERFCTR_RBBM_0_LO);
1676         *out_sample_rate = clk_get_rate(gpu->core_clk);
1677
1678         return busy_cycles;
1679 }
1680
1681 static uint32_t a5xx_get_rptr(struct msm_gpu *gpu, struct msm_ringbuffer *ring)
1682 {
1683         struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
1684         struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu);
1685
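        /*
         * With CP_WHERE_AM_I support the CP mirrors the read pointer into a
         * per-ring shadow buffer, so the rptr can be read from memory without
         * touching GPU registers (which would require the hardware to be
         * awake).
         */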
1686         if (a5xx_gpu->has_whereami)
1687                 return a5xx_gpu->shadow[ring->id];
1688
1689         return ring->memptrs->rptr = gpu_read(gpu, REG_A5XX_CP_RB_RPTR);
1690 }
1691
1692 static const struct adreno_gpu_funcs funcs = {
1693         .base = {
1694                 .get_param = adreno_get_param,
1695                 .set_param = adreno_set_param,
1696                 .hw_init = a5xx_hw_init,
1697                 .ucode_load = a5xx_ucode_load,
1698                 .pm_suspend = a5xx_pm_suspend,
1699                 .pm_resume = a5xx_pm_resume,
1700                 .recover = a5xx_recover,
1701                 .submit = a5xx_submit,
1702                 .active_ring = a5xx_active_ring,
1703                 .irq = a5xx_irq,
1704                 .destroy = a5xx_destroy,
1705 #if defined(CONFIG_DEBUG_FS) || defined(CONFIG_DEV_COREDUMP)
1706                 .show = a5xx_show,
1707 #endif
1708 #if defined(CONFIG_DEBUG_FS)
1709                 .debugfs_init = a5xx_debugfs_init,
1710 #endif
1711                 .gpu_busy = a5xx_gpu_busy,
1712                 .gpu_state_get = a5xx_gpu_state_get,
1713                 .gpu_state_put = a5xx_gpu_state_put,
1714                 .create_address_space = adreno_create_address_space,
1715                 .get_rptr = a5xx_get_rptr,
1716         },
1717         .get_timestamp = a5xx_get_timestamp,
1718 };
1719
1720 static void check_speed_bin(struct device *dev)
1721 {
1722         struct nvmem_cell *cell;
1723         u32 val;
1724
1725         /*
1726          * If the OPP table specifies an opp-supported-hw property then we have
1727          * to set something with dev_pm_opp_set_supported_hw() or the table
1728          * doesn't get populated.  Pick an arbitrary value that should select
1729          * the default frequencies without conflicting with any actual speed
1730          * bins.
1731          */
1732         val = 0x80;
1733
1734         cell = nvmem_cell_get(dev, "speed_bin");
1735
1736         if (!IS_ERR(cell)) {
1737                 void *buf = nvmem_cell_read(cell, NULL);
1738
1739                 if (!IS_ERR(buf)) {
1740                         u8 bin = *((u8 *) buf);
1741
1742                         val = (1 << bin);
1743                         kfree(buf);
1744                 }
1745
1746                 nvmem_cell_put(cell);
1747         }
1748
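        /*
         * Any OPP whose opp-supported-hw mask shares a bit with this value is
         * treated as supported, so fused parts only expose the frequencies
         * valid for their speed bin.
         */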
1749         devm_pm_opp_set_supported_hw(dev, &val, 1);
1750 }
1751
1752 struct msm_gpu *a5xx_gpu_init(struct drm_device *dev)
1753 {
1754         struct msm_drm_private *priv = dev->dev_private;
1755         struct platform_device *pdev = priv->gpu_pdev;
1756         struct adreno_platform_config *config = pdev->dev.platform_data;
1757         struct a5xx_gpu *a5xx_gpu = NULL;
1758         struct adreno_gpu *adreno_gpu;
1759         struct msm_gpu *gpu;
1760         unsigned int nr_rings;
1761         int ret;
1762
1763         a5xx_gpu = kzalloc(sizeof(*a5xx_gpu), GFP_KERNEL);
1764         if (!a5xx_gpu)
1765                 return ERR_PTR(-ENOMEM);
1766
1767         adreno_gpu = &a5xx_gpu->base;
1768         gpu = &adreno_gpu->base;
1769
1770         adreno_gpu->registers = a5xx_registers;
1771
1772         a5xx_gpu->lm_leakage = 0x4E001A;
1773
1774         check_speed_bin(&pdev->dev);
1775
1776         nr_rings = 4;
1777
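        /*
         * A510 is limited to a single ringbuffer, presumably because
         * preemption is not supported on that part.
         */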
1778         if (config->info->revn == 510)
1779                 nr_rings = 1;
1780
1781         ret = adreno_gpu_init(dev, pdev, adreno_gpu, &funcs, nr_rings);
1782         if (ret) {
1783                 a5xx_destroy(&(a5xx_gpu->base.base));
1784                 return ERR_PTR(ret);
1785         }
1786
1787         if (gpu->aspace)
1788                 msm_mmu_set_fault_handler(gpu->aspace->mmu, gpu, a5xx_fault_handler);
1789
1790         /* Set up the preemption specific bits and pieces for each ringbuffer */
1791         a5xx_preempt_init(gpu);
1792
1793         /* Set the highest bank bit */
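        /*
         * The highest bank bit is assumed to match the DDR configuration used
         * by the display side for UBWC, since both ends must agree on the
         * tiling layout.
         */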
1794         if (adreno_is_a540(adreno_gpu) || adreno_is_a530(adreno_gpu))
1795                 adreno_gpu->ubwc_config.highest_bank_bit = 15;
1796         else
1797                 adreno_gpu->ubwc_config.highest_bank_bit = 14;
1798
1799         /* a5xx only supports UBWC 1.0, these are not configurable */
1800         adreno_gpu->ubwc_config.macrotile_mode = 0;
1801         adreno_gpu->ubwc_config.ubwc_swizzle = 0x7;
1802
1803         adreno_gpu->uche_trap_base = 0x0001ffffffff0000ull;
1804
1805         return gpu;
1806 }