/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include <linux/firmware.h>
#include <linux/module.h>

#include "amdgpu.h"
#include "amdgpu_ih.h"
#include "amdgpu_gfx.h"
#include "cikd.h"
#include "cik.h"
#include "cik_structs.h"
#include "atom.h"
#include "amdgpu_ucode.h"
#include "clearstate_ci.h"

#include "dce/dce_8_0_d.h"
#include "dce/dce_8_0_sh_mask.h"

#include "bif/bif_4_1_d.h"
#include "bif/bif_4_1_sh_mask.h"

#include "gca/gfx_7_0_d.h"
#include "gca/gfx_7_2_enum.h"
#include "gca/gfx_7_2_sh_mask.h"

#include "gmc/gmc_7_0_d.h"
#include "gmc/gmc_7_0_sh_mask.h"

#include "oss/oss_2_0_d.h"
#include "oss/oss_2_0_sh_mask.h"
#define NUM_SIMD_PER_CU 0x4 /* missing from the gfx_7 IP headers */

#define GFX7_NUM_GFX_RINGS	1
#define GFX7_MEC_HPD_SIZE	2048

static void gfx_v7_0_set_ring_funcs(struct amdgpu_device *adev);
static void gfx_v7_0_set_irq_funcs(struct amdgpu_device *adev);
static void gfx_v7_0_set_gds_init(struct amdgpu_device *adev);

MODULE_FIRMWARE("amdgpu/bonaire_pfp.bin");
MODULE_FIRMWARE("amdgpu/bonaire_me.bin");
MODULE_FIRMWARE("amdgpu/bonaire_ce.bin");
MODULE_FIRMWARE("amdgpu/bonaire_rlc.bin");
MODULE_FIRMWARE("amdgpu/bonaire_mec.bin");

MODULE_FIRMWARE("amdgpu/hawaii_pfp.bin");
MODULE_FIRMWARE("amdgpu/hawaii_me.bin");
MODULE_FIRMWARE("amdgpu/hawaii_ce.bin");
MODULE_FIRMWARE("amdgpu/hawaii_rlc.bin");
MODULE_FIRMWARE("amdgpu/hawaii_mec.bin");

MODULE_FIRMWARE("amdgpu/kaveri_pfp.bin");
MODULE_FIRMWARE("amdgpu/kaveri_me.bin");
MODULE_FIRMWARE("amdgpu/kaveri_ce.bin");
MODULE_FIRMWARE("amdgpu/kaveri_rlc.bin");
MODULE_FIRMWARE("amdgpu/kaveri_mec.bin");
MODULE_FIRMWARE("amdgpu/kaveri_mec2.bin");

MODULE_FIRMWARE("amdgpu/kabini_pfp.bin");
MODULE_FIRMWARE("amdgpu/kabini_me.bin");
MODULE_FIRMWARE("amdgpu/kabini_ce.bin");
MODULE_FIRMWARE("amdgpu/kabini_rlc.bin");
MODULE_FIRMWARE("amdgpu/kabini_mec.bin");

MODULE_FIRMWARE("amdgpu/mullins_pfp.bin");
MODULE_FIRMWARE("amdgpu/mullins_me.bin");
MODULE_FIRMWARE("amdgpu/mullins_ce.bin");
MODULE_FIRMWARE("amdgpu/mullins_rlc.bin");
MODULE_FIRMWARE("amdgpu/mullins_mec.bin");

static const struct amdgpu_gds_reg_offset amdgpu_gds_reg_offset[] =
{
	{mmGDS_VMID0_BASE, mmGDS_VMID0_SIZE, mmGDS_GWS_VMID0, mmGDS_OA_VMID0},
	{mmGDS_VMID1_BASE, mmGDS_VMID1_SIZE, mmGDS_GWS_VMID1, mmGDS_OA_VMID1},
	{mmGDS_VMID2_BASE, mmGDS_VMID2_SIZE, mmGDS_GWS_VMID2, mmGDS_OA_VMID2},
	{mmGDS_VMID3_BASE, mmGDS_VMID3_SIZE, mmGDS_GWS_VMID3, mmGDS_OA_VMID3},
	{mmGDS_VMID4_BASE, mmGDS_VMID4_SIZE, mmGDS_GWS_VMID4, mmGDS_OA_VMID4},
	{mmGDS_VMID5_BASE, mmGDS_VMID5_SIZE, mmGDS_GWS_VMID5, mmGDS_OA_VMID5},
	{mmGDS_VMID6_BASE, mmGDS_VMID6_SIZE, mmGDS_GWS_VMID6, mmGDS_OA_VMID6},
	{mmGDS_VMID7_BASE, mmGDS_VMID7_SIZE, mmGDS_GWS_VMID7, mmGDS_OA_VMID7},
	{mmGDS_VMID8_BASE, mmGDS_VMID8_SIZE, mmGDS_GWS_VMID8, mmGDS_OA_VMID8},
	{mmGDS_VMID9_BASE, mmGDS_VMID9_SIZE, mmGDS_GWS_VMID9, mmGDS_OA_VMID9},
	{mmGDS_VMID10_BASE, mmGDS_VMID10_SIZE, mmGDS_GWS_VMID10, mmGDS_OA_VMID10},
	{mmGDS_VMID11_BASE, mmGDS_VMID11_SIZE, mmGDS_GWS_VMID11, mmGDS_OA_VMID11},
	{mmGDS_VMID12_BASE, mmGDS_VMID12_SIZE, mmGDS_GWS_VMID12, mmGDS_OA_VMID12},
	{mmGDS_VMID13_BASE, mmGDS_VMID13_SIZE, mmGDS_GWS_VMID13, mmGDS_OA_VMID13},
	{mmGDS_VMID14_BASE, mmGDS_VMID14_SIZE, mmGDS_GWS_VMID14, mmGDS_OA_VMID14},
	{mmGDS_VMID15_BASE, mmGDS_VMID15_SIZE, mmGDS_GWS_VMID15, mmGDS_OA_VMID15}
};
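
/*
 * Format of the RLC save/restore register lists below (inferred from the
 * values themselves; the IP headers do not document it): each u32 packs an
 * instance-select code in the upper 16 bits and the register's dword offset
 * (byte offset >> 2) in the lower 16 bits.  0x0e00 marks a broadcast entry,
 * while runs such as 0x4e00..0xbe00 repeat the same register once per
 * SE/SH instance (see the 0xcd20 and 0xc900 groups).
 */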
static const u32 spectre_rlc_save_restore_register_list[] =
{
	(0x0e00 << 16) | (0xc12c >> 2),
	(0x0e00 << 16) | (0xc140 >> 2),
	(0x0e00 << 16) | (0xc150 >> 2),
	(0x0e00 << 16) | (0xc15c >> 2),
	(0x0e00 << 16) | (0xc168 >> 2),
	(0x0e00 << 16) | (0xc170 >> 2),
	(0x0e00 << 16) | (0xc178 >> 2),
	(0x0e00 << 16) | (0xc204 >> 2),
	(0x0e00 << 16) | (0xc2b4 >> 2),
	(0x0e00 << 16) | (0xc2b8 >> 2),
	(0x0e00 << 16) | (0xc2bc >> 2),
	(0x0e00 << 16) | (0xc2c0 >> 2),
	(0x0e00 << 16) | (0x8228 >> 2),
	(0x0e00 << 16) | (0x829c >> 2),
	(0x0e00 << 16) | (0x869c >> 2),
	(0x0600 << 16) | (0x98f4 >> 2),
	(0x0e00 << 16) | (0x98f8 >> 2),
	(0x0e00 << 16) | (0x9900 >> 2),
	(0x0e00 << 16) | (0xc260 >> 2),
	(0x0e00 << 16) | (0x90e8 >> 2),
	(0x0e00 << 16) | (0x3c000 >> 2),
	(0x0e00 << 16) | (0x3c00c >> 2),
	(0x0e00 << 16) | (0x8c1c >> 2),
	(0x0e00 << 16) | (0x9700 >> 2),
	(0x0e00 << 16) | (0xcd20 >> 2),
	(0x4e00 << 16) | (0xcd20 >> 2),
	(0x5e00 << 16) | (0xcd20 >> 2),
	(0x6e00 << 16) | (0xcd20 >> 2),
	(0x7e00 << 16) | (0xcd20 >> 2),
	(0x8e00 << 16) | (0xcd20 >> 2),
	(0x9e00 << 16) | (0xcd20 >> 2),
	(0xae00 << 16) | (0xcd20 >> 2),
	(0xbe00 << 16) | (0xcd20 >> 2),
	(0x0e00 << 16) | (0x89bc >> 2),
	(0x0e00 << 16) | (0x8900 >> 2),
	(0x0e00 << 16) | (0xc130 >> 2),
	(0x0e00 << 16) | (0xc134 >> 2),
	(0x0e00 << 16) | (0xc1fc >> 2),
	(0x0e00 << 16) | (0xc208 >> 2),
	(0x0e00 << 16) | (0xc264 >> 2),
	(0x0e00 << 16) | (0xc268 >> 2),
	(0x0e00 << 16) | (0xc26c >> 2),
	(0x0e00 << 16) | (0xc270 >> 2),
	(0x0e00 << 16) | (0xc274 >> 2),
	(0x0e00 << 16) | (0xc278 >> 2),
	(0x0e00 << 16) | (0xc27c >> 2),
	(0x0e00 << 16) | (0xc280 >> 2),
	(0x0e00 << 16) | (0xc284 >> 2),
	(0x0e00 << 16) | (0xc288 >> 2),
	(0x0e00 << 16) | (0xc28c >> 2),
	(0x0e00 << 16) | (0xc290 >> 2),
	(0x0e00 << 16) | (0xc294 >> 2),
	(0x0e00 << 16) | (0xc298 >> 2),
	(0x0e00 << 16) | (0xc29c >> 2),
	(0x0e00 << 16) | (0xc2a0 >> 2),
	(0x0e00 << 16) | (0xc2a4 >> 2),
	(0x0e00 << 16) | (0xc2a8 >> 2),
	(0x0e00 << 16) | (0xc2ac >> 2),
	(0x0e00 << 16) | (0xc2b0 >> 2),
	(0x0e00 << 16) | (0x301d0 >> 2),
	(0x0e00 << 16) | (0x30238 >> 2),
	(0x0e00 << 16) | (0x30250 >> 2),
	(0x0e00 << 16) | (0x30254 >> 2),
	(0x0e00 << 16) | (0x30258 >> 2),
	(0x0e00 << 16) | (0x3025c >> 2),
	(0x4e00 << 16) | (0xc900 >> 2),
	(0x5e00 << 16) | (0xc900 >> 2),
	(0x6e00 << 16) | (0xc900 >> 2),
	(0x7e00 << 16) | (0xc900 >> 2),
	(0x8e00 << 16) | (0xc900 >> 2),
	(0x9e00 << 16) | (0xc900 >> 2),
	(0xae00 << 16) | (0xc900 >> 2),
	(0xbe00 << 16) | (0xc900 >> 2),
	(0x4e00 << 16) | (0xc904 >> 2),
	(0x5e00 << 16) | (0xc904 >> 2),
	(0x6e00 << 16) | (0xc904 >> 2),
	(0x7e00 << 16) | (0xc904 >> 2),
	(0x8e00 << 16) | (0xc904 >> 2),
	(0x9e00 << 16) | (0xc904 >> 2),
	(0xae00 << 16) | (0xc904 >> 2),
	(0xbe00 << 16) | (0xc904 >> 2),
	(0x4e00 << 16) | (0xc908 >> 2),
	(0x5e00 << 16) | (0xc908 >> 2),
	(0x6e00 << 16) | (0xc908 >> 2),
	(0x7e00 << 16) | (0xc908 >> 2),
	(0x8e00 << 16) | (0xc908 >> 2),
	(0x9e00 << 16) | (0xc908 >> 2),
	(0xae00 << 16) | (0xc908 >> 2),
	(0xbe00 << 16) | (0xc908 >> 2),
	(0x4e00 << 16) | (0xc90c >> 2),
	(0x5e00 << 16) | (0xc90c >> 2),
	(0x6e00 << 16) | (0xc90c >> 2),
	(0x7e00 << 16) | (0xc90c >> 2),
	(0x8e00 << 16) | (0xc90c >> 2),
	(0x9e00 << 16) | (0xc90c >> 2),
	(0xae00 << 16) | (0xc90c >> 2),
	(0xbe00 << 16) | (0xc90c >> 2),
	(0x4e00 << 16) | (0xc910 >> 2),
	(0x5e00 << 16) | (0xc910 >> 2),
	(0x6e00 << 16) | (0xc910 >> 2),
	(0x7e00 << 16) | (0xc910 >> 2),
	(0x8e00 << 16) | (0xc910 >> 2),
	(0x9e00 << 16) | (0xc910 >> 2),
	(0xae00 << 16) | (0xc910 >> 2),
	(0xbe00 << 16) | (0xc910 >> 2),
	(0x0e00 << 16) | (0xc99c >> 2),
	(0x0e00 << 16) | (0x9834 >> 2),
	(0x0000 << 16) | (0x30f00 >> 2),
	(0x0001 << 16) | (0x30f00 >> 2),
	(0x0000 << 16) | (0x30f04 >> 2),
	(0x0001 << 16) | (0x30f04 >> 2),
	(0x0000 << 16) | (0x30f08 >> 2),
	(0x0001 << 16) | (0x30f08 >> 2),
	(0x0000 << 16) | (0x30f0c >> 2),
	(0x0001 << 16) | (0x30f0c >> 2),
	(0x0600 << 16) | (0x9b7c >> 2),
	(0x0e00 << 16) | (0x8a14 >> 2),
	(0x0e00 << 16) | (0x8a18 >> 2),
	(0x0600 << 16) | (0x30a00 >> 2),
	(0x0e00 << 16) | (0x8bf0 >> 2),
	(0x0e00 << 16) | (0x8bcc >> 2),
	(0x0e00 << 16) | (0x8b24 >> 2),
	(0x0e00 << 16) | (0x30a04 >> 2),
	(0x0600 << 16) | (0x30a10 >> 2),
	(0x0600 << 16) | (0x30a14 >> 2),
	(0x0600 << 16) | (0x30a18 >> 2),
	(0x0600 << 16) | (0x30a2c >> 2),
	(0x0e00 << 16) | (0xc700 >> 2),
	(0x0e00 << 16) | (0xc704 >> 2),
	(0x0e00 << 16) | (0xc708 >> 2),
	(0x0e00 << 16) | (0xc768 >> 2),
	(0x0400 << 16) | (0xc770 >> 2),
	(0x0400 << 16) | (0xc774 >> 2),
	(0x0400 << 16) | (0xc778 >> 2),
	(0x0400 << 16) | (0xc77c >> 2),
	(0x0400 << 16) | (0xc780 >> 2),
	(0x0400 << 16) | (0xc784 >> 2),
	(0x0400 << 16) | (0xc788 >> 2),
	(0x0400 << 16) | (0xc78c >> 2),
	(0x0400 << 16) | (0xc798 >> 2),
	(0x0400 << 16) | (0xc79c >> 2),
	(0x0400 << 16) | (0xc7a0 >> 2),
	(0x0400 << 16) | (0xc7a4 >> 2),
	(0x0400 << 16) | (0xc7a8 >> 2),
	(0x0400 << 16) | (0xc7ac >> 2),
	(0x0400 << 16) | (0xc7b0 >> 2),
	(0x0400 << 16) | (0xc7b4 >> 2),
	(0x0e00 << 16) | (0x9100 >> 2),
	(0x0e00 << 16) | (0x3c010 >> 2),
	(0x0e00 << 16) | (0x92a8 >> 2),
	(0x0e00 << 16) | (0x92ac >> 2),
	(0x0e00 << 16) | (0x92b4 >> 2),
	(0x0e00 << 16) | (0x92b8 >> 2),
	(0x0e00 << 16) | (0x92bc >> 2),
	(0x0e00 << 16) | (0x92c0 >> 2),
	(0x0e00 << 16) | (0x92c4 >> 2),
	(0x0e00 << 16) | (0x92c8 >> 2),
	(0x0e00 << 16) | (0x92cc >> 2),
	(0x0e00 << 16) | (0x92d0 >> 2),
	(0x0e00 << 16) | (0x8c00 >> 2),
	(0x0e00 << 16) | (0x8c04 >> 2),
	(0x0e00 << 16) | (0x8c20 >> 2),
	(0x0e00 << 16) | (0x8c38 >> 2),
	(0x0e00 << 16) | (0x8c3c >> 2),
	(0x0e00 << 16) | (0xae00 >> 2),
	(0x0e00 << 16) | (0x9604 >> 2),
	(0x0e00 << 16) | (0xac08 >> 2),
	(0x0e00 << 16) | (0xac0c >> 2),
	(0x0e00 << 16) | (0xac10 >> 2),
	(0x0e00 << 16) | (0xac14 >> 2),
	(0x0e00 << 16) | (0xac58 >> 2),
	(0x0e00 << 16) | (0xac68 >> 2),
	(0x0e00 << 16) | (0xac6c >> 2),
	(0x0e00 << 16) | (0xac70 >> 2),
	(0x0e00 << 16) | (0xac74 >> 2),
	(0x0e00 << 16) | (0xac78 >> 2),
	(0x0e00 << 16) | (0xac7c >> 2),
	(0x0e00 << 16) | (0xac80 >> 2),
	(0x0e00 << 16) | (0xac84 >> 2),
	(0x0e00 << 16) | (0xac88 >> 2),
	(0x0e00 << 16) | (0xac8c >> 2),
	(0x0e00 << 16) | (0x970c >> 2),
	(0x0e00 << 16) | (0x9714 >> 2),
	(0x0e00 << 16) | (0x9718 >> 2),
	(0x0e00 << 16) | (0x971c >> 2),
	(0x0e00 << 16) | (0x31068 >> 2),
	(0x4e00 << 16) | (0x31068 >> 2),
	(0x5e00 << 16) | (0x31068 >> 2),
	(0x6e00 << 16) | (0x31068 >> 2),
	(0x7e00 << 16) | (0x31068 >> 2),
	(0x8e00 << 16) | (0x31068 >> 2),
	(0x9e00 << 16) | (0x31068 >> 2),
	(0xae00 << 16) | (0x31068 >> 2),
	(0xbe00 << 16) | (0x31068 >> 2),
	(0x0e00 << 16) | (0xcd10 >> 2),
	(0x0e00 << 16) | (0xcd14 >> 2),
	(0x0e00 << 16) | (0x88b0 >> 2),
	(0x0e00 << 16) | (0x88b4 >> 2),
	(0x0e00 << 16) | (0x88b8 >> 2),
	(0x0e00 << 16) | (0x88bc >> 2),
	(0x0400 << 16) | (0x89c0 >> 2),
	(0x0e00 << 16) | (0x88c4 >> 2),
	(0x0e00 << 16) | (0x88c8 >> 2),
	(0x0e00 << 16) | (0x88d0 >> 2),
	(0x0e00 << 16) | (0x88d4 >> 2),
	(0x0e00 << 16) | (0x88d8 >> 2),
	(0x0e00 << 16) | (0x8980 >> 2),
	(0x0e00 << 16) | (0x30938 >> 2),
	(0x0e00 << 16) | (0x3093c >> 2),
	(0x0e00 << 16) | (0x30940 >> 2),
	(0x0e00 << 16) | (0x89a0 >> 2),
	(0x0e00 << 16) | (0x30900 >> 2),
	(0x0e00 << 16) | (0x30904 >> 2),
	(0x0e00 << 16) | (0x89b4 >> 2),
	(0x0e00 << 16) | (0x3c210 >> 2),
	(0x0e00 << 16) | (0x3c214 >> 2),
	(0x0e00 << 16) | (0x3c218 >> 2),
	(0x0e00 << 16) | (0x8904 >> 2),
	(0x0e00 << 16) | (0x8c28 >> 2),
	(0x0e00 << 16) | (0x8c2c >> 2),
	(0x0e00 << 16) | (0x8c30 >> 2),
	(0x0e00 << 16) | (0x8c34 >> 2),
	(0x0e00 << 16) | (0x9600 >> 2),
};

static const u32 kalindi_rlc_save_restore_register_list[] =
{
	(0x0e00 << 16) | (0xc12c >> 2),
	(0x0e00 << 16) | (0xc140 >> 2),
	(0x0e00 << 16) | (0xc150 >> 2),
	(0x0e00 << 16) | (0xc15c >> 2),
	(0x0e00 << 16) | (0xc168 >> 2),
	(0x0e00 << 16) | (0xc170 >> 2),
	(0x0e00 << 16) | (0xc204 >> 2),
	(0x0e00 << 16) | (0xc2b4 >> 2),
	(0x0e00 << 16) | (0xc2b8 >> 2),
	(0x0e00 << 16) | (0xc2bc >> 2),
	(0x0e00 << 16) | (0xc2c0 >> 2),
	(0x0e00 << 16) | (0x8228 >> 2),
	(0x0e00 << 16) | (0x829c >> 2),
	(0x0e00 << 16) | (0x869c >> 2),
	(0x0600 << 16) | (0x98f4 >> 2),
	(0x0e00 << 16) | (0x98f8 >> 2),
	(0x0e00 << 16) | (0x9900 >> 2),
	(0x0e00 << 16) | (0xc260 >> 2),
	(0x0e00 << 16) | (0x90e8 >> 2),
	(0x0e00 << 16) | (0x3c000 >> 2),
	(0x0e00 << 16) | (0x3c00c >> 2),
	(0x0e00 << 16) | (0x8c1c >> 2),
	(0x0e00 << 16) | (0x9700 >> 2),
	(0x0e00 << 16) | (0xcd20 >> 2),
	(0x4e00 << 16) | (0xcd20 >> 2),
	(0x5e00 << 16) | (0xcd20 >> 2),
	(0x6e00 << 16) | (0xcd20 >> 2),
	(0x7e00 << 16) | (0xcd20 >> 2),
	(0x0e00 << 16) | (0x89bc >> 2),
	(0x0e00 << 16) | (0x8900 >> 2),
	(0x0e00 << 16) | (0xc130 >> 2),
	(0x0e00 << 16) | (0xc134 >> 2),
	(0x0e00 << 16) | (0xc1fc >> 2),
	(0x0e00 << 16) | (0xc208 >> 2),
	(0x0e00 << 16) | (0xc264 >> 2),
	(0x0e00 << 16) | (0xc268 >> 2),
	(0x0e00 << 16) | (0xc26c >> 2),
	(0x0e00 << 16) | (0xc270 >> 2),
	(0x0e00 << 16) | (0xc274 >> 2),
	(0x0e00 << 16) | (0xc28c >> 2),
	(0x0e00 << 16) | (0xc290 >> 2),
	(0x0e00 << 16) | (0xc294 >> 2),
	(0x0e00 << 16) | (0xc298 >> 2),
	(0x0e00 << 16) | (0xc2a0 >> 2),
	(0x0e00 << 16) | (0xc2a4 >> 2),
	(0x0e00 << 16) | (0xc2a8 >> 2),
	(0x0e00 << 16) | (0xc2ac >> 2),
	(0x0e00 << 16) | (0x301d0 >> 2),
	(0x0e00 << 16) | (0x30238 >> 2),
	(0x0e00 << 16) | (0x30250 >> 2),
	(0x0e00 << 16) | (0x30254 >> 2),
	(0x0e00 << 16) | (0x30258 >> 2),
	(0x0e00 << 16) | (0x3025c >> 2),
	(0x4e00 << 16) | (0xc900 >> 2),
	(0x5e00 << 16) | (0xc900 >> 2),
	(0x6e00 << 16) | (0xc900 >> 2),
	(0x7e00 << 16) | (0xc900 >> 2),
	(0x4e00 << 16) | (0xc904 >> 2),
	(0x5e00 << 16) | (0xc904 >> 2),
	(0x6e00 << 16) | (0xc904 >> 2),
	(0x7e00 << 16) | (0xc904 >> 2),
	(0x4e00 << 16) | (0xc908 >> 2),
	(0x5e00 << 16) | (0xc908 >> 2),
	(0x6e00 << 16) | (0xc908 >> 2),
	(0x7e00 << 16) | (0xc908 >> 2),
	(0x4e00 << 16) | (0xc90c >> 2),
	(0x5e00 << 16) | (0xc90c >> 2),
	(0x6e00 << 16) | (0xc90c >> 2),
	(0x7e00 << 16) | (0xc90c >> 2),
	(0x4e00 << 16) | (0xc910 >> 2),
	(0x5e00 << 16) | (0xc910 >> 2),
	(0x6e00 << 16) | (0xc910 >> 2),
	(0x7e00 << 16) | (0xc910 >> 2),
	(0x0e00 << 16) | (0xc99c >> 2),
	(0x0e00 << 16) | (0x9834 >> 2),
	(0x0000 << 16) | (0x30f00 >> 2),
	(0x0000 << 16) | (0x30f04 >> 2),
	(0x0000 << 16) | (0x30f08 >> 2),
	(0x0000 << 16) | (0x30f0c >> 2),
	(0x0600 << 16) | (0x9b7c >> 2),
	(0x0e00 << 16) | (0x8a14 >> 2),
	(0x0e00 << 16) | (0x8a18 >> 2),
	(0x0600 << 16) | (0x30a00 >> 2),
	(0x0e00 << 16) | (0x8bf0 >> 2),
	(0x0e00 << 16) | (0x8bcc >> 2),
	(0x0e00 << 16) | (0x8b24 >> 2),
	(0x0e00 << 16) | (0x30a04 >> 2),
	(0x0600 << 16) | (0x30a10 >> 2),
	(0x0600 << 16) | (0x30a14 >> 2),
	(0x0600 << 16) | (0x30a18 >> 2),
	(0x0600 << 16) | (0x30a2c >> 2),
	(0x0e00 << 16) | (0xc700 >> 2),
	(0x0e00 << 16) | (0xc704 >> 2),
	(0x0e00 << 16) | (0xc708 >> 2),
	(0x0e00 << 16) | (0xc768 >> 2),
	(0x0400 << 16) | (0xc770 >> 2),
	(0x0400 << 16) | (0xc774 >> 2),
	(0x0400 << 16) | (0xc798 >> 2),
	(0x0400 << 16) | (0xc79c >> 2),
	(0x0e00 << 16) | (0x9100 >> 2),
	(0x0e00 << 16) | (0x3c010 >> 2),
	(0x0e00 << 16) | (0x8c00 >> 2),
	(0x0e00 << 16) | (0x8c04 >> 2),
	(0x0e00 << 16) | (0x8c20 >> 2),
	(0x0e00 << 16) | (0x8c38 >> 2),
	(0x0e00 << 16) | (0x8c3c >> 2),
	(0x0e00 << 16) | (0xae00 >> 2),
	(0x0e00 << 16) | (0x9604 >> 2),
	(0x0e00 << 16) | (0xac08 >> 2),
	(0x0e00 << 16) | (0xac0c >> 2),
	(0x0e00 << 16) | (0xac10 >> 2),
	(0x0e00 << 16) | (0xac14 >> 2),
	(0x0e00 << 16) | (0xac58 >> 2),
	(0x0e00 << 16) | (0xac68 >> 2),
	(0x0e00 << 16) | (0xac6c >> 2),
	(0x0e00 << 16) | (0xac70 >> 2),
	(0x0e00 << 16) | (0xac74 >> 2),
	(0x0e00 << 16) | (0xac78 >> 2),
	(0x0e00 << 16) | (0xac7c >> 2),
	(0x0e00 << 16) | (0xac80 >> 2),
	(0x0e00 << 16) | (0xac84 >> 2),
	(0x0e00 << 16) | (0xac88 >> 2),
	(0x0e00 << 16) | (0xac8c >> 2),
	(0x0e00 << 16) | (0x970c >> 2),
	(0x0e00 << 16) | (0x9714 >> 2),
	(0x0e00 << 16) | (0x9718 >> 2),
	(0x0e00 << 16) | (0x971c >> 2),
	(0x0e00 << 16) | (0x31068 >> 2),
	(0x4e00 << 16) | (0x31068 >> 2),
	(0x5e00 << 16) | (0x31068 >> 2),
	(0x6e00 << 16) | (0x31068 >> 2),
	(0x7e00 << 16) | (0x31068 >> 2),
	(0x0e00 << 16) | (0xcd10 >> 2),
	(0x0e00 << 16) | (0xcd14 >> 2),
	(0x0e00 << 16) | (0x88b0 >> 2),
	(0x0e00 << 16) | (0x88b4 >> 2),
	(0x0e00 << 16) | (0x88b8 >> 2),
	(0x0e00 << 16) | (0x88bc >> 2),
	(0x0400 << 16) | (0x89c0 >> 2),
	(0x0e00 << 16) | (0x88c4 >> 2),
	(0x0e00 << 16) | (0x88c8 >> 2),
	(0x0e00 << 16) | (0x88d0 >> 2),
	(0x0e00 << 16) | (0x88d4 >> 2),
	(0x0e00 << 16) | (0x88d8 >> 2),
	(0x0e00 << 16) | (0x8980 >> 2),
	(0x0e00 << 16) | (0x30938 >> 2),
	(0x0e00 << 16) | (0x3093c >> 2),
	(0x0e00 << 16) | (0x30940 >> 2),
	(0x0e00 << 16) | (0x89a0 >> 2),
	(0x0e00 << 16) | (0x30900 >> 2),
	(0x0e00 << 16) | (0x30904 >> 2),
	(0x0e00 << 16) | (0x89b4 >> 2),
	(0x0e00 << 16) | (0x3e1fc >> 2),
	(0x0e00 << 16) | (0x3c210 >> 2),
	(0x0e00 << 16) | (0x3c214 >> 2),
	(0x0e00 << 16) | (0x3c218 >> 2),
	(0x0e00 << 16) | (0x8904 >> 2),
	(0x0e00 << 16) | (0x8c28 >> 2),
	(0x0e00 << 16) | (0x8c2c >> 2),
	(0x0e00 << 16) | (0x8c30 >> 2),
	(0x0e00 << 16) | (0x8c34 >> 2),
	(0x0e00 << 16) | (0x9600 >> 2),
};
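
/*
 * The kalindi list mirrors the spectre list but is shorter: its per-instance
 * runs stop at 0x7e00 and several 0xc7xx/0x92xx entries are absent,
 * consistent with the smaller parts exposing fewer SE/SH instances.
 */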

static u32 gfx_v7_0_get_csb_size(struct amdgpu_device *adev);
static void gfx_v7_0_get_csb_buffer(struct amdgpu_device *adev, volatile u32 *buffer);
static void gfx_v7_0_init_pg(struct amdgpu_device *adev);
static void gfx_v7_0_get_cu_info(struct amdgpu_device *adev);

/*
 * Core functions
 */
/**
 * gfx_v7_0_init_microcode - load ucode images from disk
 *
 * @adev: amdgpu_device pointer
 *
 * Use the firmware interface to load the ucode images into
 * the driver (not loaded into hw).
 * Returns 0 on success, error on failure.
 */
static int gfx_v7_0_init_microcode(struct amdgpu_device *adev)
{
	const char *chip_name;
	char fw_name[30];
	int err;

	DRM_DEBUG("\n");

	switch (adev->asic_type) {
	case CHIP_BONAIRE:
		chip_name = "bonaire";
		break;
	case CHIP_HAWAII:
		chip_name = "hawaii";
		break;
	case CHIP_KAVERI:
		chip_name = "kaveri";
		break;
	case CHIP_KABINI:
		chip_name = "kabini";
		break;
	case CHIP_MULLINS:
		chip_name = "mullins";
		break;
	default:
		BUG();
	}

	snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_pfp.bin", chip_name);
	err = request_firmware(&adev->gfx.pfp_fw, fw_name, adev->dev);
	if (err)
		goto out;
	err = amdgpu_ucode_validate(adev->gfx.pfp_fw);
	if (err)
		goto out;

	snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_me.bin", chip_name);
	err = request_firmware(&adev->gfx.me_fw, fw_name, adev->dev);
	if (err)
		goto out;
	err = amdgpu_ucode_validate(adev->gfx.me_fw);
	if (err)
		goto out;

	snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_ce.bin", chip_name);
	err = request_firmware(&adev->gfx.ce_fw, fw_name, adev->dev);
	if (err)
		goto out;
	err = amdgpu_ucode_validate(adev->gfx.ce_fw);
	if (err)
		goto out;

	snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec.bin", chip_name);
	err = request_firmware(&adev->gfx.mec_fw, fw_name, adev->dev);
	if (err)
		goto out;
	err = amdgpu_ucode_validate(adev->gfx.mec_fw);
	if (err)
		goto out;

	if (adev->asic_type == CHIP_KAVERI) {
		snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec2.bin", chip_name);
		err = request_firmware(&adev->gfx.mec2_fw, fw_name, adev->dev);
		if (err)
			goto out;
		err = amdgpu_ucode_validate(adev->gfx.mec2_fw);
		if (err)
			goto out;
	}

	snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_rlc.bin", chip_name);
	err = request_firmware(&adev->gfx.rlc_fw, fw_name, adev->dev);
	if (err)
		goto out;
	err = amdgpu_ucode_validate(adev->gfx.rlc_fw);

out:
	if (err) {
		pr_err("gfx7: Failed to load firmware \"%s\"\n", fw_name);
		release_firmware(adev->gfx.pfp_fw);
		adev->gfx.pfp_fw = NULL;
		release_firmware(adev->gfx.me_fw);
		adev->gfx.me_fw = NULL;
		release_firmware(adev->gfx.ce_fw);
		adev->gfx.ce_fw = NULL;
		release_firmware(adev->gfx.mec_fw);
		adev->gfx.mec_fw = NULL;
		release_firmware(adev->gfx.mec2_fw);
		adev->gfx.mec2_fw = NULL;
		release_firmware(adev->gfx.rlc_fw);
		adev->gfx.rlc_fw = NULL;
	}
	return err;
}
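
/*
 * Note: the firmware names requested above follow the
 * "amdgpu/<chip>_<block>.bin" convention and must stay in sync with the
 * MODULE_FIRMWARE() declarations at the top of this file so that initramfs
 * tooling can bundle the files.
 */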
static void gfx_v7_0_free_microcode(struct amdgpu_device *adev)
{
	release_firmware(adev->gfx.pfp_fw);
	adev->gfx.pfp_fw = NULL;
	release_firmware(adev->gfx.me_fw);
	adev->gfx.me_fw = NULL;
	release_firmware(adev->gfx.ce_fw);
	adev->gfx.ce_fw = NULL;
	release_firmware(adev->gfx.mec_fw);
	adev->gfx.mec_fw = NULL;
	release_firmware(adev->gfx.mec2_fw);
	adev->gfx.mec2_fw = NULL;
	release_firmware(adev->gfx.rlc_fw);
	adev->gfx.rlc_fw = NULL;
}

/**
 * gfx_v7_0_tiling_mode_table_init - init the hw tiling table
 *
 * @adev: amdgpu_device pointer
 *
 * Starting with SI, the tiling setup is done globally in a
 * set of 32 tiling modes.  Rather than selecting each set of
 * parameters per surface as on older asics, we just select
 * which index in the tiling table we want to use, and the
 * surface uses those parameters (CIK).
 */
static void gfx_v7_0_tiling_mode_table_init(struct amdgpu_device *adev)
{
	const u32 num_tile_mode_states =
			ARRAY_SIZE(adev->gfx.config.tile_mode_array);
	const u32 num_secondary_tile_mode_states =
			ARRAY_SIZE(adev->gfx.config.macrotile_mode_array);
	u32 reg_offset, split_equal_to_row_size;
	uint32_t *tile, *macrotile;

	tile = adev->gfx.config.tile_mode_array;
	macrotile = adev->gfx.config.macrotile_mode_array;

	switch (adev->gfx.config.mem_row_size_in_kb) {
	case 1:
		split_equal_to_row_size = ADDR_SURF_TILE_SPLIT_1KB;
		break;
	case 2:
	default:
		split_equal_to_row_size = ADDR_SURF_TILE_SPLIT_2KB;
		break;
	case 4:
		split_equal_to_row_size = ADDR_SURF_TILE_SPLIT_4KB;
		break;
	}

	for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++)
		tile[reg_offset] = 0;
	for (reg_offset = 0; reg_offset < num_secondary_tile_mode_states; reg_offset++)
		macrotile[reg_offset] = 0;
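
	/*
	 * tile[] and macrotile[] shadow the GB_TILE_MODE and
	 * GB_MACROTILE_MODE register banks: each asic case below fills them
	 * in and programs the hardware via the WREG32 loops, leaving the
	 * copies in adev->gfx.config available for later queries.
	 */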
	switch (adev->asic_type) {
	case CHIP_BONAIRE:
		tile[0] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
			   PIPE_CONFIG(ADDR_SURF_P4_16x16) |
			   TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
			   MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
		tile[1] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
			   PIPE_CONFIG(ADDR_SURF_P4_16x16) |
			   TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B) |
			   MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
		tile[2] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
			   PIPE_CONFIG(ADDR_SURF_P4_16x16) |
			   TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
			   MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
		tile[3] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
			   PIPE_CONFIG(ADDR_SURF_P4_16x16) |
			   TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
			   MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
		tile[4] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
			   PIPE_CONFIG(ADDR_SURF_P4_16x16) |
			   MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
			   TILE_SPLIT(split_equal_to_row_size));
		tile[5] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
			   PIPE_CONFIG(ADDR_SURF_P4_16x16) |
			   MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
		tile[6] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
			   PIPE_CONFIG(ADDR_SURF_P4_16x16) |
			   MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
			   TILE_SPLIT(split_equal_to_row_size));
		tile[7] = (TILE_SPLIT(split_equal_to_row_size));
		tile[8] = (ARRAY_MODE(ARRAY_LINEAR_ALIGNED) |
			   PIPE_CONFIG(ADDR_SURF_P4_16x16));
		tile[9] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
			   PIPE_CONFIG(ADDR_SURF_P4_16x16) |
			   MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING));
		tile[10] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
			    PIPE_CONFIG(ADDR_SURF_P4_16x16) |
			    MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
		tile[11] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
			    PIPE_CONFIG(ADDR_SURF_P4_16x16) |
			    MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
		tile[12] = (TILE_SPLIT(split_equal_to_row_size));
		tile[13] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
			    PIPE_CONFIG(ADDR_SURF_P4_16x16) |
			    MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING));
		tile[14] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
			    PIPE_CONFIG(ADDR_SURF_P4_16x16) |
			    MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
		tile[15] = (ARRAY_MODE(ARRAY_3D_TILED_THIN1) |
			    PIPE_CONFIG(ADDR_SURF_P4_16x16) |
			    MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
		tile[16] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
			    PIPE_CONFIG(ADDR_SURF_P4_16x16) |
			    MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
		tile[17] = (TILE_SPLIT(split_equal_to_row_size));
		tile[18] = (ARRAY_MODE(ARRAY_1D_TILED_THICK) |
			    PIPE_CONFIG(ADDR_SURF_P4_16x16) |
			    MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
		tile[19] = (ARRAY_MODE(ARRAY_1D_TILED_THICK) |
			    PIPE_CONFIG(ADDR_SURF_P4_16x16) |
			    MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING));
		tile[20] = (ARRAY_MODE(ARRAY_2D_TILED_THICK) |
			    PIPE_CONFIG(ADDR_SURF_P4_16x16) |
			    MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
		tile[21] = (ARRAY_MODE(ARRAY_3D_TILED_THICK) |
			    PIPE_CONFIG(ADDR_SURF_P4_16x16) |
			    MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
		tile[22] = (ARRAY_MODE(ARRAY_PRT_TILED_THICK) |
			    PIPE_CONFIG(ADDR_SURF_P4_16x16) |
			    MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
		tile[23] = (TILE_SPLIT(split_equal_to_row_size));
		tile[24] = (ARRAY_MODE(ARRAY_2D_TILED_THICK) |
			    PIPE_CONFIG(ADDR_SURF_P4_16x16) |
			    MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
		tile[25] = (ARRAY_MODE(ARRAY_2D_TILED_XTHICK) |
			    PIPE_CONFIG(ADDR_SURF_P4_16x16) |
			    MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
		tile[26] = (ARRAY_MODE(ARRAY_3D_TILED_XTHICK) |
			    PIPE_CONFIG(ADDR_SURF_P4_16x16) |
			    MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
		tile[27] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
			    PIPE_CONFIG(ADDR_SURF_P4_16x16) |
			    MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING));
		tile[28] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
			    PIPE_CONFIG(ADDR_SURF_P4_16x16) |
			    MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
		tile[29] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
			    PIPE_CONFIG(ADDR_SURF_P4_16x16) |
			    MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
		tile[30] = (TILE_SPLIT(split_equal_to_row_size));

		macrotile[0] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
				NUM_BANKS(ADDR_SURF_16_BANK));
		macrotile[1] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
				NUM_BANKS(ADDR_SURF_16_BANK));
		macrotile[2] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
				NUM_BANKS(ADDR_SURF_16_BANK));
		macrotile[3] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
				NUM_BANKS(ADDR_SURF_16_BANK));
		macrotile[4] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
				NUM_BANKS(ADDR_SURF_16_BANK));
		macrotile[5] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
				NUM_BANKS(ADDR_SURF_8_BANK));
		macrotile[6] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
				NUM_BANKS(ADDR_SURF_4_BANK));
		macrotile[8] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) |
				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_8) |
				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
				NUM_BANKS(ADDR_SURF_16_BANK));
		macrotile[9] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) |
				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
				NUM_BANKS(ADDR_SURF_16_BANK));
		macrotile[10] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
				 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
				 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
				 NUM_BANKS(ADDR_SURF_16_BANK));
		macrotile[11] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
				 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
				 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
				 NUM_BANKS(ADDR_SURF_16_BANK));
		macrotile[12] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
				 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
				 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
				 NUM_BANKS(ADDR_SURF_16_BANK));
		macrotile[13] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
				 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
				 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
				 NUM_BANKS(ADDR_SURF_8_BANK));
		macrotile[14] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
				 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
				 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
				 NUM_BANKS(ADDR_SURF_4_BANK));

		for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++)
			WREG32(mmGB_TILE_MODE0 + reg_offset, tile[reg_offset]);
		for (reg_offset = 0; reg_offset < num_secondary_tile_mode_states; reg_offset++)
			if (reg_offset != 7)
				WREG32(mmGB_MACROTILE_MODE0 + reg_offset, macrotile[reg_offset]);
		break;
	case CHIP_HAWAII:
		tile[0] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
			   PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
			   TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
			   MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
		tile[1] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
			   PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
			   TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B) |
			   MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
		tile[2] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
			   PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
			   TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
			   MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
		tile[3] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
			   PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
			   TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
			   MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
		tile[4] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
			   PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
			   MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
			   TILE_SPLIT(split_equal_to_row_size));
		tile[5] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
			   PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
			   MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
			   TILE_SPLIT(split_equal_to_row_size));
		tile[6] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
			   PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
			   MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
			   TILE_SPLIT(split_equal_to_row_size));
		tile[7] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
			   PIPE_CONFIG(ADDR_SURF_P4_16x16) |
			   MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
			   TILE_SPLIT(split_equal_to_row_size));
		tile[8] = (ARRAY_MODE(ARRAY_LINEAR_ALIGNED) |
			   PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16));
		tile[9] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
			   PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
			   MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING));
		tile[10] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
			    PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
			    MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
		tile[11] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
			    PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
			    MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
		tile[12] = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
			    PIPE_CONFIG(ADDR_SURF_P4_16x16) |
			    MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
		tile[13] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
			    PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
			    MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING));
		tile[14] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
			    PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
			    MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
		tile[15] = (ARRAY_MODE(ARRAY_3D_TILED_THIN1) |
			    PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
			    MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
		tile[16] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
			    PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
			    MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
		tile[17] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
			    PIPE_CONFIG(ADDR_SURF_P4_16x16) |
			    MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
		tile[18] = (ARRAY_MODE(ARRAY_1D_TILED_THICK) |
			    PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
			    MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
		tile[19] = (ARRAY_MODE(ARRAY_1D_TILED_THICK) |
			    PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
			    MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING));
		tile[20] = (ARRAY_MODE(ARRAY_2D_TILED_THICK) |
			    PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
			    MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
		tile[21] = (ARRAY_MODE(ARRAY_3D_TILED_THICK) |
			    PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
			    MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
		tile[22] = (ARRAY_MODE(ARRAY_PRT_TILED_THICK) |
			    PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
			    MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
		tile[23] = (ARRAY_MODE(ARRAY_PRT_TILED_THICK) |
			    PIPE_CONFIG(ADDR_SURF_P4_16x16) |
			    MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
		tile[24] = (ARRAY_MODE(ARRAY_2D_TILED_THICK) |
			    PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
			    MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
		tile[25] = (ARRAY_MODE(ARRAY_2D_TILED_XTHICK) |
			    PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
			    MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
		tile[26] = (ARRAY_MODE(ARRAY_3D_TILED_XTHICK) |
			    PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
			    MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
		tile[27] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
			    PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
			    MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING));
		tile[28] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
			    PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
			    MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
		tile[29] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
			    PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
			    MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
		tile[30] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
			    PIPE_CONFIG(ADDR_SURF_P4_16x16) |
			    MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));

		macrotile[0] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
				NUM_BANKS(ADDR_SURF_16_BANK));
		macrotile[1] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
				NUM_BANKS(ADDR_SURF_16_BANK));
		macrotile[2] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
				NUM_BANKS(ADDR_SURF_16_BANK));
		macrotile[3] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
				NUM_BANKS(ADDR_SURF_16_BANK));
		macrotile[4] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
				NUM_BANKS(ADDR_SURF_8_BANK));
		macrotile[5] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
				NUM_BANKS(ADDR_SURF_4_BANK));
		macrotile[6] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
				NUM_BANKS(ADDR_SURF_4_BANK));
		macrotile[8] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
				NUM_BANKS(ADDR_SURF_16_BANK));
		macrotile[9] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
				NUM_BANKS(ADDR_SURF_16_BANK));
		macrotile[10] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
				 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
				 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
				 NUM_BANKS(ADDR_SURF_16_BANK));
		macrotile[11] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
				 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
				 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
				 NUM_BANKS(ADDR_SURF_8_BANK));
		macrotile[12] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
				 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
				 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
				 NUM_BANKS(ADDR_SURF_16_BANK));
		macrotile[13] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
				 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
				 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
				 NUM_BANKS(ADDR_SURF_8_BANK));
		macrotile[14] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
				 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
				 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
				 NUM_BANKS(ADDR_SURF_4_BANK));

		for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++)
			WREG32(mmGB_TILE_MODE0 + reg_offset, tile[reg_offset]);
		for (reg_offset = 0; reg_offset < num_secondary_tile_mode_states; reg_offset++)
			if (reg_offset != 7)
				WREG32(mmGB_MACROTILE_MODE0 + reg_offset, macrotile[reg_offset]);
		break;
	case CHIP_KAVERI:
	case CHIP_KABINI:
	case CHIP_MULLINS:
	default:
		tile[0] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
			   PIPE_CONFIG(ADDR_SURF_P2) |
			   TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
			   MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
		tile[1] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
			   PIPE_CONFIG(ADDR_SURF_P2) |
			   TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B) |
			   MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
		tile[2] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
			   PIPE_CONFIG(ADDR_SURF_P2) |
			   TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
			   MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
		tile[3] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
			   PIPE_CONFIG(ADDR_SURF_P2) |
			   TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
			   MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
		tile[4] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
			   PIPE_CONFIG(ADDR_SURF_P2) |
			   MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
			   TILE_SPLIT(split_equal_to_row_size));
		tile[5] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
			   PIPE_CONFIG(ADDR_SURF_P2) |
			   MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
		tile[6] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
			   PIPE_CONFIG(ADDR_SURF_P2) |
			   MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
			   TILE_SPLIT(split_equal_to_row_size));
		tile[7] = (TILE_SPLIT(split_equal_to_row_size));
		tile[8] = (ARRAY_MODE(ARRAY_LINEAR_ALIGNED) |
			   PIPE_CONFIG(ADDR_SURF_P2));
		tile[9] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
			   PIPE_CONFIG(ADDR_SURF_P2) |
			   MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING));
		tile[10] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
			    PIPE_CONFIG(ADDR_SURF_P2) |
			    MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
		tile[11] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
			    PIPE_CONFIG(ADDR_SURF_P2) |
			    MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
		tile[12] = (TILE_SPLIT(split_equal_to_row_size));
		tile[13] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
			    PIPE_CONFIG(ADDR_SURF_P2) |
			    MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING));
		tile[14] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
			    PIPE_CONFIG(ADDR_SURF_P2) |
			    MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
		tile[15] = (ARRAY_MODE(ARRAY_3D_TILED_THIN1) |
			    PIPE_CONFIG(ADDR_SURF_P2) |
			    MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
		tile[16] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
			    PIPE_CONFIG(ADDR_SURF_P2) |
			    MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
		tile[17] = (TILE_SPLIT(split_equal_to_row_size));
		tile[18] = (ARRAY_MODE(ARRAY_1D_TILED_THICK) |
			    PIPE_CONFIG(ADDR_SURF_P2) |
			    MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
		tile[19] = (ARRAY_MODE(ARRAY_1D_TILED_THICK) |
			    PIPE_CONFIG(ADDR_SURF_P2) |
			    MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING));
		tile[20] = (ARRAY_MODE(ARRAY_2D_TILED_THICK) |
			    PIPE_CONFIG(ADDR_SURF_P2) |
			    MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
		tile[21] = (ARRAY_MODE(ARRAY_3D_TILED_THICK) |
			    PIPE_CONFIG(ADDR_SURF_P2) |
			    MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
		tile[22] = (ARRAY_MODE(ARRAY_PRT_TILED_THICK) |
			    PIPE_CONFIG(ADDR_SURF_P2) |
			    MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
		tile[23] = (TILE_SPLIT(split_equal_to_row_size));
		tile[24] = (ARRAY_MODE(ARRAY_2D_TILED_THICK) |
			    PIPE_CONFIG(ADDR_SURF_P2) |
			    MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
		tile[25] = (ARRAY_MODE(ARRAY_2D_TILED_XTHICK) |
			    PIPE_CONFIG(ADDR_SURF_P2) |
			    MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
		tile[26] = (ARRAY_MODE(ARRAY_3D_TILED_XTHICK) |
			    PIPE_CONFIG(ADDR_SURF_P2) |
			    MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
		tile[27] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
			    PIPE_CONFIG(ADDR_SURF_P2) |
			    MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING));
		tile[28] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
			    PIPE_CONFIG(ADDR_SURF_P2) |
			    MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
		tile[29] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
			    PIPE_CONFIG(ADDR_SURF_P2) |
			    MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
		tile[30] = (TILE_SPLIT(split_equal_to_row_size));

		macrotile[0] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
				NUM_BANKS(ADDR_SURF_8_BANK));
		macrotile[1] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
				NUM_BANKS(ADDR_SURF_8_BANK));
		macrotile[2] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
				NUM_BANKS(ADDR_SURF_8_BANK));
		macrotile[3] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
				NUM_BANKS(ADDR_SURF_8_BANK));
		macrotile[4] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
				NUM_BANKS(ADDR_SURF_8_BANK));
		macrotile[5] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
				NUM_BANKS(ADDR_SURF_8_BANK));
		macrotile[6] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
				NUM_BANKS(ADDR_SURF_8_BANK));
		macrotile[8] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_4) |
				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_8) |
				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
				NUM_BANKS(ADDR_SURF_16_BANK));
		macrotile[9] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_4) |
				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
				NUM_BANKS(ADDR_SURF_16_BANK));
		macrotile[10] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) |
				 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
				 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
				 NUM_BANKS(ADDR_SURF_16_BANK));
		macrotile[11] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) |
				 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
				 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
				 NUM_BANKS(ADDR_SURF_16_BANK));
		macrotile[12] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
				 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
				 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
				 NUM_BANKS(ADDR_SURF_16_BANK));
		macrotile[13] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
				 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
				 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
				 NUM_BANKS(ADDR_SURF_16_BANK));
		macrotile[14] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
				 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
				 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
				 NUM_BANKS(ADDR_SURF_8_BANK));

		for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++)
			WREG32(mmGB_TILE_MODE0 + reg_offset, tile[reg_offset]);
		for (reg_offset = 0; reg_offset < num_secondary_tile_mode_states; reg_offset++)
			if (reg_offset != 7)
				WREG32(mmGB_MACROTILE_MODE0 + reg_offset, macrotile[reg_offset]);
		break;
	}
}

/**
 * gfx_v7_0_select_se_sh - select which SE, SH to address
 *
 * @adev: amdgpu_device pointer
 * @se_num: shader engine to address
 * @sh_num: sh block to address
 *
 * Select which SE, SH combinations to address. Certain
 * registers are instanced per SE or SH.  0xffffffff means
 * broadcast to all SEs or SHs (CIK).
 */
static void gfx_v7_0_select_se_sh(struct amdgpu_device *adev,
				  u32 se_num, u32 sh_num, u32 instance)
{
	u32 data;

	if (instance == 0xffffffff)
		data = REG_SET_FIELD(0, GRBM_GFX_INDEX, INSTANCE_BROADCAST_WRITES, 1);
	else
		data = REG_SET_FIELD(0, GRBM_GFX_INDEX, INSTANCE_INDEX, instance);

	if ((se_num == 0xffffffff) && (sh_num == 0xffffffff))
		data |= GRBM_GFX_INDEX__SH_BROADCAST_WRITES_MASK |
			GRBM_GFX_INDEX__SE_BROADCAST_WRITES_MASK;
	else if (se_num == 0xffffffff)
		data |= GRBM_GFX_INDEX__SE_BROADCAST_WRITES_MASK |
			(sh_num << GRBM_GFX_INDEX__SH_INDEX__SHIFT);
	else if (sh_num == 0xffffffff)
		data |= GRBM_GFX_INDEX__SH_BROADCAST_WRITES_MASK |
			(se_num << GRBM_GFX_INDEX__SE_INDEX__SHIFT);
	else
		data |= (sh_num << GRBM_GFX_INDEX__SH_INDEX__SHIFT) |
			(se_num << GRBM_GFX_INDEX__SE_INDEX__SHIFT);
	WREG32(mmGRBM_GFX_INDEX, data);
}
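
/*
 * Callers hold adev->grbm_idx_mutex around select/read/restore sequences:
 * gfx_v7_0_setup_rb() below, for example, selects each (se, sh) pair in
 * turn, samples the instanced registers, and finishes with a full
 * 0xffffffff broadcast select so that later writes reach every instance
 * again.
 */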

/**
 * gfx_v7_0_get_rb_active_bitmap - computes the mask of enabled RBs
 *
 * @adev: amdgpu_device pointer
 *
 * Calculates the bitmask of enabled RBs (CIK).
 * Returns the enabled RB bitmask.
 */
static u32 gfx_v7_0_get_rb_active_bitmap(struct amdgpu_device *adev)
{
	u32 data, mask;

	data = RREG32(mmCC_RB_BACKEND_DISABLE);
	data |= RREG32(mmGC_USER_RB_BACKEND_DISABLE);

	data &= CC_RB_BACKEND_DISABLE__BACKEND_DISABLE_MASK;
	data >>= GC_USER_RB_BACKEND_DISABLE__BACKEND_DISABLE__SHIFT;

	mask = amdgpu_gfx_create_bitmask(adev->gfx.config.max_backends_per_se /
					 adev->gfx.config.max_sh_per_se);

	return (~data) & mask;
}
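
/*
 * Worked example (hypothetical numbers): with four backends per SE and one
 * SH per SE, mask = 0xf; if the combined DISABLE field reads 0x2 (RB1 fused
 * off or user-disabled), the function returns ~0x2 & 0xf = 0xd, i.e. RBs
 * 0, 2 and 3 active.
 */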

static void
gfx_v7_0_raster_config(struct amdgpu_device *adev, u32 *rconf, u32 *rconf1)
{
	switch (adev->asic_type) {
	case CHIP_BONAIRE:
		*rconf |= RB_MAP_PKR0(2) | RB_XSEL2(1) | SE_MAP(2) |
			  SE_XSEL(1) | SE_YSEL(1);
		*rconf1 |= 0x0;
		break;
	case CHIP_HAWAII:
		*rconf |= RB_MAP_PKR0(2) | RB_MAP_PKR1(2) |
			  RB_XSEL2(1) | PKR_MAP(2) | PKR_XSEL(1) |
			  PKR_YSEL(1) | SE_MAP(2) | SE_XSEL(2) |
			  SE_YSEL(3);
		*rconf1 |= SE_PAIR_MAP(2) | SE_PAIR_XSEL(3) |
			   SE_PAIR_YSEL(2);
		break;
	case CHIP_KAVERI:
		*rconf |= RB_MAP_PKR0(2);
		*rconf1 |= 0x0;
		break;
	case CHIP_KABINI:
	case CHIP_MULLINS:
		*rconf |= 0x0;
		*rconf1 |= 0x0;
		break;
	default:
		DRM_ERROR("unknown asic: 0x%x\n", adev->asic_type);
		break;
	}
}

static void
gfx_v7_0_write_harvested_raster_configs(struct amdgpu_device *adev,
					u32 raster_config, u32 raster_config_1,
					unsigned rb_mask, unsigned num_rb)
{
	unsigned sh_per_se = max_t(unsigned, adev->gfx.config.max_sh_per_se, 1);
	unsigned num_se = max_t(unsigned, adev->gfx.config.max_shader_engines, 1);
	unsigned rb_per_pkr = min_t(unsigned, num_rb / num_se / sh_per_se, 2);
	unsigned rb_per_se = num_rb / num_se;
	unsigned se_mask[4];
	unsigned se;

	se_mask[0] = ((1 << rb_per_se) - 1) & rb_mask;
	se_mask[1] = (se_mask[0] << rb_per_se) & rb_mask;
	se_mask[2] = (se_mask[1] << rb_per_se) & rb_mask;
	se_mask[3] = (se_mask[2] << rb_per_se) & rb_mask;

	WARN_ON(!(num_se == 1 || num_se == 2 || num_se == 4));
	WARN_ON(!(sh_per_se == 1 || sh_per_se == 2));
	WARN_ON(!(rb_per_pkr == 1 || rb_per_pkr == 2));
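
	/*
	 * se_mask[n] now holds the surviving RBs of shader engine n.  The
	 * code below rewrites the SE/PKR/RB map fields of the raster config
	 * for each SE so that rasterizer work is only routed to packers and
	 * render backends whose mask bits survived harvesting.
	 */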
	if ((num_se > 2) && ((!se_mask[0] && !se_mask[1]) ||
			     (!se_mask[2] && !se_mask[3]))) {
		raster_config_1 &= ~SE_PAIR_MAP_MASK;

		if (!se_mask[0] && !se_mask[1]) {
			raster_config_1 |=
				SE_PAIR_MAP(RASTER_CONFIG_SE_PAIR_MAP_3);
		} else {
			raster_config_1 |=
				SE_PAIR_MAP(RASTER_CONFIG_SE_PAIR_MAP_0);
		}
	}

	for (se = 0; se < num_se; se++) {
		unsigned raster_config_se = raster_config;
		unsigned pkr0_mask = ((1 << rb_per_pkr) - 1) << (se * rb_per_se);
		unsigned pkr1_mask = pkr0_mask << rb_per_pkr;
		int idx = (se / 2) * 2;

		if ((num_se > 1) && (!se_mask[idx] || !se_mask[idx + 1])) {
			raster_config_se &= ~SE_MAP_MASK;

			if (!se_mask[idx]) {
				raster_config_se |= SE_MAP(RASTER_CONFIG_SE_MAP_3);
			} else {
				raster_config_se |= SE_MAP(RASTER_CONFIG_SE_MAP_0);
			}
		}

		pkr0_mask &= rb_mask;
		pkr1_mask &= rb_mask;
		if (rb_per_se > 2 && (!pkr0_mask || !pkr1_mask)) {
			raster_config_se &= ~PKR_MAP_MASK;

			if (!pkr0_mask) {
				raster_config_se |= PKR_MAP(RASTER_CONFIG_PKR_MAP_3);
			} else {
				raster_config_se |= PKR_MAP(RASTER_CONFIG_PKR_MAP_0);
			}
		}

		if (rb_per_se >= 2) {
			unsigned rb0_mask = 1 << (se * rb_per_se);
			unsigned rb1_mask = rb0_mask << 1;

			rb0_mask &= rb_mask;
			rb1_mask &= rb_mask;
			if (!rb0_mask || !rb1_mask) {
				raster_config_se &= ~RB_MAP_PKR0_MASK;

				if (!rb0_mask) {
					raster_config_se |=
						RB_MAP_PKR0(RASTER_CONFIG_RB_MAP_3);
				} else {
					raster_config_se |=
						RB_MAP_PKR0(RASTER_CONFIG_RB_MAP_0);
				}
			}

			if (rb_per_se > 2) {
				rb0_mask = 1 << (se * rb_per_se + rb_per_pkr);
				rb1_mask = rb0_mask << 1;
				rb0_mask &= rb_mask;
				rb1_mask &= rb_mask;
				if (!rb0_mask || !rb1_mask) {
					raster_config_se &= ~RB_MAP_PKR1_MASK;

					if (!rb0_mask) {
						raster_config_se |=
							RB_MAP_PKR1(RASTER_CONFIG_RB_MAP_3);
					} else {
						raster_config_se |=
							RB_MAP_PKR1(RASTER_CONFIG_RB_MAP_0);
					}
				}
			}
		}

		/* GRBM_GFX_INDEX has a different offset on CI+ */
		gfx_v7_0_select_se_sh(adev, se, 0xffffffff, 0xffffffff);
		WREG32(mmPA_SC_RASTER_CONFIG, raster_config_se);
		WREG32(mmPA_SC_RASTER_CONFIG_1, raster_config_1);
	}

	/* GRBM_GFX_INDEX has a different offset on CI+ */
	gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
}

/**
 * gfx_v7_0_setup_rb - setup the RBs on the asic
 *
 * @adev: amdgpu_device pointer
 *
 * Configures per-SE/SH RB registers (CIK).
 */
static void gfx_v7_0_setup_rb(struct amdgpu_device *adev)
{
	int i, j;
	u32 data;
	u32 raster_config = 0, raster_config_1 = 0;
	u32 active_rbs = 0;
	u32 rb_bitmap_width_per_sh = adev->gfx.config.max_backends_per_se /
					adev->gfx.config.max_sh_per_se;
	unsigned num_rb_pipes;

	mutex_lock(&adev->grbm_idx_mutex);
	for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
		for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
			gfx_v7_0_select_se_sh(adev, i, j, 0xffffffff);
			data = gfx_v7_0_get_rb_active_bitmap(adev);
			active_rbs |= data << ((i * adev->gfx.config.max_sh_per_se + j) *
					       rb_bitmap_width_per_sh);
		}
	}
	gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);

	adev->gfx.config.backend_enable_mask = active_rbs;
	adev->gfx.config.num_rbs = hweight32(active_rbs);

	num_rb_pipes = min_t(unsigned, adev->gfx.config.max_backends_per_se *
			     adev->gfx.config.max_shader_engines, 16);

	gfx_v7_0_raster_config(adev, &raster_config, &raster_config_1);

	if (!adev->gfx.config.backend_enable_mask ||
	    adev->gfx.config.num_rbs >= num_rb_pipes) {
		WREG32(mmPA_SC_RASTER_CONFIG, raster_config);
		WREG32(mmPA_SC_RASTER_CONFIG_1, raster_config_1);
	} else {
		gfx_v7_0_write_harvested_raster_configs(adev, raster_config, raster_config_1,
							adev->gfx.config.backend_enable_mask,
							num_rb_pipes);
	}

	/* cache the values for userspace */
	for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
		for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
			gfx_v7_0_select_se_sh(adev, i, j, 0xffffffff);
			adev->gfx.config.rb_config[i][j].rb_backend_disable =
				RREG32(mmCC_RB_BACKEND_DISABLE);
			adev->gfx.config.rb_config[i][j].user_rb_backend_disable =
				RREG32(mmGC_USER_RB_BACKEND_DISABLE);
			adev->gfx.config.rb_config[i][j].raster_config =
				RREG32(mmPA_SC_RASTER_CONFIG);
			adev->gfx.config.rb_config[i][j].raster_config_1 =
				RREG32(mmPA_SC_RASTER_CONFIG_1);
		}
	}
	gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
	mutex_unlock(&adev->grbm_idx_mutex);
}

/**
 * gfx_v7_0_init_compute_vmid - init the compute VMIDs
 *
 * @adev: amdgpu_device pointer
 *
 * Initialize compute vmid sh_mem registers
 *
 */
#define DEFAULT_SH_MEM_BASES	(0x6000)
#define FIRST_COMPUTE_VMID	(8)
#define LAST_COMPUTE_VMID	(16)
static void gfx_v7_0_init_compute_vmid(struct amdgpu_device *adev)
{
	int i;
	uint32_t sh_mem_config;
	uint32_t sh_mem_bases;

	/*
	 * Configure apertures:
	 * LDS:         0x60000000'00000000 - 0x60000001'00000000 (4GB)
	 * Scratch:     0x60000001'00000000 - 0x60000002'00000000 (4GB)
	 * GPUVM:       0x60010000'00000000 - 0x60020000'00000000 (1TB)
	 */
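	/*
	 * SH_MEM_BASES packs two 16-bit fields: the private aperture base in
	 * bits [15:0] and the shared aperture base in bits [31:16].  Each
	 * field supplies bits 63:48 of the virtual address, so 0x6000 in
	 * both fields selects the 0x6000000000000000 window listed above
	 * (layout summarized here for clarity).
	 */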
	sh_mem_bases = DEFAULT_SH_MEM_BASES | (DEFAULT_SH_MEM_BASES << 16);
	sh_mem_config = SH_MEM_ALIGNMENT_MODE_UNALIGNED <<
			SH_MEM_CONFIG__ALIGNMENT_MODE__SHIFT;
	sh_mem_config |= MTYPE_NONCACHED << SH_MEM_CONFIG__DEFAULT_MTYPE__SHIFT;
	mutex_lock(&adev->srbm_mutex);
	for (i = FIRST_COMPUTE_VMID; i < LAST_COMPUTE_VMID; i++) {
		cik_srbm_select(adev, 0, 0, 0, i);
		/* CP and shaders */
		WREG32(mmSH_MEM_CONFIG, sh_mem_config);
		WREG32(mmSH_MEM_APE1_BASE, 1);
		WREG32(mmSH_MEM_APE1_LIMIT, 0);
		WREG32(mmSH_MEM_BASES, sh_mem_bases);
	}
	cik_srbm_select(adev, 0, 0, 0, 0);
	mutex_unlock(&adev->srbm_mutex);

	/* Initialize all compute VMIDs to have no GDS, GWS, or OA
	   access. These should be enabled by FW for target VMIDs. */
	for (i = FIRST_COMPUTE_VMID; i < LAST_COMPUTE_VMID; i++) {
		WREG32(amdgpu_gds_reg_offset[i].mem_base, 0);
		WREG32(amdgpu_gds_reg_offset[i].mem_size, 0);
		WREG32(amdgpu_gds_reg_offset[i].gws, 0);
		WREG32(amdgpu_gds_reg_offset[i].oa, 0);
	}
}

static void gfx_v7_0_init_gds_vmid(struct amdgpu_device *adev)
{
	int vmid;

	/*
	 * Initialize all compute and user-gfx VMIDs to have no GDS, GWS, or OA
	 * access. Compute VMIDs should be enabled by FW for target VMIDs,
	 * the driver can enable them for graphics. VMID0 should maintain
	 * access so that HWS firmware can save/restore entries.
	 */
	for (vmid = 1; vmid < 16; vmid++) {
		WREG32(amdgpu_gds_reg_offset[vmid].mem_base, 0);
		WREG32(amdgpu_gds_reg_offset[vmid].mem_size, 0);
		WREG32(amdgpu_gds_reg_offset[vmid].gws, 0);
		WREG32(amdgpu_gds_reg_offset[vmid].oa, 0);
	}
}
1911 static void gfx_v7_0_config_init(struct amdgpu_device *adev)
1913 adev->gfx.config.double_offchip_lds_buf = 1;
1917 * gfx_v7_0_constants_init - setup the 3D engine
1919 * @adev: amdgpu_device pointer
1921 * init the gfx constants such as the 3D engine, tiling configuration
1922 * registers, maximum number of quad pipes, render backends...
1924 static void gfx_v7_0_constants_init(struct amdgpu_device *adev)
1926 u32 sh_mem_cfg, sh_static_mem_cfg, sh_mem_base;
1930 WREG32(mmGRBM_CNTL, (0xff << GRBM_CNTL__READ_TIMEOUT__SHIFT));
1932 WREG32(mmGB_ADDR_CONFIG, adev->gfx.config.gb_addr_config);
1933 WREG32(mmHDP_ADDR_CONFIG, adev->gfx.config.gb_addr_config);
1934 WREG32(mmDMIF_ADDR_CALC, adev->gfx.config.gb_addr_config);
1936 gfx_v7_0_tiling_mode_table_init(adev);
1938 gfx_v7_0_setup_rb(adev);
1939 gfx_v7_0_get_cu_info(adev);
1940 gfx_v7_0_config_init(adev);
1942 /* set HW defaults for 3D engine */
1943 WREG32(mmCP_MEQ_THRESHOLDS,
1944 (0x30 << CP_MEQ_THRESHOLDS__MEQ1_START__SHIFT) |
1945 (0x60 << CP_MEQ_THRESHOLDS__MEQ2_START__SHIFT));
1947 mutex_lock(&adev->grbm_idx_mutex);
1949 * making sure that the following register writes will be broadcast
1950 * to all the shaders
1952 gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
1954 /* XXX SH_MEM regs */
1955 /* where to put LDS, scratch, GPUVM in FSA64 space */
1956 sh_mem_cfg = REG_SET_FIELD(0, SH_MEM_CONFIG, ALIGNMENT_MODE,
1957 SH_MEM_ALIGNMENT_MODE_UNALIGNED);
1958 sh_mem_cfg = REG_SET_FIELD(sh_mem_cfg, SH_MEM_CONFIG, DEFAULT_MTYPE,
1960 sh_mem_cfg = REG_SET_FIELD(sh_mem_cfg, SH_MEM_CONFIG, APE1_MTYPE,
1962 sh_mem_cfg = REG_SET_FIELD(sh_mem_cfg, SH_MEM_CONFIG, PRIVATE_ATC, 0);
1964 sh_static_mem_cfg = REG_SET_FIELD(0, SH_STATIC_MEM_CONFIG,
1966 sh_static_mem_cfg = REG_SET_FIELD(sh_static_mem_cfg, SH_STATIC_MEM_CONFIG,
1968 sh_static_mem_cfg = REG_SET_FIELD(sh_static_mem_cfg, SH_STATIC_MEM_CONFIG,
1970 WREG32(mmSH_STATIC_MEM_CONFIG, sh_static_mem_cfg);
1972 mutex_lock(&adev->srbm_mutex);
1973 for (i = 0; i < adev->vm_manager.id_mgr[0].num_ids; i++) {
1977 sh_mem_base = adev->gmc.shared_aperture_start >> 48;
1978 cik_srbm_select(adev, 0, 0, 0, i);
1979 /* CP and shaders */
1980 WREG32(mmSH_MEM_CONFIG, sh_mem_cfg);
1981 WREG32(mmSH_MEM_APE1_BASE, 1);
1982 WREG32(mmSH_MEM_APE1_LIMIT, 0);
1983 WREG32(mmSH_MEM_BASES, sh_mem_base);
1985 cik_srbm_select(adev, 0, 0, 0, 0);
1986 mutex_unlock(&adev->srbm_mutex);
1988 gfx_v7_0_init_compute_vmid(adev);
1989 gfx_v7_0_init_gds_vmid(adev);
1991 WREG32(mmSX_DEBUG_1, 0x20);
1993 WREG32(mmTA_CNTL_AUX, 0x00010000);
1995 tmp = RREG32(mmSPI_CONFIG_CNTL);
1997 WREG32(mmSPI_CONFIG_CNTL, tmp);
1999 WREG32(mmSQ_CONFIG, 1);
2001 WREG32(mmDB_DEBUG, 0);
2003 tmp = RREG32(mmDB_DEBUG2) & ~0xf00fffff;
2005 WREG32(mmDB_DEBUG2, tmp);
2007 tmp = RREG32(mmDB_DEBUG3) & ~0x0002021c;
2009 WREG32(mmDB_DEBUG3, tmp);
2011 tmp = RREG32(mmCB_HW_CONTROL) & ~0x00010000;
2013 WREG32(mmCB_HW_CONTROL, tmp);
2015 WREG32(mmSPI_CONFIG_CNTL_1, (4 << SPI_CONFIG_CNTL_1__VTX_DONE_DELAY__SHIFT));
2017 WREG32(mmPA_SC_FIFO_SIZE,
2018 ((adev->gfx.config.sc_prim_fifo_size_frontend << PA_SC_FIFO_SIZE__SC_FRONTEND_PRIM_FIFO_SIZE__SHIFT) |
2019 (adev->gfx.config.sc_prim_fifo_size_backend << PA_SC_FIFO_SIZE__SC_BACKEND_PRIM_FIFO_SIZE__SHIFT) |
2020 (adev->gfx.config.sc_hiz_tile_fifo_size << PA_SC_FIFO_SIZE__SC_HIZ_TILE_FIFO_SIZE__SHIFT) |
2021 (adev->gfx.config.sc_earlyz_tile_fifo_size << PA_SC_FIFO_SIZE__SC_EARLYZ_TILE_FIFO_SIZE__SHIFT)));
2023 WREG32(mmVGT_NUM_INSTANCES, 1);
2025 WREG32(mmCP_PERFMON_CNTL, 0);
2027 WREG32(mmSQ_CONFIG, 0);
2029 WREG32(mmPA_SC_FORCE_EOV_MAX_CNTS,
2030 ((4095 << PA_SC_FORCE_EOV_MAX_CNTS__FORCE_EOV_MAX_CLK_CNT__SHIFT) |
2031 (255 << PA_SC_FORCE_EOV_MAX_CNTS__FORCE_EOV_MAX_REZ_CNT__SHIFT)));
2033 WREG32(mmVGT_CACHE_INVALIDATION,
2034 (VC_AND_TC << VGT_CACHE_INVALIDATION__CACHE_INVALIDATION__SHIFT) |
2035 (ES_AND_GS_AUTO << VGT_CACHE_INVALIDATION__AUTO_INVLD_EN__SHIFT));
2037 WREG32(mmVGT_GS_VERTEX_REUSE, 16);
2038 WREG32(mmPA_SC_LINE_STIPPLE_STATE, 0);
2040 WREG32(mmPA_CL_ENHANCE, PA_CL_ENHANCE__CLIP_VTX_REORDER_ENA_MASK |
2041 (3 << PA_CL_ENHANCE__NUM_CLIP_SEQ__SHIFT));
2042 WREG32(mmPA_SC_ENHANCE, PA_SC_ENHANCE__ENABLE_PA_SC_OUT_OF_ORDER_MASK);
2044 tmp = RREG32(mmSPI_ARB_PRIORITY);
2045 tmp = REG_SET_FIELD(tmp, SPI_ARB_PRIORITY, PIPE_ORDER_TS0, 2);
2046 tmp = REG_SET_FIELD(tmp, SPI_ARB_PRIORITY, PIPE_ORDER_TS1, 2);
2047 tmp = REG_SET_FIELD(tmp, SPI_ARB_PRIORITY, PIPE_ORDER_TS2, 2);
2048 tmp = REG_SET_FIELD(tmp, SPI_ARB_PRIORITY, PIPE_ORDER_TS3, 2);
2049 WREG32(mmSPI_ARB_PRIORITY, tmp);
2051 mutex_unlock(&adev->grbm_idx_mutex);
2057 * GPU scratch register helper functions.
2060 * gfx_v7_0_scratch_init - setup driver info for CP scratch regs
2062 * @adev: amdgpu_device pointer
2064 * Set up the number and offset of the CP scratch registers.
2065 * NOTE: use of CP scratch registers is a legacy interface and
2066 * is not used by default on newer asics (r6xx+). On newer asics,
2067 * memory buffers are used for fences rather than scratch regs.
2069 static void gfx_v7_0_scratch_init(struct amdgpu_device *adev)
2071 adev->gfx.scratch.num_reg = 8;
2072 adev->gfx.scratch.reg_base = mmSCRATCH_REG0;
2073 adev->gfx.scratch.free_mask = (1u << adev->gfx.scratch.num_reg) - 1;
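/* one free bit per scratch register: with num_reg = 8 this evaluates
 * to (1u << 8) - 1 = 0xff, i.e. all eight registers start out free */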
2077 * gfx_v7_0_ring_test_ring - basic gfx ring test
2079 * @adev: amdgpu_device pointer
2080 * @ring: amdgpu_ring structure holding ring information
2082 * Allocate a scratch register and write to it using the gfx ring (CIK).
2083 * Provides a basic gfx ring test to verify that the ring is working.
2084 * Used by gfx_v7_0_cp_gfx_resume();
2085 * Returns 0 on success, error on failure.
2087 static int gfx_v7_0_ring_test_ring(struct amdgpu_ring *ring)
2089 struct amdgpu_device *adev = ring->adev;
2095 r = amdgpu_gfx_scratch_get(adev, &scratch);
2099 WREG32(scratch, 0xCAFEDEAD);
2100 r = amdgpu_ring_alloc(ring, 3);
2102 goto error_free_scratch;
2104 amdgpu_ring_write(ring, PACKET3(PACKET3_SET_UCONFIG_REG, 1));
2105 amdgpu_ring_write(ring, (scratch - PACKET3_SET_UCONFIG_REG_START));
2106 amdgpu_ring_write(ring, 0xDEADBEEF);
2107 amdgpu_ring_commit(ring);
2109 for (i = 0; i < adev->usec_timeout; i++) {
2110 tmp = RREG32(scratch);
2111 if (tmp == 0xDEADBEEF)
2115 if (i >= adev->usec_timeout)
2119 amdgpu_gfx_scratch_free(adev, scratch);
2124 * gfx_v7_0_ring_emit_hdp_flush - emit an hdp flush on the cp
2126 * @ring: amdgpu_ring structure holding ring information
2129 * Emits an hdp flush on the cp.
2131 static void gfx_v7_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
2134 int usepfp = ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE ? 0 : 1;
2136 if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE) {
2139 ref_and_mask = GPU_HDP_FLUSH_DONE__CP2_MASK << ring->pipe;
2142 ref_and_mask = GPU_HDP_FLUSH_DONE__CP6_MASK << ring->pipe;
2148 ref_and_mask = GPU_HDP_FLUSH_DONE__CP0_MASK;
2151 amdgpu_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5));
2152 amdgpu_ring_write(ring, (WAIT_REG_MEM_OPERATION(1) | /* write, wait, write */
2153 WAIT_REG_MEM_FUNCTION(3) | /* == */
2154 WAIT_REG_MEM_ENGINE(usepfp))); /* pfp or me */
2155 amdgpu_ring_write(ring, mmGPU_HDP_FLUSH_REQ);
2156 amdgpu_ring_write(ring, mmGPU_HDP_FLUSH_DONE);
2157 amdgpu_ring_write(ring, ref_and_mask);
2158 amdgpu_ring_write(ring, ref_and_mask);
2159 amdgpu_ring_write(ring, 0x20); /* poll interval */
2162 static void gfx_v7_0_ring_emit_vgt_flush(struct amdgpu_ring *ring)
2164 amdgpu_ring_write(ring, PACKET3(PACKET3_EVENT_WRITE, 0));
2165 amdgpu_ring_write(ring, EVENT_TYPE(VS_PARTIAL_FLUSH) |
2168 amdgpu_ring_write(ring, PACKET3(PACKET3_EVENT_WRITE, 0));
2169 amdgpu_ring_write(ring, EVENT_TYPE(VGT_FLUSH) |
2174 * gfx_v7_0_ring_emit_fence_gfx - emit a fence on the gfx ring
2176 * @ring: amdgpu_ring structure holding ring information
2177 * @addr, @seq, @flags: fence address, sequence number and flags
2179 * Emits a fence sequence number on the gfx ring and flushes
2182 static void gfx_v7_0_ring_emit_fence_gfx(struct amdgpu_ring *ring, u64 addr,
2183 u64 seq, unsigned flags)
2185 bool write64bit = flags & AMDGPU_FENCE_FLAG_64BIT;
2186 bool int_sel = flags & AMDGPU_FENCE_FLAG_INT;
2187 /* Workaround for cache flush problems. First send a dummy EOP
2188 * event down the pipe with a seq number one below the real one.
2190 amdgpu_ring_write(ring, PACKET3(PACKET3_EVENT_WRITE_EOP, 4));
2191 amdgpu_ring_write(ring, (EOP_TCL1_ACTION_EN |
2193 EVENT_TYPE(CACHE_FLUSH_AND_INV_TS_EVENT) |
2195 amdgpu_ring_write(ring, addr & 0xfffffffc);
2196 amdgpu_ring_write(ring, (upper_32_bits(addr) & 0xffff) |
2197 DATA_SEL(1) | INT_SEL(0));
2198 amdgpu_ring_write(ring, lower_32_bits(seq - 1));
2199 amdgpu_ring_write(ring, upper_32_bits(seq - 1));
2201 /* Then send the real EOP event down the pipe. */
2202 amdgpu_ring_write(ring, PACKET3(PACKET3_EVENT_WRITE_EOP, 4));
2203 amdgpu_ring_write(ring, (EOP_TCL1_ACTION_EN |
2205 EVENT_TYPE(CACHE_FLUSH_AND_INV_TS_EVENT) |
2207 amdgpu_ring_write(ring, addr & 0xfffffffc);
2208 amdgpu_ring_write(ring, (upper_32_bits(addr) & 0xffff) |
2209 DATA_SEL(write64bit ? 2 : 1) | INT_SEL(int_sel ? 2 : 0));
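/* DATA_SEL: 1 writes the low 32 bits of seq, 2 the full 64 bits;
 * INT_SEL: 2 raises the fence interrupt once the write has landed */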
2210 amdgpu_ring_write(ring, lower_32_bits(seq));
2211 amdgpu_ring_write(ring, upper_32_bits(seq));
2215 * gfx_v7_0_ring_emit_fence_compute - emit a fence on the compute ring
2217 * @ring: amdgpu_ring structure holding ring information
2218 * @addr, @seq, @flags: fence address, sequence number and flags
2220 * Emits a fence sequence number on the compute ring and flushes
2223 static void gfx_v7_0_ring_emit_fence_compute(struct amdgpu_ring *ring,
2227 bool write64bit = flags & AMDGPU_FENCE_FLAG_64BIT;
2228 bool int_sel = flags & AMDGPU_FENCE_FLAG_INT;
2230 /* RELEASE_MEM - flush caches, send int */
2231 amdgpu_ring_write(ring, PACKET3(PACKET3_RELEASE_MEM, 5));
2232 amdgpu_ring_write(ring, (EOP_TCL1_ACTION_EN |
2234 EVENT_TYPE(CACHE_FLUSH_AND_INV_TS_EVENT) |
2236 amdgpu_ring_write(ring, DATA_SEL(write64bit ? 2 : 1) | INT_SEL(int_sel ? 2 : 0));
2237 amdgpu_ring_write(ring, addr & 0xfffffffc);
2238 amdgpu_ring_write(ring, upper_32_bits(addr));
2239 amdgpu_ring_write(ring, lower_32_bits(seq));
2240 amdgpu_ring_write(ring, upper_32_bits(seq));
2247 * gfx_v7_0_ring_emit_ib - emit an IB (Indirect Buffer) on the ring
2249 * @ring: amdgpu_ring structure holding ring information
2250 * @ib: amdgpu indirect buffer object
2252 * Emits a DE (drawing engine) or CE (constant engine) IB
2253 * on the gfx ring. IBs are usually generated by userspace
2254 * acceleration drivers and submitted to the kernel for
2255 * scheduling on the ring. This function schedules the IB
2256 * on the gfx ring for execution by the GPU.
2258 static void gfx_v7_0_ring_emit_ib_gfx(struct amdgpu_ring *ring,
2259 struct amdgpu_job *job,
2260 struct amdgpu_ib *ib,
2263 unsigned vmid = AMDGPU_JOB_GET_VMID(job);
2264 u32 header, control = 0;
2266 /* insert SWITCH_BUFFER packet before first IB in the ring frame */
2267 if (flags & AMDGPU_HAVE_CTX_SWITCH) {
2268 amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0));
2269 amdgpu_ring_write(ring, 0);
2272 if (ib->flags & AMDGPU_IB_FLAG_CE)
2273 header = PACKET3(PACKET3_INDIRECT_BUFFER_CONST, 2);
2275 header = PACKET3(PACKET3_INDIRECT_BUFFER, 2);
2277 control |= ib->length_dw | (vmid << 24);
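/* the INDIRECT_BUFFER body emitted below is: addr lo, addr hi, then a
 * control word packing the IB length in dwords with the vmid in bits 31:24 */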
2279 amdgpu_ring_write(ring, header);
2280 amdgpu_ring_write(ring,
2284 (ib->gpu_addr & 0xFFFFFFFC));
2285 amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr) & 0xFFFF);
2286 amdgpu_ring_write(ring, control);
2289 static void gfx_v7_0_ring_emit_ib_compute(struct amdgpu_ring *ring,
2290 struct amdgpu_job *job,
2291 struct amdgpu_ib *ib,
2294 unsigned vmid = AMDGPU_JOB_GET_VMID(job);
2295 u32 control = INDIRECT_BUFFER_VALID | ib->length_dw | (vmid << 24);
2297 /* Currently, there is a high probability of a wave ID mismatch
2298 * between ME and GDS, leading to a hw deadlock, because ME generates
2299 * different wave IDs than the GDS expects. This situation happens
2300 * randomly when at least 5 compute pipes use GDS ordered append.
2301 * The wave IDs generated by ME are also wrong after suspend/resume.
2302 * Those are probably bugs somewhere else in the kernel driver.
2304 * Writing GDS_COMPUTE_MAX_WAVE_ID resets wave ID counters in ME and
2305 * GDS to 0 for this ring (me/pipe).
2307 if (ib->flags & AMDGPU_IB_FLAG_RESET_GDS_MAX_WAVE_ID) {
2308 amdgpu_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
2309 amdgpu_ring_write(ring, mmGDS_COMPUTE_MAX_WAVE_ID - PACKET3_SET_CONFIG_REG_START);
2310 amdgpu_ring_write(ring, ring->adev->gds.gds_compute_max_wave_id);
2313 amdgpu_ring_write(ring, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
2314 amdgpu_ring_write(ring,
2318 (ib->gpu_addr & 0xFFFFFFFC));
2319 amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr) & 0xFFFF);
2320 amdgpu_ring_write(ring, control);
2323 static void gfx_v7_ring_emit_cntxcntl(struct amdgpu_ring *ring, uint32_t flags)
2327 dw2 |= 0x80000000; /* set load_enable otherwise this packet is just NOPs */
2328 if (flags & AMDGPU_HAVE_CTX_SWITCH) {
2329 gfx_v7_0_ring_emit_vgt_flush(ring);
2330 /* set load_global_config & load_global_uconfig */
2332 /* set load_cs_sh_regs */
2334 /* set load_per_context_state & load_gfx_sh_regs */
2338 amdgpu_ring_write(ring, PACKET3(PACKET3_CONTEXT_CONTROL, 1));
2339 amdgpu_ring_write(ring, dw2);
2340 amdgpu_ring_write(ring, 0);
2344 * gfx_v7_0_ring_test_ib - basic ring IB test
2346 * @ring: amdgpu_ring structure holding ring information
2348 * Allocate an IB and execute it on the gfx ring (CIK).
2349 * Provides a basic gfx ring test to verify that IBs are working.
2350 * Returns 0 on success, error on failure.
2352 static int gfx_v7_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
2354 struct amdgpu_device *adev = ring->adev;
2355 struct amdgpu_ib ib;
2356 struct dma_fence *f = NULL;
2361 r = amdgpu_gfx_scratch_get(adev, &scratch);
2365 WREG32(scratch, 0xCAFEDEAD);
2366 memset(&ib, 0, sizeof(ib));
2367 r = amdgpu_ib_get(adev, NULL, 256, &ib);
2371 ib.ptr[0] = PACKET3(PACKET3_SET_UCONFIG_REG, 1);
2372 ib.ptr[1] = ((scratch - PACKET3_SET_UCONFIG_REG_START));
2373 ib.ptr[2] = 0xDEADBEEF;
2376 r = amdgpu_ib_schedule(ring, 1, &ib, NULL, &f);
2380 r = dma_fence_wait_timeout(f, false, timeout);
2387 tmp = RREG32(scratch);
2388 if (tmp == 0xDEADBEEF)
2394 amdgpu_ib_free(adev, &ib, NULL);
2397 amdgpu_gfx_scratch_free(adev, scratch);
2403 * On CIK, gfx and compute now have independent command processors.
2406 * Gfx consists of a single ring and can process both gfx jobs and
2407 * compute jobs. The gfx CP consists of three microengines (ME):
2408 * PFP - Pre-Fetch Parser
2410 * CE - Constant Engine
2411 * The PFP and ME make up what is considered the Drawing Engine (DE).
2412 * The CE is an asynchronous engine used for updating buffer descriptors
2413 * used by the DE so that they can be loaded into cache in parallel
2414 * while the DE is processing state update packets.
2417 * The compute CP consists of two microengines (ME):
2418 * MEC1 - Compute MicroEngine 1
2419 * MEC2 - Compute MicroEngine 2
2420 * Each MEC supports 4 compute pipes and each pipe supports 8 queues.
2421 * The queues are exposed to userspace and are programmed directly
2422 * by the compute runtime.
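*
* With two MECs, four pipes per MEC and eight queues per pipe, this adds
* up to 2 * 4 * 8 = 64 hardware compute queues; each one is addressed by
* the (me, pipe, queue) triple later passed to cik_srbm_select().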
2425 * gfx_v7_0_cp_gfx_enable - enable/disable the gfx CP MEs
2427 * @adev: amdgpu_device pointer
2428 * @enable: enable or disable the MEs
2430 * Halts or unhalts the gfx MEs.
2432 static void gfx_v7_0_cp_gfx_enable(struct amdgpu_device *adev, bool enable)
2437 WREG32(mmCP_ME_CNTL, 0);
2439 WREG32(mmCP_ME_CNTL, (CP_ME_CNTL__ME_HALT_MASK | CP_ME_CNTL__PFP_HALT_MASK | CP_ME_CNTL__CE_HALT_MASK));
2440 for (i = 0; i < adev->gfx.num_gfx_rings; i++)
2441 adev->gfx.gfx_ring[i].sched.ready = false;
2447 * gfx_v7_0_cp_gfx_load_microcode - load the gfx CP ME ucode
2449 * @adev: amdgpu_device pointer
2451 * Loads the gfx PFP, ME, and CE ucode.
2452 * Returns 0 for success, -EINVAL if the ucode is not available.
2454 static int gfx_v7_0_cp_gfx_load_microcode(struct amdgpu_device *adev)
2456 const struct gfx_firmware_header_v1_0 *pfp_hdr;
2457 const struct gfx_firmware_header_v1_0 *ce_hdr;
2458 const struct gfx_firmware_header_v1_0 *me_hdr;
2459 const __le32 *fw_data;
2460 unsigned i, fw_size;
2462 if (!adev->gfx.me_fw || !adev->gfx.pfp_fw || !adev->gfx.ce_fw)
2465 pfp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.pfp_fw->data;
2466 ce_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.ce_fw->data;
2467 me_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.me_fw->data;
2469 amdgpu_ucode_print_gfx_hdr(&pfp_hdr->header);
2470 amdgpu_ucode_print_gfx_hdr(&ce_hdr->header);
2471 amdgpu_ucode_print_gfx_hdr(&me_hdr->header);
2472 adev->gfx.pfp_fw_version = le32_to_cpu(pfp_hdr->header.ucode_version);
2473 adev->gfx.ce_fw_version = le32_to_cpu(ce_hdr->header.ucode_version);
2474 adev->gfx.me_fw_version = le32_to_cpu(me_hdr->header.ucode_version);
2475 adev->gfx.me_feature_version = le32_to_cpu(me_hdr->ucode_feature_version);
2476 adev->gfx.ce_feature_version = le32_to_cpu(ce_hdr->ucode_feature_version);
2477 adev->gfx.pfp_feature_version = le32_to_cpu(pfp_hdr->ucode_feature_version);
2479 gfx_v7_0_cp_gfx_enable(adev, false);
2482 fw_data = (const __le32 *)
2483 (adev->gfx.pfp_fw->data +
2484 le32_to_cpu(pfp_hdr->header.ucode_array_offset_bytes));
2485 fw_size = le32_to_cpu(pfp_hdr->header.ucode_size_bytes) / 4;
2486 WREG32(mmCP_PFP_UCODE_ADDR, 0);
2487 for (i = 0; i < fw_size; i++)
2488 WREG32(mmCP_PFP_UCODE_DATA, le32_to_cpup(fw_data++));
2489 WREG32(mmCP_PFP_UCODE_ADDR, adev->gfx.pfp_fw_version);
2492 fw_data = (const __le32 *)
2493 (adev->gfx.ce_fw->data +
2494 le32_to_cpu(ce_hdr->header.ucode_array_offset_bytes));
2495 fw_size = le32_to_cpu(ce_hdr->header.ucode_size_bytes) / 4;
2496 WREG32(mmCP_CE_UCODE_ADDR, 0);
2497 for (i = 0; i < fw_size; i++)
2498 WREG32(mmCP_CE_UCODE_DATA, le32_to_cpup(fw_data++));
2499 WREG32(mmCP_CE_UCODE_ADDR, adev->gfx.ce_fw_version);
2502 fw_data = (const __le32 *)
2503 (adev->gfx.me_fw->data +
2504 le32_to_cpu(me_hdr->header.ucode_array_offset_bytes));
2505 fw_size = le32_to_cpu(me_hdr->header.ucode_size_bytes) / 4;
2506 WREG32(mmCP_ME_RAM_WADDR, 0);
2507 for (i = 0; i < fw_size; i++)
2508 WREG32(mmCP_ME_RAM_DATA, le32_to_cpup(fw_data++));
2509 WREG32(mmCP_ME_RAM_WADDR, adev->gfx.me_fw_version);
2515 * gfx_v7_0_cp_gfx_start - start the gfx ring
2517 * @adev: amdgpu_device pointer
2519 * Enables the ring and loads the clear state context and other
2520 * packets required to init the ring.
2521 * Returns 0 for success, error for failure.
2523 static int gfx_v7_0_cp_gfx_start(struct amdgpu_device *adev)
2525 struct amdgpu_ring *ring = &adev->gfx.gfx_ring[0];
2526 const struct cs_section_def *sect = NULL;
2527 const struct cs_extent_def *ext = NULL;
2531 WREG32(mmCP_MAX_CONTEXT, adev->gfx.config.max_hw_contexts - 1);
2532 WREG32(mmCP_ENDIAN_SWAP, 0);
2533 WREG32(mmCP_DEVICE_ID, 1);
2535 gfx_v7_0_cp_gfx_enable(adev, true);
2537 r = amdgpu_ring_alloc(ring, gfx_v7_0_get_csb_size(adev) + 8);
2539 DRM_ERROR("amdgpu: cp failed to lock ring (%d).\n", r);
2543 /* init the CE partitions. CE only used for gfx on CIK */
2544 amdgpu_ring_write(ring, PACKET3(PACKET3_SET_BASE, 2));
2545 amdgpu_ring_write(ring, PACKET3_BASE_INDEX(CE_PARTITION_BASE));
2546 amdgpu_ring_write(ring, 0x8000);
2547 amdgpu_ring_write(ring, 0x8000);
2549 /* clear state buffer */
2550 amdgpu_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
2551 amdgpu_ring_write(ring, PACKET3_PREAMBLE_BEGIN_CLEAR_STATE);
2553 amdgpu_ring_write(ring, PACKET3(PACKET3_CONTEXT_CONTROL, 1));
2554 amdgpu_ring_write(ring, 0x80000000);
2555 amdgpu_ring_write(ring, 0x80000000);
2557 for (sect = adev->gfx.rlc.cs_data; sect->section != NULL; ++sect) {
2558 for (ext = sect->section; ext->extent != NULL; ++ext) {
2559 if (sect->id == SECT_CONTEXT) {
2560 amdgpu_ring_write(ring,
2561 PACKET3(PACKET3_SET_CONTEXT_REG, ext->reg_count));
2562 amdgpu_ring_write(ring, ext->reg_index - PACKET3_SET_CONTEXT_REG_START);
2563 for (i = 0; i < ext->reg_count; i++)
2564 amdgpu_ring_write(ring, ext->extent[i]);
2569 amdgpu_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 2));
2570 amdgpu_ring_write(ring, mmPA_SC_RASTER_CONFIG - PACKET3_SET_CONTEXT_REG_START);
2571 amdgpu_ring_write(ring, adev->gfx.config.rb_config[0][0].raster_config);
2572 amdgpu_ring_write(ring, adev->gfx.config.rb_config[0][0].raster_config_1);
2574 amdgpu_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
2575 amdgpu_ring_write(ring, PACKET3_PREAMBLE_END_CLEAR_STATE);
2577 amdgpu_ring_write(ring, PACKET3(PACKET3_CLEAR_STATE, 0));
2578 amdgpu_ring_write(ring, 0);
2580 amdgpu_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 2));
2581 amdgpu_ring_write(ring, 0x00000316);
2582 amdgpu_ring_write(ring, 0x0000000e); /* VGT_VERTEX_REUSE_BLOCK_CNTL */
2583 amdgpu_ring_write(ring, 0x00000010); /* VGT_OUT_DEALLOC_CNTL */
2585 amdgpu_ring_commit(ring);
2591 * gfx_v7_0_cp_gfx_resume - setup the gfx ring buffer registers
2593 * @adev: amdgpu_device pointer
2595 * Program the location and size of the gfx ring buffer
2596 * and test it to make sure it's working.
2597 * Returns 0 for success, error for failure.
2599 static int gfx_v7_0_cp_gfx_resume(struct amdgpu_device *adev)
2601 struct amdgpu_ring *ring;
2604 u64 rb_addr, rptr_addr;
2607 WREG32(mmCP_SEM_WAIT_TIMER, 0x0);
2608 if (adev->asic_type != CHIP_HAWAII)
2609 WREG32(mmCP_SEM_INCOMPLETE_TIMER_CNTL, 0x0);
2611 /* Set the write pointer delay */
2612 WREG32(mmCP_RB_WPTR_DELAY, 0);
2614 /* set the RB to use vmid 0 */
2615 WREG32(mmCP_RB_VMID, 0);
2617 WREG32(mmSCRATCH_ADDR, 0);
2619 /* ring 0 - compute and gfx */
2620 /* Set ring buffer size */
2621 ring = &adev->gfx.gfx_ring[0];
2622 rb_bufsz = order_base_2(ring->ring_size / 8);
2623 tmp = (order_base_2(AMDGPU_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
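/* e.g. with AMDGPU_GPU_PAGE_SIZE = 4096: 4096 / 8 = 512 and
 * order_base_2(512) = 9, placed in the block-size field at bit 8,
 * while rb_bufsz = log2(ring_size / 8) fills the low bits */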
2625 tmp |= 2 << CP_RB0_CNTL__BUF_SWAP__SHIFT;
2627 WREG32(mmCP_RB0_CNTL, tmp);
2629 /* Initialize the ring buffer's read and write pointers */
2630 WREG32(mmCP_RB0_CNTL, tmp | CP_RB0_CNTL__RB_RPTR_WR_ENA_MASK);
2632 WREG32(mmCP_RB0_WPTR, lower_32_bits(ring->wptr));
2634 /* set the wb address whether it's enabled or not */
2635 rptr_addr = adev->wb.gpu_addr + (ring->rptr_offs * 4);
2636 WREG32(mmCP_RB0_RPTR_ADDR, lower_32_bits(rptr_addr));
2637 WREG32(mmCP_RB0_RPTR_ADDR_HI, upper_32_bits(rptr_addr) & 0xFF);
2639 /* scratch register shadowing is no longer supported */
2640 WREG32(mmSCRATCH_UMSK, 0);
2643 WREG32(mmCP_RB0_CNTL, tmp);
2645 rb_addr = ring->gpu_addr >> 8;
2646 WREG32(mmCP_RB0_BASE, rb_addr);
2647 WREG32(mmCP_RB0_BASE_HI, upper_32_bits(rb_addr));
2649 /* start the ring */
2650 gfx_v7_0_cp_gfx_start(adev);
2651 r = amdgpu_ring_test_helper(ring);
2658 static u64 gfx_v7_0_ring_get_rptr(struct amdgpu_ring *ring)
2660 return ring->adev->wb.wb[ring->rptr_offs];
2663 static u64 gfx_v7_0_ring_get_wptr_gfx(struct amdgpu_ring *ring)
2665 struct amdgpu_device *adev = ring->adev;
2667 return RREG32(mmCP_RB0_WPTR);
2670 static void gfx_v7_0_ring_set_wptr_gfx(struct amdgpu_ring *ring)
2672 struct amdgpu_device *adev = ring->adev;
2674 WREG32(mmCP_RB0_WPTR, lower_32_bits(ring->wptr));
2675 (void)RREG32(mmCP_RB0_WPTR);
2678 static u64 gfx_v7_0_ring_get_wptr_compute(struct amdgpu_ring *ring)
2680 /* XXX check if swapping is necessary on BE */
2681 return ring->adev->wb.wb[ring->wptr_offs];
2684 static void gfx_v7_0_ring_set_wptr_compute(struct amdgpu_ring *ring)
2686 struct amdgpu_device *adev = ring->adev;
2688 /* XXX check if swapping is necessary on BE */
2689 adev->wb.wb[ring->wptr_offs] = lower_32_bits(ring->wptr);
2690 WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr));
2694 * gfx_v7_0_cp_compute_enable - enable/disable the compute CP MEs
2696 * @adev: amdgpu_device pointer
2697 * @enable: enable or disable the MEs
2699 * Halts or unhalts the compute MEs.
2701 static void gfx_v7_0_cp_compute_enable(struct amdgpu_device *adev, bool enable)
2706 WREG32(mmCP_MEC_CNTL, 0);
2708 WREG32(mmCP_MEC_CNTL, (CP_MEC_CNTL__MEC_ME1_HALT_MASK | CP_MEC_CNTL__MEC_ME2_HALT_MASK));
2709 for (i = 0; i < adev->gfx.num_compute_rings; i++)
2710 adev->gfx.compute_ring[i].sched.ready = false;
2716 * gfx_v7_0_cp_compute_load_microcode - load the compute CP ME ucode
2718 * @adev: amdgpu_device pointer
2720 * Loads the compute MEC1&2 ucode.
2721 * Returns 0 for success, -EINVAL if the ucode is not available.
2723 static int gfx_v7_0_cp_compute_load_microcode(struct amdgpu_device *adev)
2725 const struct gfx_firmware_header_v1_0 *mec_hdr;
2726 const __le32 *fw_data;
2727 unsigned i, fw_size;
2729 if (!adev->gfx.mec_fw)
2732 mec_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data;
2733 amdgpu_ucode_print_gfx_hdr(&mec_hdr->header);
2734 adev->gfx.mec_fw_version = le32_to_cpu(mec_hdr->header.ucode_version);
2735 adev->gfx.mec_feature_version = le32_to_cpu(
2736 mec_hdr->ucode_feature_version);
2738 gfx_v7_0_cp_compute_enable(adev, false);
2741 fw_data = (const __le32 *)
2742 (adev->gfx.mec_fw->data +
2743 le32_to_cpu(mec_hdr->header.ucode_array_offset_bytes));
2744 fw_size = le32_to_cpu(mec_hdr->header.ucode_size_bytes) / 4;
2745 WREG32(mmCP_MEC_ME1_UCODE_ADDR, 0);
2746 for (i = 0; i < fw_size; i++)
2747 WREG32(mmCP_MEC_ME1_UCODE_DATA, le32_to_cpup(fw_data++));
2748 WREG32(mmCP_MEC_ME1_UCODE_ADDR, 0);
2750 if (adev->asic_type == CHIP_KAVERI) {
2751 const struct gfx_firmware_header_v1_0 *mec2_hdr;
2753 if (!adev->gfx.mec2_fw)
2756 mec2_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec2_fw->data;
2757 amdgpu_ucode_print_gfx_hdr(&mec2_hdr->header);
2758 adev->gfx.mec2_fw_version = le32_to_cpu(mec2_hdr->header.ucode_version);
2759 adev->gfx.mec2_feature_version = le32_to_cpu(
2760 mec2_hdr->ucode_feature_version);
2763 fw_data = (const __le32 *)
2764 (adev->gfx.mec2_fw->data +
2765 le32_to_cpu(mec2_hdr->header.ucode_array_offset_bytes));
2766 fw_size = le32_to_cpu(mec2_hdr->header.ucode_size_bytes) / 4;
2767 WREG32(mmCP_MEC_ME2_UCODE_ADDR, 0);
2768 for (i = 0; i < fw_size; i++)
2769 WREG32(mmCP_MEC_ME2_UCODE_DATA, le32_to_cpup(fw_data++));
2770 WREG32(mmCP_MEC_ME2_UCODE_ADDR, 0);
2777 * gfx_v7_0_cp_compute_fini - stop the compute queues
2779 * @adev: amdgpu_device pointer
2781 * Stop the compute queues and tear down the driver queue
2784 static void gfx_v7_0_cp_compute_fini(struct amdgpu_device *adev)
2788 for (i = 0; i < adev->gfx.num_compute_rings; i++) {
2789 struct amdgpu_ring *ring = &adev->gfx.compute_ring[i];
2791 amdgpu_bo_free_kernel(&ring->mqd_obj, NULL, NULL);
2795 static void gfx_v7_0_mec_fini(struct amdgpu_device *adev)
2797 amdgpu_bo_free_kernel(&adev->gfx.mec.hpd_eop_obj, NULL, NULL);
2800 static int gfx_v7_0_mec_init(struct amdgpu_device *adev)
2804 size_t mec_hpd_size;
2806 bitmap_zero(adev->gfx.mec.queue_bitmap, AMDGPU_MAX_COMPUTE_QUEUES);
2808 /* take ownership of the relevant compute queues */
2809 amdgpu_gfx_compute_queue_acquire(adev);
2811 /* allocate space for ALL pipes (even the ones we don't own) */
2812 mec_hpd_size = adev->gfx.mec.num_mec * adev->gfx.mec.num_pipe_per_mec
2813 * GFX7_MEC_HPD_SIZE * 2;
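/*
 * Example: with two MECs and four pipes per MEC this reserves
 * 2 * 4 * 2 * GFX7_MEC_HPD_SIZE = 32 KiB, i.e. a 2 * GFX7_MEC_HPD_SIZE
 * (4 KiB) EOP slot per pipe, matching the per-pipe offset computed in
 * gfx_v7_0_compute_pipe_init() below.
 */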
2815 r = amdgpu_bo_create_reserved(adev, mec_hpd_size, PAGE_SIZE,
2816 AMDGPU_GEM_DOMAIN_VRAM,
2817 &adev->gfx.mec.hpd_eop_obj,
2818 &adev->gfx.mec.hpd_eop_gpu_addr,
2821 dev_warn(adev->dev, "(%d) create, pin or map of HPD EOP bo failed\n", r);
2822 gfx_v7_0_mec_fini(adev);
2826 /* clear memory. Not sure if this is required or not */
2827 memset(hpd, 0, mec_hpd_size);
2829 amdgpu_bo_kunmap(adev->gfx.mec.hpd_eop_obj);
2830 amdgpu_bo_unreserve(adev->gfx.mec.hpd_eop_obj);
2835 struct hqd_registers
2837 u32 cp_mqd_base_addr;
2838 u32 cp_mqd_base_addr_hi;
2841 u32 cp_hqd_persistent_state;
2842 u32 cp_hqd_pipe_priority;
2843 u32 cp_hqd_queue_priority;
2846 u32 cp_hqd_pq_base_hi;
2848 u32 cp_hqd_pq_rptr_report_addr;
2849 u32 cp_hqd_pq_rptr_report_addr_hi;
2850 u32 cp_hqd_pq_wptr_poll_addr;
2851 u32 cp_hqd_pq_wptr_poll_addr_hi;
2852 u32 cp_hqd_pq_doorbell_control;
2854 u32 cp_hqd_pq_control;
2855 u32 cp_hqd_ib_base_addr;
2856 u32 cp_hqd_ib_base_addr_hi;
2858 u32 cp_hqd_ib_control;
2859 u32 cp_hqd_iq_timer;
2861 u32 cp_hqd_dequeue_request;
2862 u32 cp_hqd_dma_offload;
2863 u32 cp_hqd_sema_cmd;
2864 u32 cp_hqd_msg_type;
2865 u32 cp_hqd_atomic0_preop_lo;
2866 u32 cp_hqd_atomic0_preop_hi;
2867 u32 cp_hqd_atomic1_preop_lo;
2868 u32 cp_hqd_atomic1_preop_hi;
2869 u32 cp_hqd_hq_scheduler0;
2870 u32 cp_hqd_hq_scheduler1;
2874 static void gfx_v7_0_compute_pipe_init(struct amdgpu_device *adev,
2879 size_t eop_offset = (mec * adev->gfx.mec.num_pipe_per_mec + pipe)
2880 * GFX7_MEC_HPD_SIZE * 2;
2882 mutex_lock(&adev->srbm_mutex);
2883 eop_gpu_addr = adev->gfx.mec.hpd_eop_gpu_addr + eop_offset;
2885 cik_srbm_select(adev, mec + 1, pipe, 0, 0);
2887 /* write the EOP addr */
2888 WREG32(mmCP_HPD_EOP_BASE_ADDR, eop_gpu_addr >> 8);
2889 WREG32(mmCP_HPD_EOP_BASE_ADDR_HI, upper_32_bits(eop_gpu_addr) >> 8);
2891 /* set the VMID assigned */
2892 WREG32(mmCP_HPD_EOP_VMID, 0);
2894 /* set the EOP size, register value is 2^(EOP_SIZE+1) dwords */
2895 tmp = RREG32(mmCP_HPD_EOP_CONTROL);
2896 tmp &= ~CP_HPD_EOP_CONTROL__EOP_SIZE_MASK;
2897 tmp |= order_base_2(GFX7_MEC_HPD_SIZE / 8);
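/* e.g. GFX7_MEC_HPD_SIZE = 2048 bytes: 2048 / 8 = 256, order_base_2(256)
 * = 8, and 2^(8+1) = 512 dwords = 2048 bytes, matching the allocation */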
2898 WREG32(mmCP_HPD_EOP_CONTROL, tmp);
2900 cik_srbm_select(adev, 0, 0, 0, 0);
2901 mutex_unlock(&adev->srbm_mutex);
2904 static int gfx_v7_0_mqd_deactivate(struct amdgpu_device *adev)
2908 /* disable the queue if it's active */
2909 if (RREG32(mmCP_HQD_ACTIVE) & 1) {
2910 WREG32(mmCP_HQD_DEQUEUE_REQUEST, 1);
2911 for (i = 0; i < adev->usec_timeout; i++) {
2912 if (!(RREG32(mmCP_HQD_ACTIVE) & 1))
2917 if (i == adev->usec_timeout)
2920 WREG32(mmCP_HQD_DEQUEUE_REQUEST, 0);
2921 WREG32(mmCP_HQD_PQ_RPTR, 0);
2922 WREG32(mmCP_HQD_PQ_WPTR, 0);
2928 static void gfx_v7_0_mqd_init(struct amdgpu_device *adev,
2929 struct cik_mqd *mqd,
2930 uint64_t mqd_gpu_addr,
2931 struct amdgpu_ring *ring)
2936 /* init the mqd struct */
2937 memset(mqd, 0, sizeof(struct cik_mqd));
2939 mqd->header = 0xC0310800;
2940 mqd->compute_static_thread_mgmt_se0 = 0xffffffff;
2941 mqd->compute_static_thread_mgmt_se1 = 0xffffffff;
2942 mqd->compute_static_thread_mgmt_se2 = 0xffffffff;
2943 mqd->compute_static_thread_mgmt_se3 = 0xffffffff;
2945 /* enable doorbell? */
2946 mqd->cp_hqd_pq_doorbell_control =
2947 RREG32(mmCP_HQD_PQ_DOORBELL_CONTROL);
2948 if (ring->use_doorbell)
2949 mqd->cp_hqd_pq_doorbell_control |= CP_HQD_PQ_DOORBELL_CONTROL__DOORBELL_EN_MASK;
2951 mqd->cp_hqd_pq_doorbell_control &= ~CP_HQD_PQ_DOORBELL_CONTROL__DOORBELL_EN_MASK;
2953 /* set the pointer to the MQD */
2954 mqd->cp_mqd_base_addr_lo = mqd_gpu_addr & 0xfffffffc;
2955 mqd->cp_mqd_base_addr_hi = upper_32_bits(mqd_gpu_addr);
2957 /* set MQD vmid to 0 */
2958 mqd->cp_mqd_control = RREG32(mmCP_MQD_CONTROL);
2959 mqd->cp_mqd_control &= ~CP_MQD_CONTROL__VMID_MASK;
2961 /* set the pointer to the HQD, this is similar to CP_RB0_BASE/_HI */
2962 hqd_gpu_addr = ring->gpu_addr >> 8;
2963 mqd->cp_hqd_pq_base_lo = hqd_gpu_addr;
2964 mqd->cp_hqd_pq_base_hi = upper_32_bits(hqd_gpu_addr);
2966 /* set up the HQD, this is similar to CP_RB0_CNTL */
2967 mqd->cp_hqd_pq_control = RREG32(mmCP_HQD_PQ_CONTROL);
2968 mqd->cp_hqd_pq_control &=
2969 ~(CP_HQD_PQ_CONTROL__QUEUE_SIZE_MASK |
2970 CP_HQD_PQ_CONTROL__RPTR_BLOCK_SIZE_MASK);
2972 mqd->cp_hqd_pq_control |=
2973 order_base_2(ring->ring_size / 8);
2974 mqd->cp_hqd_pq_control |=
2975 (order_base_2(AMDGPU_GPU_PAGE_SIZE/8) << 8);
2977 mqd->cp_hqd_pq_control |=
2978 2 << CP_HQD_PQ_CONTROL__ENDIAN_SWAP__SHIFT;
2980 mqd->cp_hqd_pq_control &=
2981 ~(CP_HQD_PQ_CONTROL__UNORD_DISPATCH_MASK |
2982 CP_HQD_PQ_CONTROL__ROQ_PQ_IB_FLIP_MASK |
2983 CP_HQD_PQ_CONTROL__PQ_VOLATILE_MASK);
2984 mqd->cp_hqd_pq_control |=
2985 CP_HQD_PQ_CONTROL__PRIV_STATE_MASK |
2986 CP_HQD_PQ_CONTROL__KMD_QUEUE_MASK; /* assuming kernel queue control */
2988 /* only used if CP_PQ_WPTR_POLL_CNTL.EN is set */
2989 wb_gpu_addr = adev->wb.gpu_addr + (ring->wptr_offs * 4);
2990 mqd->cp_hqd_pq_wptr_poll_addr_lo = wb_gpu_addr & 0xfffffffc;
2991 mqd->cp_hqd_pq_wptr_poll_addr_hi = upper_32_bits(wb_gpu_addr) & 0xffff;
2993 /* set the wb address whether it's enabled or not */
2994 wb_gpu_addr = adev->wb.gpu_addr + (ring->rptr_offs * 4);
2995 mqd->cp_hqd_pq_rptr_report_addr_lo = wb_gpu_addr & 0xfffffffc;
2996 mqd->cp_hqd_pq_rptr_report_addr_hi =
2997 upper_32_bits(wb_gpu_addr) & 0xffff;
2999 /* enable the doorbell if requested */
3000 if (ring->use_doorbell) {
3001 mqd->cp_hqd_pq_doorbell_control =
3002 RREG32(mmCP_HQD_PQ_DOORBELL_CONTROL);
3003 mqd->cp_hqd_pq_doorbell_control &=
3004 ~CP_HQD_PQ_DOORBELL_CONTROL__DOORBELL_OFFSET_MASK;
3005 mqd->cp_hqd_pq_doorbell_control |=
3006 (ring->doorbell_index <<
3007 CP_HQD_PQ_DOORBELL_CONTROL__DOORBELL_OFFSET__SHIFT);
3008 mqd->cp_hqd_pq_doorbell_control |=
3009 CP_HQD_PQ_DOORBELL_CONTROL__DOORBELL_EN_MASK;
3010 mqd->cp_hqd_pq_doorbell_control &=
3011 ~(CP_HQD_PQ_DOORBELL_CONTROL__DOORBELL_SOURCE_MASK |
3012 CP_HQD_PQ_DOORBELL_CONTROL__DOORBELL_HIT_MASK);
3015 mqd->cp_hqd_pq_doorbell_control = 0;
3018 /* read and write pointers, similar to CP_RB0_WPTR/_RPTR */
3020 mqd->cp_hqd_pq_wptr = lower_32_bits(ring->wptr);
3021 mqd->cp_hqd_pq_rptr = RREG32(mmCP_HQD_PQ_RPTR);
3023 /* set the vmid for the queue */
3024 mqd->cp_hqd_vmid = 0;
3027 mqd->cp_hqd_ib_control = RREG32(mmCP_HQD_IB_CONTROL);
3028 mqd->cp_hqd_ib_base_addr_lo = RREG32(mmCP_HQD_IB_BASE_ADDR);
3029 mqd->cp_hqd_ib_base_addr_hi = RREG32(mmCP_HQD_IB_BASE_ADDR_HI);
3030 mqd->cp_hqd_ib_rptr = RREG32(mmCP_HQD_IB_RPTR);
3031 mqd->cp_hqd_persistent_state = RREG32(mmCP_HQD_PERSISTENT_STATE);
3032 mqd->cp_hqd_sema_cmd = RREG32(mmCP_HQD_SEMA_CMD);
3033 mqd->cp_hqd_msg_type = RREG32(mmCP_HQD_MSG_TYPE);
3034 mqd->cp_hqd_atomic0_preop_lo = RREG32(mmCP_HQD_ATOMIC0_PREOP_LO);
3035 mqd->cp_hqd_atomic0_preop_hi = RREG32(mmCP_HQD_ATOMIC0_PREOP_HI);
3036 mqd->cp_hqd_atomic1_preop_lo = RREG32(mmCP_HQD_ATOMIC1_PREOP_LO);
3037 mqd->cp_hqd_atomic1_preop_hi = RREG32(mmCP_HQD_ATOMIC1_PREOP_HI);
3038 mqd->cp_hqd_pq_rptr = RREG32(mmCP_HQD_PQ_RPTR);
3039 mqd->cp_hqd_quantum = RREG32(mmCP_HQD_QUANTUM);
3040 mqd->cp_hqd_pipe_priority = RREG32(mmCP_HQD_PIPE_PRIORITY);
3041 mqd->cp_hqd_queue_priority = RREG32(mmCP_HQD_QUEUE_PRIORITY);
3042 mqd->cp_hqd_iq_rptr = RREG32(mmCP_HQD_IQ_RPTR);
3044 /* activate the queue */
3045 mqd->cp_hqd_active = 1;
3048 int gfx_v7_0_mqd_commit(struct amdgpu_device *adev, struct cik_mqd *mqd)
3054 /* HQD registers extend from mmCP_MQD_BASE_ADDR to mmCP_MQD_CONTROL */
3055 mqd_data = &mqd->cp_mqd_base_addr_lo;
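/* struct cik_mqd mirrors the HQD register block from this field onwards,
 * so (mqd_reg - mmCP_MQD_BASE_ADDR) below indexes the struct directly */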
3057 /* disable wptr polling */
3058 tmp = RREG32(mmCP_PQ_WPTR_POLL_CNTL);
3059 tmp = REG_SET_FIELD(tmp, CP_PQ_WPTR_POLL_CNTL, EN, 0);
3060 WREG32(mmCP_PQ_WPTR_POLL_CNTL, tmp);
3062 /* program all HQD registers */
3063 for (mqd_reg = mmCP_HQD_VMID; mqd_reg <= mmCP_MQD_CONTROL; mqd_reg++)
3064 WREG32(mqd_reg, mqd_data[mqd_reg - mmCP_MQD_BASE_ADDR]);
3066 /* activate the HQD */
3067 for (mqd_reg = mmCP_MQD_BASE_ADDR; mqd_reg <= mmCP_HQD_ACTIVE; mqd_reg++)
3068 WREG32(mqd_reg, mqd_data[mqd_reg - mmCP_MQD_BASE_ADDR]);
3073 static int gfx_v7_0_compute_queue_init(struct amdgpu_device *adev, int ring_id)
3077 struct cik_mqd *mqd;
3078 struct amdgpu_ring *ring = &adev->gfx.compute_ring[ring_id];
3080 r = amdgpu_bo_create_reserved(adev, sizeof(struct cik_mqd), PAGE_SIZE,
3081 AMDGPU_GEM_DOMAIN_GTT, &ring->mqd_obj,
3082 &mqd_gpu_addr, (void **)&mqd);
3084 dev_warn(adev->dev, "(%d) create MQD bo failed\n", r);
3088 mutex_lock(&adev->srbm_mutex);
3089 cik_srbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
3091 gfx_v7_0_mqd_init(adev, mqd, mqd_gpu_addr, ring);
3092 gfx_v7_0_mqd_deactivate(adev);
3093 gfx_v7_0_mqd_commit(adev, mqd);
3095 cik_srbm_select(adev, 0, 0, 0, 0);
3096 mutex_unlock(&adev->srbm_mutex);
3098 amdgpu_bo_kunmap(ring->mqd_obj);
3099 amdgpu_bo_unreserve(ring->mqd_obj);
3104 * gfx_v7_0_cp_compute_resume - setup the compute queue registers
3106 * @adev: amdgpu_device pointer
3108 * Program the compute queues and test them to make sure they
3110 * Returns 0 for success, error for failure.
3112 static int gfx_v7_0_cp_compute_resume(struct amdgpu_device *adev)
3116 struct amdgpu_ring *ring;
3118 /* fix up chicken bits */
3119 tmp = RREG32(mmCP_CPF_DEBUG);
3121 WREG32(mmCP_CPF_DEBUG, tmp);
3123 /* init all pipes (even the ones we don't own) */
3124 for (i = 0; i < adev->gfx.mec.num_mec; i++)
3125 for (j = 0; j < adev->gfx.mec.num_pipe_per_mec; j++)
3126 gfx_v7_0_compute_pipe_init(adev, i, j);
3128 /* init the queues */
3129 for (i = 0; i < adev->gfx.num_compute_rings; i++) {
3130 r = gfx_v7_0_compute_queue_init(adev, i);
3132 gfx_v7_0_cp_compute_fini(adev);
3137 gfx_v7_0_cp_compute_enable(adev, true);
3139 for (i = 0; i < adev->gfx.num_compute_rings; i++) {
3140 ring = &adev->gfx.compute_ring[i];
3141 amdgpu_ring_test_helper(ring);
3147 static void gfx_v7_0_cp_enable(struct amdgpu_device *adev, bool enable)
3149 gfx_v7_0_cp_gfx_enable(adev, enable);
3150 gfx_v7_0_cp_compute_enable(adev, enable);
3153 static int gfx_v7_0_cp_load_microcode(struct amdgpu_device *adev)
3157 r = gfx_v7_0_cp_gfx_load_microcode(adev);
3160 r = gfx_v7_0_cp_compute_load_microcode(adev);
3167 static void gfx_v7_0_enable_gui_idle_interrupt(struct amdgpu_device *adev,
3170 u32 tmp = RREG32(mmCP_INT_CNTL_RING0);
3173 tmp |= (CP_INT_CNTL_RING0__CNTX_BUSY_INT_ENABLE_MASK |
3174 CP_INT_CNTL_RING0__CNTX_EMPTY_INT_ENABLE_MASK);
3176 tmp &= ~(CP_INT_CNTL_RING0__CNTX_BUSY_INT_ENABLE_MASK |
3177 CP_INT_CNTL_RING0__CNTX_EMPTY_INT_ENABLE_MASK);
3178 WREG32(mmCP_INT_CNTL_RING0, tmp);
3181 static int gfx_v7_0_cp_resume(struct amdgpu_device *adev)
3185 gfx_v7_0_enable_gui_idle_interrupt(adev, false);
3187 r = gfx_v7_0_cp_load_microcode(adev);
3191 r = gfx_v7_0_cp_gfx_resume(adev);
3194 r = gfx_v7_0_cp_compute_resume(adev);
3198 gfx_v7_0_enable_gui_idle_interrupt(adev, true);
3204 * gfx_v7_0_ring_emit_pipeline_sync - cik pipeline sync using the CP
3206 * @ring: the ring to emit the commands to
3208 * Sync the command pipeline with the PFP. E.g. wait for everything
3211 static void gfx_v7_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
3213 int usepfp = (ring->funcs->type == AMDGPU_RING_TYPE_GFX);
3214 uint32_t seq = ring->fence_drv.sync_seq;
3215 uint64_t addr = ring->fence_drv.gpu_addr;
3217 amdgpu_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5));
3218 amdgpu_ring_write(ring, (WAIT_REG_MEM_MEM_SPACE(1) | /* memory */
3219 WAIT_REG_MEM_FUNCTION(3) | /* equal */
3220 WAIT_REG_MEM_ENGINE(usepfp))); /* pfp or me */
3221 amdgpu_ring_write(ring, addr & 0xfffffffc);
3222 amdgpu_ring_write(ring, upper_32_bits(addr) & 0xffffffff);
3223 amdgpu_ring_write(ring, seq);
3224 amdgpu_ring_write(ring, 0xffffffff);
3225 amdgpu_ring_write(ring, 4); /* poll interval */
3228 /* sync CE with ME to prevent CE from fetching the CEIB before the context switch is done */
3229 amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0));
3230 amdgpu_ring_write(ring, 0);
3231 amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0));
3232 amdgpu_ring_write(ring, 0);
3238 * VMID 0 covers the physical GPU address space as used by the kernel.
3239 * VMIDs 1-15 are used for userspace clients and are handled
3240 * by the amdgpu vm/hsa code.
3243 * gfx_v7_0_ring_emit_vm_flush - cik vm flush using the CP
3245 * @ring: the ring to emit the flush on
3247 * Update the page table base and flush the VM TLB
3248 * using the CP (CIK).
3250 static void gfx_v7_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
3251 unsigned vmid, uint64_t pd_addr)
3253 int usepfp = (ring->funcs->type == AMDGPU_RING_TYPE_GFX);
3255 amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);
3257 /* wait for the invalidate to complete */
3258 amdgpu_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5));
3259 amdgpu_ring_write(ring, (WAIT_REG_MEM_OPERATION(0) | /* wait */
3260 WAIT_REG_MEM_FUNCTION(0) | /* always */
3261 WAIT_REG_MEM_ENGINE(0))); /* me */
3262 amdgpu_ring_write(ring, mmVM_INVALIDATE_REQUEST);
3263 amdgpu_ring_write(ring, 0);
3264 amdgpu_ring_write(ring, 0); /* ref */
3265 amdgpu_ring_write(ring, 0); /* mask */
3266 amdgpu_ring_write(ring, 0x20); /* poll interval */
3268 /* compute doesn't have PFP */
3270 /* sync PFP to ME, otherwise we might get invalid PFP reads */
3271 amdgpu_ring_write(ring, PACKET3(PACKET3_PFP_SYNC_ME, 0));
3272 amdgpu_ring_write(ring, 0x0);
3274 /* sync CE with ME to prevent CE from fetching the CEIB before the context switch is done */
3275 amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0));
3276 amdgpu_ring_write(ring, 0);
3277 amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0));
3278 amdgpu_ring_write(ring, 0);
3282 static void gfx_v7_0_ring_emit_wreg(struct amdgpu_ring *ring,
3283 uint32_t reg, uint32_t val)
3285 int usepfp = (ring->funcs->type == AMDGPU_RING_TYPE_GFX);
3287 amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
3288 amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(usepfp) |
3289 WRITE_DATA_DST_SEL(0)));
3290 amdgpu_ring_write(ring, reg);
3291 amdgpu_ring_write(ring, 0);
3292 amdgpu_ring_write(ring, val);
3297 * The RLC is a multi-purpose microengine that handles a
3298 * variety of functions.
3300 static int gfx_v7_0_rlc_init(struct amdgpu_device *adev)
3304 const struct cs_section_def *cs_data;
3307 /* allocate rlc buffers */
3308 if (adev->flags & AMD_IS_APU) {
3309 if (adev->asic_type == CHIP_KAVERI) {
3310 adev->gfx.rlc.reg_list = spectre_rlc_save_restore_register_list;
3311 adev->gfx.rlc.reg_list_size =
3312 (u32)ARRAY_SIZE(spectre_rlc_save_restore_register_list);
3314 adev->gfx.rlc.reg_list = kalindi_rlc_save_restore_register_list;
3315 adev->gfx.rlc.reg_list_size =
3316 (u32)ARRAY_SIZE(kalindi_rlc_save_restore_register_list);
3319 adev->gfx.rlc.cs_data = ci_cs_data;
3320 adev->gfx.rlc.cp_table_size = ALIGN(CP_ME_TABLE_SIZE * 5 * 4, 2048); /* CP JT */
3321 adev->gfx.rlc.cp_table_size += 64 * 1024; /* GDS */
3323 src_ptr = adev->gfx.rlc.reg_list;
3324 dws = adev->gfx.rlc.reg_list_size;
3325 dws += (5 * 16) + 48 + 48 + 64;
3327 cs_data = adev->gfx.rlc.cs_data;
3330 /* init save restore block */
3331 r = amdgpu_gfx_rlc_init_sr(adev, dws);
3337 /* init clear state block */
3338 r = amdgpu_gfx_rlc_init_csb(adev);
3343 if (adev->gfx.rlc.cp_table_size) {
3344 r = amdgpu_gfx_rlc_init_cpt(adev);
3349 /* init spm vmid with 0xf */
3350 if (adev->gfx.rlc.funcs->update_spm_vmid)
3351 adev->gfx.rlc.funcs->update_spm_vmid(adev, 0xf);
3356 static void gfx_v7_0_enable_lbpw(struct amdgpu_device *adev, bool enable)
3360 tmp = RREG32(mmRLC_LB_CNTL);
3362 tmp |= RLC_LB_CNTL__LOAD_BALANCE_ENABLE_MASK;
3364 tmp &= ~RLC_LB_CNTL__LOAD_BALANCE_ENABLE_MASK;
3365 WREG32(mmRLC_LB_CNTL, tmp);
3368 static void gfx_v7_0_wait_for_rlc_serdes(struct amdgpu_device *adev)
3373 mutex_lock(&adev->grbm_idx_mutex);
3374 for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
3375 for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
3376 gfx_v7_0_select_se_sh(adev, i, j, 0xffffffff);
3377 for (k = 0; k < adev->usec_timeout; k++) {
3378 if (RREG32(mmRLC_SERDES_CU_MASTER_BUSY) == 0)
3384 gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
3385 mutex_unlock(&adev->grbm_idx_mutex);
3387 mask = RLC_SERDES_NONCU_MASTER_BUSY__SE_MASTER_BUSY_MASK |
3388 RLC_SERDES_NONCU_MASTER_BUSY__GC_MASTER_BUSY_MASK |
3389 RLC_SERDES_NONCU_MASTER_BUSY__TC0_MASTER_BUSY_MASK |
3390 RLC_SERDES_NONCU_MASTER_BUSY__TC1_MASTER_BUSY_MASK;
3391 for (k = 0; k < adev->usec_timeout; k++) {
3392 if ((RREG32(mmRLC_SERDES_NONCU_MASTER_BUSY) & mask) == 0)
3398 static void gfx_v7_0_update_rlc(struct amdgpu_device *adev, u32 rlc)
3402 tmp = RREG32(mmRLC_CNTL);
3404 WREG32(mmRLC_CNTL, rlc);
3407 static u32 gfx_v7_0_halt_rlc(struct amdgpu_device *adev)
3411 orig = data = RREG32(mmRLC_CNTL);
3413 if (data & RLC_CNTL__RLC_ENABLE_F32_MASK) {
3416 data &= ~RLC_CNTL__RLC_ENABLE_F32_MASK;
3417 WREG32(mmRLC_CNTL, data);
3419 for (i = 0; i < adev->usec_timeout; i++) {
3420 if ((RREG32(mmRLC_GPM_STAT) & RLC_GPM_STAT__RLC_BUSY_MASK) == 0)
3425 gfx_v7_0_wait_for_rlc_serdes(adev);
3431 static bool gfx_v7_0_is_rlc_enabled(struct amdgpu_device *adev)
3436 static void gfx_v7_0_set_safe_mode(struct amdgpu_device *adev)
3440 tmp = 0x1 | (1 << 1);
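/* bit 0 raises the request, bit 1 carries the message: 1 here to enter
 * safe mode, 0 in gfx_v7_0_unset_safe_mode() below to leave it again */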
3441 WREG32(mmRLC_GPR_REG2, tmp);
3443 mask = RLC_GPM_STAT__GFX_POWER_STATUS_MASK |
3444 RLC_GPM_STAT__GFX_CLOCK_STATUS_MASK;
3445 for (i = 0; i < adev->usec_timeout; i++) {
3446 if ((RREG32(mmRLC_GPM_STAT) & mask) == mask)
3451 for (i = 0; i < adev->usec_timeout; i++) {
3452 if ((RREG32(mmRLC_GPR_REG2) & 0x1) == 0)
3458 static void gfx_v7_0_unset_safe_mode(struct amdgpu_device *adev)
3462 tmp = 0x1 | (0 << 1);
3463 WREG32(mmRLC_GPR_REG2, tmp);
3467 * gfx_v7_0_rlc_stop - stop the RLC ME
3469 * @adev: amdgpu_device pointer
3471 * Halt the RLC ME (MicroEngine) (CIK).
3473 static void gfx_v7_0_rlc_stop(struct amdgpu_device *adev)
3475 WREG32(mmRLC_CNTL, 0);
3477 gfx_v7_0_enable_gui_idle_interrupt(adev, false);
3479 gfx_v7_0_wait_for_rlc_serdes(adev);
3483 * gfx_v7_0_rlc_start - start the RLC ME
3485 * @adev: amdgpu_device pointer
3487 * Unhalt the RLC ME (MicroEngine) (CIK).
3489 static void gfx_v7_0_rlc_start(struct amdgpu_device *adev)
3491 WREG32(mmRLC_CNTL, RLC_CNTL__RLC_ENABLE_F32_MASK);
3493 gfx_v7_0_enable_gui_idle_interrupt(adev, true);
3498 static void gfx_v7_0_rlc_reset(struct amdgpu_device *adev)
3500 u32 tmp = RREG32(mmGRBM_SOFT_RESET);
3502 tmp |= GRBM_SOFT_RESET__SOFT_RESET_RLC_MASK;
3503 WREG32(mmGRBM_SOFT_RESET, tmp);
3505 tmp &= ~GRBM_SOFT_RESET__SOFT_RESET_RLC_MASK;
3506 WREG32(mmGRBM_SOFT_RESET, tmp);
3511 * gfx_v7_0_rlc_resume - setup the RLC hw
3513 * @adev: amdgpu_device pointer
3515 * Initialize the RLC registers, load the ucode,
3516 * and start the RLC (CIK).
3517 * Returns 0 for success, -EINVAL if the ucode is not available.
3519 static int gfx_v7_0_rlc_resume(struct amdgpu_device *adev)
3521 const struct rlc_firmware_header_v1_0 *hdr;
3522 const __le32 *fw_data;
3523 unsigned i, fw_size;
3526 if (!adev->gfx.rlc_fw)
3529 hdr = (const struct rlc_firmware_header_v1_0 *)adev->gfx.rlc_fw->data;
3530 amdgpu_ucode_print_rlc_hdr(&hdr->header);
3531 adev->gfx.rlc_fw_version = le32_to_cpu(hdr->header.ucode_version);
3532 adev->gfx.rlc_feature_version = le32_to_cpu(
3533 hdr->ucode_feature_version);
3535 adev->gfx.rlc.funcs->stop(adev);
3538 tmp = RREG32(mmRLC_CGCG_CGLS_CTRL) & 0xfffffffc;
3539 WREG32(mmRLC_CGCG_CGLS_CTRL, tmp);
3541 adev->gfx.rlc.funcs->reset(adev);
3543 gfx_v7_0_init_pg(adev);
3545 WREG32(mmRLC_LB_CNTR_INIT, 0);
3546 WREG32(mmRLC_LB_CNTR_MAX, 0x00008000);
3548 mutex_lock(&adev->grbm_idx_mutex);
3549 gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
3550 WREG32(mmRLC_LB_INIT_CU_MASK, 0xffffffff);
3551 WREG32(mmRLC_LB_PARAMS, 0x00600408);
3552 WREG32(mmRLC_LB_CNTL, 0x80000004);
3553 mutex_unlock(&adev->grbm_idx_mutex);
3555 WREG32(mmRLC_MC_CNTL, 0);
3556 WREG32(mmRLC_UCODE_CNTL, 0);
3558 fw_data = (const __le32 *)
3559 (adev->gfx.rlc_fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes));
3560 fw_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;
3561 WREG32(mmRLC_GPM_UCODE_ADDR, 0);
3562 for (i = 0; i < fw_size; i++)
3563 WREG32(mmRLC_GPM_UCODE_DATA, le32_to_cpup(fw_data++));
3564 WREG32(mmRLC_GPM_UCODE_ADDR, adev->gfx.rlc_fw_version);
3566 /* XXX - find out what chips support lbpw */
3567 gfx_v7_0_enable_lbpw(adev, false);
3569 if (adev->asic_type == CHIP_BONAIRE)
3570 WREG32(mmRLC_DRIVER_CPDMA_STATUS, 0);
3572 adev->gfx.rlc.funcs->start(adev);
3577 static void gfx_v7_0_update_spm_vmid(struct amdgpu_device *adev, unsigned vmid)
3581 data = RREG32(mmRLC_SPM_VMID);
3583 data &= ~RLC_SPM_VMID__RLC_SPM_VMID_MASK;
3584 data |= (vmid & RLC_SPM_VMID__RLC_SPM_VMID_MASK) << RLC_SPM_VMID__RLC_SPM_VMID__SHIFT;
3586 WREG32(mmRLC_SPM_VMID, data);
3589 static void gfx_v7_0_enable_cgcg(struct amdgpu_device *adev, bool enable)
3591 u32 data, orig, tmp, tmp2;
3593 orig = data = RREG32(mmRLC_CGCG_CGLS_CTRL);
3595 if (enable && (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGCG)) {
3596 gfx_v7_0_enable_gui_idle_interrupt(adev, true);
3598 tmp = gfx_v7_0_halt_rlc(adev);
3600 mutex_lock(&adev->grbm_idx_mutex);
3601 gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
3602 WREG32(mmRLC_SERDES_WR_CU_MASTER_MASK, 0xffffffff);
3603 WREG32(mmRLC_SERDES_WR_NONCU_MASTER_MASK, 0xffffffff);
3604 tmp2 = RLC_SERDES_WR_CTRL__BPM_ADDR_MASK |
3605 RLC_SERDES_WR_CTRL__CGCG_OVERRIDE_0_MASK |
3606 RLC_SERDES_WR_CTRL__CGLS_ENABLE_MASK;
3607 WREG32(mmRLC_SERDES_WR_CTRL, tmp2);
3608 mutex_unlock(&adev->grbm_idx_mutex);
3610 gfx_v7_0_update_rlc(adev, tmp);
3612 data |= RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK | RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK;
3614 WREG32(mmRLC_CGCG_CGLS_CTRL, data);
3617 gfx_v7_0_enable_gui_idle_interrupt(adev, false);
3619 RREG32(mmCB_CGTT_SCLK_CTRL);
3620 RREG32(mmCB_CGTT_SCLK_CTRL);
3621 RREG32(mmCB_CGTT_SCLK_CTRL);
3622 RREG32(mmCB_CGTT_SCLK_CTRL);
3624 data &= ~(RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK | RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK);
3626 WREG32(mmRLC_CGCG_CGLS_CTRL, data);
3628 gfx_v7_0_enable_gui_idle_interrupt(adev, true);
3632 static void gfx_v7_0_enable_mgcg(struct amdgpu_device *adev, bool enable)
3634 u32 data, orig, tmp = 0;
3636 if (enable && (adev->cg_flags & AMD_CG_SUPPORT_GFX_MGCG)) {
3637 if (adev->cg_flags & AMD_CG_SUPPORT_GFX_MGLS) {
3638 if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CP_LS) {
3639 orig = data = RREG32(mmCP_MEM_SLP_CNTL);
3640 data |= CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK;
3642 WREG32(mmCP_MEM_SLP_CNTL, data);
3646 orig = data = RREG32(mmRLC_CGTT_MGCG_OVERRIDE);
3650 WREG32(mmRLC_CGTT_MGCG_OVERRIDE, data);
3652 tmp = gfx_v7_0_halt_rlc(adev);
3654 mutex_lock(&adev->grbm_idx_mutex);
3655 gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
3656 WREG32(mmRLC_SERDES_WR_CU_MASTER_MASK, 0xffffffff);
3657 WREG32(mmRLC_SERDES_WR_NONCU_MASTER_MASK, 0xffffffff);
3658 data = RLC_SERDES_WR_CTRL__BPM_ADDR_MASK |
3659 RLC_SERDES_WR_CTRL__MGCG_OVERRIDE_0_MASK;
3660 WREG32(mmRLC_SERDES_WR_CTRL, data);
3661 mutex_unlock(&adev->grbm_idx_mutex);
3663 gfx_v7_0_update_rlc(adev, tmp);
3665 if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGTS) {
3666 orig = data = RREG32(mmCGTS_SM_CTRL_REG);
3667 data &= ~CGTS_SM_CTRL_REG__SM_MODE_MASK;
3668 data |= (0x2 << CGTS_SM_CTRL_REG__SM_MODE__SHIFT);
3669 data |= CGTS_SM_CTRL_REG__SM_MODE_ENABLE_MASK;
3670 data &= ~CGTS_SM_CTRL_REG__OVERRIDE_MASK;
3671 if ((adev->cg_flags & AMD_CG_SUPPORT_GFX_MGLS) &&
3672 (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGTS_LS))
3673 data &= ~CGTS_SM_CTRL_REG__LS_OVERRIDE_MASK;
3674 data &= ~CGTS_SM_CTRL_REG__ON_MONITOR_ADD_MASK;
3675 data |= CGTS_SM_CTRL_REG__ON_MONITOR_ADD_EN_MASK;
3676 data |= (0x96 << CGTS_SM_CTRL_REG__ON_MONITOR_ADD__SHIFT);
3678 WREG32(mmCGTS_SM_CTRL_REG, data);
3681 orig = data = RREG32(mmRLC_CGTT_MGCG_OVERRIDE);
3684 WREG32(mmRLC_CGTT_MGCG_OVERRIDE, data);
3686 data = RREG32(mmRLC_MEM_SLP_CNTL);
3687 if (data & RLC_MEM_SLP_CNTL__RLC_MEM_LS_EN_MASK) {
3688 data &= ~RLC_MEM_SLP_CNTL__RLC_MEM_LS_EN_MASK;
3689 WREG32(mmRLC_MEM_SLP_CNTL, data);
3692 data = RREG32(mmCP_MEM_SLP_CNTL);
3693 if (data & CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK) {
3694 data &= ~CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK;
3695 WREG32(mmCP_MEM_SLP_CNTL, data);
3698 orig = data = RREG32(mmCGTS_SM_CTRL_REG);
3699 data |= CGTS_SM_CTRL_REG__OVERRIDE_MASK | CGTS_SM_CTRL_REG__LS_OVERRIDE_MASK;
3701 WREG32(mmCGTS_SM_CTRL_REG, data);
3703 tmp = gfx_v7_0_halt_rlc(adev);
3705 mutex_lock(&adev->grbm_idx_mutex);
3706 gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
3707 WREG32(mmRLC_SERDES_WR_CU_MASTER_MASK, 0xffffffff);
3708 WREG32(mmRLC_SERDES_WR_NONCU_MASTER_MASK, 0xffffffff);
3709 data = RLC_SERDES_WR_CTRL__BPM_ADDR_MASK | RLC_SERDES_WR_CTRL__MGCG_OVERRIDE_1_MASK;
3710 WREG32(mmRLC_SERDES_WR_CTRL, data);
3711 mutex_unlock(&adev->grbm_idx_mutex);
3713 gfx_v7_0_update_rlc(adev, tmp);
3717 static void gfx_v7_0_update_cg(struct amdgpu_device *adev,
3720 gfx_v7_0_enable_gui_idle_interrupt(adev, false);
3721 /* order matters! */
3723 gfx_v7_0_enable_mgcg(adev, true);
3724 gfx_v7_0_enable_cgcg(adev, true);
3726 gfx_v7_0_enable_cgcg(adev, false);
3727 gfx_v7_0_enable_mgcg(adev, false);
3729 gfx_v7_0_enable_gui_idle_interrupt(adev, true);
3732 static void gfx_v7_0_enable_sclk_slowdown_on_pu(struct amdgpu_device *adev,
3737 orig = data = RREG32(mmRLC_PG_CNTL);
3738 if (enable && (adev->pg_flags & AMD_PG_SUPPORT_RLC_SMU_HS))
3739 data |= RLC_PG_CNTL__SMU_CLK_SLOWDOWN_ON_PU_ENABLE_MASK;
3741 data &= ~RLC_PG_CNTL__SMU_CLK_SLOWDOWN_ON_PU_ENABLE_MASK;
3743 WREG32(mmRLC_PG_CNTL, data);
3746 static void gfx_v7_0_enable_sclk_slowdown_on_pd(struct amdgpu_device *adev,
3751 orig = data = RREG32(mmRLC_PG_CNTL);
3752 if (enable && (adev->pg_flags & AMD_PG_SUPPORT_RLC_SMU_HS))
3753 data |= RLC_PG_CNTL__SMU_CLK_SLOWDOWN_ON_PD_ENABLE_MASK;
3755 data &= ~RLC_PG_CNTL__SMU_CLK_SLOWDOWN_ON_PD_ENABLE_MASK;
3757 WREG32(mmRLC_PG_CNTL, data);
3760 static void gfx_v7_0_enable_cp_pg(struct amdgpu_device *adev, bool enable)
3764 orig = data = RREG32(mmRLC_PG_CNTL);
3765 if (enable && (adev->pg_flags & AMD_PG_SUPPORT_CP))
3770 WREG32(mmRLC_PG_CNTL, data);
3773 static void gfx_v7_0_enable_gds_pg(struct amdgpu_device *adev, bool enable)
3777 orig = data = RREG32(mmRLC_PG_CNTL);
3778 if (enable && (adev->pg_flags & AMD_PG_SUPPORT_GDS))
3783 WREG32(mmRLC_PG_CNTL, data);
3786 static int gfx_v7_0_cp_pg_table_num(struct amdgpu_device *adev)
3788 if (adev->asic_type == CHIP_KAVERI)
3794 static void gfx_v7_0_enable_gfx_cgpg(struct amdgpu_device *adev,
3799 if (enable && (adev->pg_flags & AMD_PG_SUPPORT_GFX_PG)) {
3800 orig = data = RREG32(mmRLC_PG_CNTL);
3801 data |= RLC_PG_CNTL__GFX_POWER_GATING_ENABLE_MASK;
3803 WREG32(mmRLC_PG_CNTL, data);
3805 orig = data = RREG32(mmRLC_AUTO_PG_CTRL);
3806 data |= RLC_AUTO_PG_CTRL__AUTO_PG_EN_MASK;
3808 WREG32(mmRLC_AUTO_PG_CTRL, data);
3810 orig = data = RREG32(mmRLC_PG_CNTL);
3811 data &= ~RLC_PG_CNTL__GFX_POWER_GATING_ENABLE_MASK;
3813 WREG32(mmRLC_PG_CNTL, data);
3815 orig = data = RREG32(mmRLC_AUTO_PG_CTRL);
3816 data &= ~RLC_AUTO_PG_CTRL__AUTO_PG_EN_MASK;
3818 WREG32(mmRLC_AUTO_PG_CTRL, data);
3820 data = RREG32(mmDB_RENDER_CONTROL);
3824 static void gfx_v7_0_set_user_cu_inactive_bitmap(struct amdgpu_device *adev,
3832 data = bitmap << GC_USER_SHADER_ARRAY_CONFIG__INACTIVE_CUS__SHIFT;
3833 data &= GC_USER_SHADER_ARRAY_CONFIG__INACTIVE_CUS_MASK;
3835 WREG32(mmGC_USER_SHADER_ARRAY_CONFIG, data);
3838 static u32 gfx_v7_0_get_cu_active_bitmap(struct amdgpu_device *adev)
3842 data = RREG32(mmCC_GC_SHADER_ARRAY_CONFIG);
3843 data |= RREG32(mmGC_USER_SHADER_ARRAY_CONFIG);
3845 data &= CC_GC_SHADER_ARRAY_CONFIG__INACTIVE_CUS_MASK;
3846 data >>= CC_GC_SHADER_ARRAY_CONFIG__INACTIVE_CUS__SHIFT;
3848 mask = amdgpu_gfx_create_bitmask(adev->gfx.config.max_cu_per_sh);
3850 return (~data) & mask;
3853 static void gfx_v7_0_init_ao_cu_mask(struct amdgpu_device *adev)
3857 WREG32(mmRLC_PG_ALWAYS_ON_CU_MASK, adev->gfx.cu_info.ao_cu_mask);
3859 tmp = RREG32(mmRLC_MAX_PG_CU);
3860 tmp &= ~RLC_MAX_PG_CU__MAX_POWERED_UP_CU_MASK;
3861 tmp |= (adev->gfx.cu_info.number << RLC_MAX_PG_CU__MAX_POWERED_UP_CU__SHIFT);
3862 WREG32(mmRLC_MAX_PG_CU, tmp);
3865 static void gfx_v7_0_enable_gfx_static_mgpg(struct amdgpu_device *adev,
3870 orig = data = RREG32(mmRLC_PG_CNTL);
3871 if (enable && (adev->pg_flags & AMD_PG_SUPPORT_GFX_SMG))
3872 data |= RLC_PG_CNTL__STATIC_PER_CU_PG_ENABLE_MASK;
3874 data &= ~RLC_PG_CNTL__STATIC_PER_CU_PG_ENABLE_MASK;
3876 WREG32(mmRLC_PG_CNTL, data);
3879 static void gfx_v7_0_enable_gfx_dynamic_mgpg(struct amdgpu_device *adev,
3884 orig = data = RREG32(mmRLC_PG_CNTL);
3885 if (enable && (adev->pg_flags & AMD_PG_SUPPORT_GFX_DMG))
3886 data |= RLC_PG_CNTL__DYN_PER_CU_PG_ENABLE_MASK;
3888 data &= ~RLC_PG_CNTL__DYN_PER_CU_PG_ENABLE_MASK;
3890 WREG32(mmRLC_PG_CNTL, data);
3893 #define RLC_SAVE_AND_RESTORE_STARTING_OFFSET 0x90
3894 #define RLC_CLEAR_STATE_DESCRIPTOR_OFFSET 0x3D
3896 static void gfx_v7_0_init_gfx_cgpg(struct amdgpu_device *adev)
3901 if (adev->gfx.rlc.cs_data) {
3902 WREG32(mmRLC_GPM_SCRATCH_ADDR, RLC_CLEAR_STATE_DESCRIPTOR_OFFSET);
3903 WREG32(mmRLC_GPM_SCRATCH_DATA, upper_32_bits(adev->gfx.rlc.clear_state_gpu_addr));
3904 WREG32(mmRLC_GPM_SCRATCH_DATA, lower_32_bits(adev->gfx.rlc.clear_state_gpu_addr));
3905 WREG32(mmRLC_GPM_SCRATCH_DATA, adev->gfx.rlc.clear_state_size);
3907 WREG32(mmRLC_GPM_SCRATCH_ADDR, RLC_CLEAR_STATE_DESCRIPTOR_OFFSET);
3908 for (i = 0; i < 3; i++)
3909 WREG32(mmRLC_GPM_SCRATCH_DATA, 0);
3911 if (adev->gfx.rlc.reg_list) {
3912 WREG32(mmRLC_GPM_SCRATCH_ADDR, RLC_SAVE_AND_RESTORE_STARTING_OFFSET);
3913 for (i = 0; i < adev->gfx.rlc.reg_list_size; i++)
3914 WREG32(mmRLC_GPM_SCRATCH_DATA, adev->gfx.rlc.reg_list[i]);
3917 orig = data = RREG32(mmRLC_PG_CNTL);
3918 data |= RLC_PG_CNTL__GFX_POWER_GATING_SRC_MASK;
3920 WREG32(mmRLC_PG_CNTL, data);
3922 WREG32(mmRLC_SAVE_AND_RESTORE_BASE, adev->gfx.rlc.save_restore_gpu_addr >> 8);
3923 WREG32(mmRLC_JUMP_TABLE_RESTORE, adev->gfx.rlc.cp_table_gpu_addr >> 8);
3925 data = RREG32(mmCP_RB_WPTR_POLL_CNTL);
3926 data &= ~CP_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT_MASK;
3927 data |= (0x60 << CP_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT);
3928 WREG32(mmCP_RB_WPTR_POLL_CNTL, data);
3931 WREG32(mmRLC_PG_DELAY, data);
3933 data = RREG32(mmRLC_PG_DELAY_2);
3936 WREG32(mmRLC_PG_DELAY_2, data);
3938 data = RREG32(mmRLC_AUTO_PG_CTRL);
3939 data &= ~RLC_AUTO_PG_CTRL__GRBM_REG_SAVE_GFX_IDLE_THRESHOLD_MASK;
3940 data |= (0x700 << RLC_AUTO_PG_CTRL__GRBM_REG_SAVE_GFX_IDLE_THRESHOLD__SHIFT);
3941 WREG32(mmRLC_AUTO_PG_CTRL, data);
3945 static void gfx_v7_0_update_gfx_pg(struct amdgpu_device *adev, bool enable)
3947 gfx_v7_0_enable_gfx_cgpg(adev, enable);
3948 gfx_v7_0_enable_gfx_static_mgpg(adev, enable);
3949 gfx_v7_0_enable_gfx_dynamic_mgpg(adev, enable);
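/**
 * gfx_v7_0_get_csb_size - compute the clear state buffer size
 *
 * @adev: amdgpu_device pointer
 *
 * Walks the clear state sections and returns the number of dwords the
 * buffer built by gfx_v7_0_get_csb_buffer() will occupy.
 */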
3952 static u32 gfx_v7_0_get_csb_size(struct amdgpu_device *adev)
3955 const struct cs_section_def *sect = NULL;
3956 const struct cs_extent_def *ext = NULL;
3958 if (adev->gfx.rlc.cs_data == NULL)
3961 /* begin clear state */
3963 /* context control state */
3966 for (sect = adev->gfx.rlc.cs_data; sect->section != NULL; ++sect) {
3967 for (ext = sect->section; ext->extent != NULL; ++ext) {
3968 if (sect->id == SECT_CONTEXT)
3969 count += 2 + ext->reg_count;
3974 /* pa_sc_raster_config/pa_sc_raster_config1 */
3976 /* end clear state */
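/**
 * gfx_v7_0_get_csb_buffer - build the clear state buffer
 *
 * @adev: amdgpu_device pointer
 * @buffer: destination for the PM4 clear state packets
 *
 * Emits the preamble, context control, SET_CONTEXT_REG packets for each
 * SECT_CONTEXT extent, the per-ASIC PA_SC_RASTER_CONFIG values and a
 * final CLEAR_STATE packet, all as little-endian dwords.
 */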
3984 static void gfx_v7_0_get_csb_buffer(struct amdgpu_device *adev,
3985 volatile u32 *buffer)
3988 const struct cs_section_def *sect = NULL;
3989 const struct cs_extent_def *ext = NULL;
3991 if (adev->gfx.rlc.cs_data == NULL)
3996 buffer[count++] = cpu_to_le32(PACKET3(PACKET3_PREAMBLE_CNTL, 0));
3997 buffer[count++] = cpu_to_le32(PACKET3_PREAMBLE_BEGIN_CLEAR_STATE);
3999 buffer[count++] = cpu_to_le32(PACKET3(PACKET3_CONTEXT_CONTROL, 1));
4000 buffer[count++] = cpu_to_le32(0x80000000);
4001 buffer[count++] = cpu_to_le32(0x80000000);
4003 for (sect = adev->gfx.rlc.cs_data; sect->section != NULL; ++sect) {
4004 for (ext = sect->section; ext->extent != NULL; ++ext) {
4005 if (sect->id == SECT_CONTEXT) {
4007 cpu_to_le32(PACKET3(PACKET3_SET_CONTEXT_REG, ext->reg_count));
4008 buffer[count++] = cpu_to_le32(ext->reg_index - PACKET3_SET_CONTEXT_REG_START);
4009 for (i = 0; i < ext->reg_count; i++)
4010 buffer[count++] = cpu_to_le32(ext->extent[i]);
4017 buffer[count++] = cpu_to_le32(PACKET3(PACKET3_SET_CONTEXT_REG, 2));
4018 buffer[count++] = cpu_to_le32(mmPA_SC_RASTER_CONFIG - PACKET3_SET_CONTEXT_REG_START);
4019 switch (adev->asic_type) {
4021 buffer[count++] = cpu_to_le32(0x16000012);
4022 buffer[count++] = cpu_to_le32(0x00000000);
4025 buffer[count++] = cpu_to_le32(0x00000000); /* XXX */
4026 buffer[count++] = cpu_to_le32(0x00000000);
4030 buffer[count++] = cpu_to_le32(0x00000000); /* XXX */
4031 buffer[count++] = cpu_to_le32(0x00000000);
4034 buffer[count++] = cpu_to_le32(0x3a00161a);
4035 buffer[count++] = cpu_to_le32(0x0000002e);
4038 buffer[count++] = cpu_to_le32(0x00000000);
4039 buffer[count++] = cpu_to_le32(0x00000000);
4043 buffer[count++] = cpu_to_le32(PACKET3(PACKET3_PREAMBLE_CNTL, 0));
4044 buffer[count++] = cpu_to_le32(PACKET3_PREAMBLE_END_CLEAR_STATE);
4046 buffer[count++] = cpu_to_le32(PACKET3(PACKET3_CLEAR_STATE, 0));
4047 buffer[count++] = cpu_to_le32(0);
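/**
 * gfx_v7_0_init_pg - initialize power gating
 *
 * @adev: amdgpu_device pointer
 *
 * Enables the power gating features selected in adev->pg_flags: sclk
 * slowdown on power up/down, CP and GDS power gating, the always-on CU
 * mask, and finally gfx power gating itself.
 */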
4050 static void gfx_v7_0_init_pg(struct amdgpu_device *adev)
4052 if (adev->pg_flags & (AMD_PG_SUPPORT_GFX_PG |
4053 AMD_PG_SUPPORT_GFX_SMG |
4054 AMD_PG_SUPPORT_GFX_DMG |
4056 AMD_PG_SUPPORT_GDS |
4057 AMD_PG_SUPPORT_RLC_SMU_HS)) {
4058 gfx_v7_0_enable_sclk_slowdown_on_pu(adev, true);
4059 gfx_v7_0_enable_sclk_slowdown_on_pd(adev, true);
4060 if (adev->pg_flags & AMD_PG_SUPPORT_GFX_PG) {
4061 gfx_v7_0_init_gfx_cgpg(adev);
4062 gfx_v7_0_enable_cp_pg(adev, true);
4063 gfx_v7_0_enable_gds_pg(adev, true);
4065 gfx_v7_0_init_ao_cu_mask(adev);
4066 gfx_v7_0_update_gfx_pg(adev, true);
4070 static void gfx_v7_0_fini_pg(struct amdgpu_device *adev)
4072 if (adev->pg_flags & (AMD_PG_SUPPORT_GFX_PG |
4073 AMD_PG_SUPPORT_GFX_SMG |
4074 AMD_PG_SUPPORT_GFX_DMG |
4076 AMD_PG_SUPPORT_GDS |
4077 AMD_PG_SUPPORT_RLC_SMU_HS)) {
4078 gfx_v7_0_update_gfx_pg(adev, false);
4079 if (adev->pg_flags & AMD_PG_SUPPORT_GFX_PG) {
4080 gfx_v7_0_enable_cp_pg(adev, false);
4081 gfx_v7_0_enable_gds_pg(adev, false);
4087 * gfx_v7_0_get_gpu_clock_counter - return GPU clock counter snapshot
4089 * @adev: amdgpu_device pointer
4091 * Fetches a GPU clock counter snapshot (CIK).
4092 * Returns the 64-bit clock counter snapshot.
4094 static uint64_t gfx_v7_0_get_gpu_clock_counter(struct amdgpu_device *adev)
4098 mutex_lock(&adev->gfx.gpu_clock_mutex);
4099 WREG32(mmRLC_CAPTURE_GPU_CLOCK_COUNT, 1);
4100 clock = (uint64_t)RREG32(mmRLC_GPU_CLOCK_COUNT_LSB) |
4101 ((uint64_t)RREG32(mmRLC_GPU_CLOCK_COUNT_MSB) << 32ULL);
4102 mutex_unlock(&adev->gfx.gpu_clock_mutex);
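/**
 * gfx_v7_0_ring_emit_gds_switch - emit a GDS switch on the ring
 *
 * @ring: amdgpu_ring pointer
 * @vmid: VMID whose GDS partition is being reprogrammed
 * @gds_base: GDS memory base
 * @gds_size: GDS memory size
 * @gws_base: GWS base
 * @gws_size: GWS size
 * @oa_base: OA base
 * @oa_size: OA size
 *
 * Emits WRITE_DATA packets that update the per-VMID GDS base/size, GWS
 * and OA registers.
 */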
4106 static void gfx_v7_0_ring_emit_gds_switch(struct amdgpu_ring *ring,
4108 uint32_t gds_base, uint32_t gds_size,
4109 uint32_t gws_base, uint32_t gws_size,
4110 uint32_t oa_base, uint32_t oa_size)
4113 amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
4114 amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
4115 WRITE_DATA_DST_SEL(0)));
4116 amdgpu_ring_write(ring, amdgpu_gds_reg_offset[vmid].mem_base);
4117 amdgpu_ring_write(ring, 0);
4118 amdgpu_ring_write(ring, gds_base);
4121 amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
4122 amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
4123 WRITE_DATA_DST_SEL(0)));
4124 amdgpu_ring_write(ring, amdgpu_gds_reg_offset[vmid].mem_size);
4125 amdgpu_ring_write(ring, 0);
4126 amdgpu_ring_write(ring, gds_size);
4129 amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
4130 amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
4131 WRITE_DATA_DST_SEL(0)));
4132 amdgpu_ring_write(ring, amdgpu_gds_reg_offset[vmid].gws);
4133 amdgpu_ring_write(ring, 0);
4134 amdgpu_ring_write(ring, gws_size << GDS_GWS_VMID0__SIZE__SHIFT | gws_base);
4137 amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
4138 amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
4139 WRITE_DATA_DST_SEL(0)));
4140 amdgpu_ring_write(ring, amdgpu_gds_reg_offset[vmid].oa);
4141 amdgpu_ring_write(ring, 0);
4142 amdgpu_ring_write(ring, (1 << (oa_size + oa_base)) - (1 << oa_base));
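/*
 * Soft recovery: write SQ_CMD to kill the waves of the hung job's VMID,
 * a lighter-weight alternative to a full GPU reset.
 */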
4145 static void gfx_v7_0_ring_soft_recovery(struct amdgpu_ring *ring, unsigned vmid)
4147 struct amdgpu_device *adev = ring->adev;
4150 value = REG_SET_FIELD(value, SQ_CMD, CMD, 0x03);
4151 value = REG_SET_FIELD(value, SQ_CMD, MODE, 0x01);
4152 value = REG_SET_FIELD(value, SQ_CMD, CHECK_VMID, 1);
4153 value = REG_SET_FIELD(value, SQ_CMD, VM_ID, vmid);
4154 WREG32(mmSQ_CMD, value);
4157 static uint32_t wave_read_ind(struct amdgpu_device *adev, uint32_t simd, uint32_t wave, uint32_t address)
4159 WREG32(mmSQ_IND_INDEX,
4160 (wave << SQ_IND_INDEX__WAVE_ID__SHIFT) |
4161 (simd << SQ_IND_INDEX__SIMD_ID__SHIFT) |
4162 (address << SQ_IND_INDEX__INDEX__SHIFT) |
4163 (SQ_IND_INDEX__FORCE_READ_MASK));
4164 return RREG32(mmSQ_IND_DATA);
4167 static void wave_read_regs(struct amdgpu_device *adev, uint32_t simd,
4168 uint32_t wave, uint32_t thread,
4169 uint32_t regno, uint32_t num, uint32_t *out)
4171 WREG32(mmSQ_IND_INDEX,
4172 (wave << SQ_IND_INDEX__WAVE_ID__SHIFT) |
4173 (simd << SQ_IND_INDEX__SIMD_ID__SHIFT) |
4174 (regno << SQ_IND_INDEX__INDEX__SHIFT) |
4175 (thread << SQ_IND_INDEX__THREAD_ID__SHIFT) |
4176 (SQ_IND_INDEX__FORCE_READ_MASK) |
4177 (SQ_IND_INDEX__AUTO_INCR_MASK));
4179 *(out++) = RREG32(mmSQ_IND_DATA);
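/* Read type-0 wave debug state via the SQ indirect register interface. */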
4182 static void gfx_v7_0_read_wave_data(struct amdgpu_device *adev, uint32_t simd, uint32_t wave, uint32_t *dst, int *no_fields)
4184 /* type 0 wave data */
4185 dst[(*no_fields)++] = 0;
4186 dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_STATUS);
4187 dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_PC_LO);
4188 dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_PC_HI);
4189 dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_EXEC_LO);
4190 dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_EXEC_HI);
4191 dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_HW_ID);
4192 dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_INST_DW0);
4193 dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_INST_DW1);
4194 dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_GPR_ALLOC);
4195 dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_LDS_ALLOC);
4196 dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_TRAPSTS);
4197 dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_IB_STS);
4198 dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_TBA_LO);
4199 dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_TBA_HI);
4200 dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_TMA_LO);
4201 dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_TMA_HI);
4202 dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_IB_DBG0);
4203 dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_M0);
4206 static void gfx_v7_0_read_wave_sgprs(struct amdgpu_device *adev, uint32_t simd,
4207 uint32_t wave, uint32_t start,
4208 uint32_t size, uint32_t *dst)
4211 adev, simd, wave, 0,
4212 start + SQIND_WAVE_SGPRS_OFFSET, size, dst);
4215 static void gfx_v7_0_select_me_pipe_q(struct amdgpu_device *adev,
4216 u32 me, u32 pipe, u32 q, u32 vm)
4218 cik_srbm_select(adev, me, pipe, q, vm);
4221 static const struct amdgpu_gfx_funcs gfx_v7_0_gfx_funcs = {
4222 .get_gpu_clock_counter = &gfx_v7_0_get_gpu_clock_counter,
4223 .select_se_sh = &gfx_v7_0_select_se_sh,
4224 .read_wave_data = &gfx_v7_0_read_wave_data,
4225 .read_wave_sgprs = &gfx_v7_0_read_wave_sgprs,
4226 .select_me_pipe_q = &gfx_v7_0_select_me_pipe_q
4229 static const struct amdgpu_rlc_funcs gfx_v7_0_rlc_funcs = {
4230 .is_rlc_enabled = gfx_v7_0_is_rlc_enabled,
4231 .set_safe_mode = gfx_v7_0_set_safe_mode,
4232 .unset_safe_mode = gfx_v7_0_unset_safe_mode,
4233 .init = gfx_v7_0_rlc_init,
4234 .get_csb_size = gfx_v7_0_get_csb_size,
4235 .get_csb_buffer = gfx_v7_0_get_csb_buffer,
4236 .get_cp_table_num = gfx_v7_0_cp_pg_table_num,
4237 .resume = gfx_v7_0_rlc_resume,
4238 .stop = gfx_v7_0_rlc_stop,
4239 .reset = gfx_v7_0_rlc_reset,
4240 .start = gfx_v7_0_rlc_start,
4241 .update_spm_vmid = gfx_v7_0_update_spm_vmid
4244 static int gfx_v7_0_early_init(void *handle)
4246 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
4248 adev->gfx.num_gfx_rings = GFX7_NUM_GFX_RINGS;
4249 adev->gfx.num_compute_rings = AMDGPU_MAX_COMPUTE_RINGS;
4250 adev->gfx.funcs = &gfx_v7_0_gfx_funcs;
4251 adev->gfx.rlc.funcs = &gfx_v7_0_rlc_funcs;
4252 gfx_v7_0_set_ring_funcs(adev);
4253 gfx_v7_0_set_irq_funcs(adev);
4254 gfx_v7_0_set_gds_init(adev);
4259 static int gfx_v7_0_late_init(void *handle)
4261 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
4264 r = amdgpu_irq_get(adev, &adev->gfx.priv_reg_irq, 0);
4268 r = amdgpu_irq_get(adev, &adev->gfx.priv_inst_irq, 0);
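/**
 * gfx_v7_0_gpu_early_init - set up the per-ASIC gfx configuration
 *
 * @adev: amdgpu_device pointer
 *
 * Fills adev->gfx.config with the shader engine, CU and backend limits
 * for each supported ASIC, derives the memory row size from the MC
 * configuration (or the DIMM fuses on APUs), and fixes up the row size
 * field of GB_ADDR_CONFIG.
 */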
4275 static void gfx_v7_0_gpu_early_init(struct amdgpu_device *adev)
4279 u32 dimm00_addr_map, dimm01_addr_map, dimm10_addr_map, dimm11_addr_map;
4282 switch (adev->asic_type) {
4284 adev->gfx.config.max_shader_engines = 2;
4285 adev->gfx.config.max_tile_pipes = 4;
4286 adev->gfx.config.max_cu_per_sh = 7;
4287 adev->gfx.config.max_sh_per_se = 1;
4288 adev->gfx.config.max_backends_per_se = 2;
4289 adev->gfx.config.max_texture_channel_caches = 4;
4290 adev->gfx.config.max_gprs = 256;
4291 adev->gfx.config.max_gs_threads = 32;
4292 adev->gfx.config.max_hw_contexts = 8;
4294 adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
4295 adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
4296 adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
4297 adev->gfx.config.sc_earlyz_tile_fifo_size = 0x130;
4298 gb_addr_config = BONAIRE_GB_ADDR_CONFIG_GOLDEN;
4301 adev->gfx.config.max_shader_engines = 4;
4302 adev->gfx.config.max_tile_pipes = 16;
4303 adev->gfx.config.max_cu_per_sh = 11;
4304 adev->gfx.config.max_sh_per_se = 1;
4305 adev->gfx.config.max_backends_per_se = 4;
4306 adev->gfx.config.max_texture_channel_caches = 16;
4307 adev->gfx.config.max_gprs = 256;
4308 adev->gfx.config.max_gs_threads = 32;
4309 adev->gfx.config.max_hw_contexts = 8;
4311 adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
4312 adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
4313 adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
4314 adev->gfx.config.sc_earlyz_tile_fifo_size = 0x130;
4315 gb_addr_config = HAWAII_GB_ADDR_CONFIG_GOLDEN;
4318 adev->gfx.config.max_shader_engines = 1;
4319 adev->gfx.config.max_tile_pipes = 4;
4320 adev->gfx.config.max_cu_per_sh = 8;
4321 adev->gfx.config.max_backends_per_se = 2;
4322 adev->gfx.config.max_sh_per_se = 1;
4323 adev->gfx.config.max_texture_channel_caches = 4;
4324 adev->gfx.config.max_gprs = 256;
4325 adev->gfx.config.max_gs_threads = 16;
4326 adev->gfx.config.max_hw_contexts = 8;
4328 adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
4329 adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
4330 adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
4331 adev->gfx.config.sc_earlyz_tile_fifo_size = 0x130;
4332 gb_addr_config = BONAIRE_GB_ADDR_CONFIG_GOLDEN;
4337 adev->gfx.config.max_shader_engines = 1;
4338 adev->gfx.config.max_tile_pipes = 2;
4339 adev->gfx.config.max_cu_per_sh = 2;
4340 adev->gfx.config.max_sh_per_se = 1;
4341 adev->gfx.config.max_backends_per_se = 1;
4342 adev->gfx.config.max_texture_channel_caches = 2;
4343 adev->gfx.config.max_gprs = 256;
4344 adev->gfx.config.max_gs_threads = 16;
4345 adev->gfx.config.max_hw_contexts = 8;
4347 adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
4348 adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
4349 adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
4350 adev->gfx.config.sc_earlyz_tile_fifo_size = 0x130;
4351 gb_addr_config = BONAIRE_GB_ADDR_CONFIG_GOLDEN;
4355 adev->gfx.config.mc_arb_ramcfg = RREG32(mmMC_ARB_RAMCFG);
4356 mc_arb_ramcfg = adev->gfx.config.mc_arb_ramcfg;
4358 adev->gfx.config.num_banks = REG_GET_FIELD(mc_arb_ramcfg,
4359 MC_ARB_RAMCFG, NOOFBANK);
4360 adev->gfx.config.num_ranks = REG_GET_FIELD(mc_arb_ramcfg,
4361 MC_ARB_RAMCFG, NOOFRANKS);
4363 adev->gfx.config.num_tile_pipes = adev->gfx.config.max_tile_pipes;
4364 adev->gfx.config.mem_max_burst_length_bytes = 256;
4365 if (adev->flags & AMD_IS_APU) {
4366 /* Get memory bank mapping mode. */
4367 tmp = RREG32(mmMC_FUS_DRAM0_BANK_ADDR_MAPPING);
4368 dimm00_addr_map = REG_GET_FIELD(tmp, MC_FUS_DRAM0_BANK_ADDR_MAPPING, DIMM0ADDRMAP);
4369 dimm01_addr_map = REG_GET_FIELD(tmp, MC_FUS_DRAM0_BANK_ADDR_MAPPING, DIMM1ADDRMAP);
4371 tmp = RREG32(mmMC_FUS_DRAM1_BANK_ADDR_MAPPING);
4372 dimm10_addr_map = REG_GET_FIELD(tmp, MC_FUS_DRAM1_BANK_ADDR_MAPPING, DIMM0ADDRMAP);
4373 dimm11_addr_map = REG_GET_FIELD(tmp, MC_FUS_DRAM1_BANK_ADDR_MAPPING, DIMM1ADDRMAP);
4375 /* Validate settings in case only one DIMM is installed. */
4376 if ((dimm00_addr_map == 0) || (dimm00_addr_map == 3) || (dimm00_addr_map == 4) || (dimm00_addr_map > 12))
4377 dimm00_addr_map = 0;
4378 if ((dimm01_addr_map == 0) || (dimm01_addr_map == 3) || (dimm01_addr_map == 4) || (dimm01_addr_map > 12))
4379 dimm01_addr_map = 0;
4380 if ((dimm10_addr_map == 0) || (dimm10_addr_map == 3) || (dimm10_addr_map == 4) || (dimm10_addr_map > 12))
4381 dimm10_addr_map = 0;
4382 if ((dimm11_addr_map == 0) || (dimm11_addr_map == 3) || (dimm11_addr_map == 4) || (dimm11_addr_map > 12))
4383 dimm11_addr_map = 0;
4385 /* If the DIMM address map is 8GB, the row size should be 2KB; otherwise 1KB. */
4386 /* If ROW size(DIMM1) != ROW size(DIMM0), use the larger row size. */
4387 if ((dimm00_addr_map == 11) || (dimm01_addr_map == 11) || (dimm10_addr_map == 11) || (dimm11_addr_map == 11))
4388 adev->gfx.config.mem_row_size_in_kb = 2;
4390 adev->gfx.config.mem_row_size_in_kb = 1;
4392 tmp = (mc_arb_ramcfg & MC_ARB_RAMCFG__NOOFCOLS_MASK) >> MC_ARB_RAMCFG__NOOFCOLS__SHIFT;
4393 adev->gfx.config.mem_row_size_in_kb = (4 * (1 << (8 + tmp))) / 1024;
4394 if (adev->gfx.config.mem_row_size_in_kb > 4)
4395 adev->gfx.config.mem_row_size_in_kb = 4;
4397 /* XXX use MC settings? */
4398 adev->gfx.config.shader_engine_tile_size = 32;
4399 adev->gfx.config.num_gpus = 1;
4400 adev->gfx.config.multi_gpu_tile_size = 64;
4402 /* fix up row size */
4403 gb_addr_config &= ~GB_ADDR_CONFIG__ROW_SIZE_MASK;
4404 switch (adev->gfx.config.mem_row_size_in_kb) {
4407 gb_addr_config |= (0 << GB_ADDR_CONFIG__ROW_SIZE__SHIFT);
4410 gb_addr_config |= (1 << GB_ADDR_CONFIG__ROW_SIZE__SHIFT);
4413 gb_addr_config |= (2 << GB_ADDR_CONFIG__ROW_SIZE__SHIFT);
4416 adev->gfx.config.gb_addr_config = gb_addr_config;
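/*
 * Initialize one compute ring on the given MEC/pipe/queue, assign its
 * doorbell and attach it to the matching EOP interrupt source.
 */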
4419 static int gfx_v7_0_compute_ring_init(struct amdgpu_device *adev, int ring_id,
4420 int mec, int pipe, int queue)
4424 struct amdgpu_ring *ring = &adev->gfx.compute_ring[ring_id];
4429 ring->queue = queue;
4431 ring->ring_obj = NULL;
4432 ring->use_doorbell = true;
4433 ring->doorbell_index = adev->doorbell_index.mec_ring0 + ring_id;
4434 sprintf(ring->name, "comp_%d.%d.%d", ring->me, ring->pipe, ring->queue);
4436 irq_type = AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP
4437 + ((ring->me - 1) * adev->gfx.mec.num_pipe_per_mec)
4440 /* type-2 packets are deprecated on MEC, use type-3 instead */
4441 r = amdgpu_ring_init(adev, ring, 1024,
4442 &adev->gfx.eop_irq, irq_type);
4450 static int gfx_v7_0_sw_init(void *handle)
4452 struct amdgpu_ring *ring;
4453 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
4454 int i, j, k, r, ring_id;
4456 switch (adev->asic_type) {
4458 adev->gfx.mec.num_mec = 2;
4465 adev->gfx.mec.num_mec = 1;
4468 adev->gfx.mec.num_pipe_per_mec = 4;
4469 adev->gfx.mec.num_queue_per_pipe = 8;
4472 r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, 181, &adev->gfx.eop_irq);
4476 /* Privileged reg */
4477 r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, 184,
4478 &adev->gfx.priv_reg_irq);
4482 /* Privileged inst */
4483 r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, 185,
4484 &adev->gfx.priv_inst_irq);
4488 gfx_v7_0_scratch_init(adev);
4490 r = gfx_v7_0_init_microcode(adev);
4492 DRM_ERROR("Failed to load gfx firmware!\n");
4496 r = adev->gfx.rlc.funcs->init(adev);
4498 DRM_ERROR("Failed to init rlc BOs!\n");
4502 /* allocate mec buffers */
4503 r = gfx_v7_0_mec_init(adev);
4505 DRM_ERROR("Failed to init MEC BOs!\n");
4509 for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
4510 ring = &adev->gfx.gfx_ring[i];
4511 ring->ring_obj = NULL;
4512 sprintf(ring->name, "gfx");
4513 r = amdgpu_ring_init(adev, ring, 1024,
4514 &adev->gfx.eop_irq, AMDGPU_CP_IRQ_GFX_ME0_PIPE0_EOP);
4519 /* set up the compute queues - allocate horizontally across pipes */
4521 for (i = 0; i < adev->gfx.mec.num_mec; ++i) {
4522 for (j = 0; j < adev->gfx.mec.num_queue_per_pipe; j++) {
4523 for (k = 0; k < adev->gfx.mec.num_pipe_per_mec; k++) {
4524 if (!amdgpu_gfx_is_mec_queue_enabled(adev, i, k, j))
4527 r = gfx_v7_0_compute_ring_init(adev,
4538 adev->gfx.ce_ram_size = 0x8000;
4540 gfx_v7_0_gpu_early_init(adev);
4545 static int gfx_v7_0_sw_fini(void *handle)
4547 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
4550 for (i = 0; i < adev->gfx.num_gfx_rings; i++)
4551 amdgpu_ring_fini(&adev->gfx.gfx_ring[i]);
4552 for (i = 0; i < adev->gfx.num_compute_rings; i++)
4553 amdgpu_ring_fini(&adev->gfx.compute_ring[i]);
4555 gfx_v7_0_cp_compute_fini(adev);
4556 amdgpu_gfx_rlc_fini(adev);
4557 gfx_v7_0_mec_fini(adev);
4558 amdgpu_bo_free_kernel(&adev->gfx.rlc.clear_state_obj,
4559 &adev->gfx.rlc.clear_state_gpu_addr,
4560 (void **)&adev->gfx.rlc.cs_ptr);
4561 if (adev->gfx.rlc.cp_table_size) {
4562 amdgpu_bo_free_kernel(&adev->gfx.rlc.cp_table_obj,
4563 &adev->gfx.rlc.cp_table_gpu_addr,
4564 (void **)&adev->gfx.rlc.cp_table_ptr);
4566 gfx_v7_0_free_microcode(adev);
4571 static int gfx_v7_0_hw_init(void *handle)
4574 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
4576 gfx_v7_0_constants_init(adev);
4579 adev->gfx.rlc.funcs->get_csb_buffer(adev, adev->gfx.rlc.cs_ptr);
4581 r = adev->gfx.rlc.funcs->resume(adev);
4585 r = gfx_v7_0_cp_resume(adev);
4592 static int gfx_v7_0_hw_fini(void *handle)
4594 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
4596 amdgpu_irq_put(adev, &adev->gfx.priv_reg_irq, 0);
4597 amdgpu_irq_put(adev, &adev->gfx.priv_inst_irq, 0);
4598 gfx_v7_0_cp_enable(adev, false);
4599 adev->gfx.rlc.funcs->stop(adev);
4600 gfx_v7_0_fini_pg(adev);
4605 static int gfx_v7_0_suspend(void *handle)
4607 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
4609 return gfx_v7_0_hw_fini(adev);
4612 static int gfx_v7_0_resume(void *handle)
4614 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
4616 return gfx_v7_0_hw_init(adev);
4619 static bool gfx_v7_0_is_idle(void *handle)
4621 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
4623 if (RREG32(mmGRBM_STATUS) & GRBM_STATUS__GUI_ACTIVE_MASK)
4629 static int gfx_v7_0_wait_for_idle(void *handle)
4633 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
4635 for (i = 0; i < adev->usec_timeout; i++) {
4636 /* read GRBM_STATUS */
4637 tmp = RREG32(mmGRBM_STATUS) & GRBM_STATUS__GUI_ACTIVE_MASK;
4646 static int gfx_v7_0_soft_reset(void *handle)
4648 u32 grbm_soft_reset = 0, srbm_soft_reset = 0;
4650 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
4653 tmp = RREG32(mmGRBM_STATUS);
4654 if (tmp & (GRBM_STATUS__PA_BUSY_MASK | GRBM_STATUS__SC_BUSY_MASK |
4655 GRBM_STATUS__BCI_BUSY_MASK | GRBM_STATUS__SX_BUSY_MASK |
4656 GRBM_STATUS__TA_BUSY_MASK | GRBM_STATUS__VGT_BUSY_MASK |
4657 GRBM_STATUS__DB_BUSY_MASK | GRBM_STATUS__CB_BUSY_MASK |
4658 GRBM_STATUS__GDS_BUSY_MASK | GRBM_STATUS__SPI_BUSY_MASK |
4659 GRBM_STATUS__IA_BUSY_MASK | GRBM_STATUS__IA_BUSY_NO_DMA_MASK))
4660 grbm_soft_reset |= GRBM_SOFT_RESET__SOFT_RESET_CP_MASK |
4661 GRBM_SOFT_RESET__SOFT_RESET_GFX_MASK;
4663 if (tmp & (GRBM_STATUS__CP_BUSY_MASK | GRBM_STATUS__CP_COHERENCY_BUSY_MASK)) {
4664 grbm_soft_reset |= GRBM_SOFT_RESET__SOFT_RESET_CP_MASK;
4665 srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_GRBM_MASK;
4669 tmp = RREG32(mmGRBM_STATUS2);
4670 if (tmp & GRBM_STATUS2__RLC_BUSY_MASK)
4671 grbm_soft_reset |= GRBM_SOFT_RESET__SOFT_RESET_RLC_MASK;
4674 tmp = RREG32(mmSRBM_STATUS);
4675 if (tmp & SRBM_STATUS__GRBM_RQ_PENDING_MASK)
4676 srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_GRBM_MASK;
4678 if (grbm_soft_reset || srbm_soft_reset) {
4680 gfx_v7_0_fini_pg(adev);
4681 gfx_v7_0_update_cg(adev, false);
4684 adev->gfx.rlc.funcs->stop(adev);
4686 /* Disable GFX parsing/prefetching */
4687 WREG32(mmCP_ME_CNTL, CP_ME_CNTL__ME_HALT_MASK | CP_ME_CNTL__PFP_HALT_MASK | CP_ME_CNTL__CE_HALT_MASK);
4689 /* Disable MEC parsing/prefetching */
4690 WREG32(mmCP_MEC_CNTL, CP_MEC_CNTL__MEC_ME1_HALT_MASK | CP_MEC_CNTL__MEC_ME2_HALT_MASK);
4692 if (grbm_soft_reset) {
4693 tmp = RREG32(mmGRBM_SOFT_RESET);
4694 tmp |= grbm_soft_reset;
4695 dev_info(adev->dev, "GRBM_SOFT_RESET=0x%08X\n", tmp);
4696 WREG32(mmGRBM_SOFT_RESET, tmp);
4697 tmp = RREG32(mmGRBM_SOFT_RESET);
4701 tmp &= ~grbm_soft_reset;
4702 WREG32(mmGRBM_SOFT_RESET, tmp);
4703 tmp = RREG32(mmGRBM_SOFT_RESET);
4706 if (srbm_soft_reset) {
4707 tmp = RREG32(mmSRBM_SOFT_RESET);
4708 tmp |= srbm_soft_reset;
4709 dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
4710 WREG32(mmSRBM_SOFT_RESET, tmp);
4711 tmp = RREG32(mmSRBM_SOFT_RESET);
4715 tmp &= ~srbm_soft_reset;
4716 WREG32(mmSRBM_SOFT_RESET, tmp);
4717 tmp = RREG32(mmSRBM_SOFT_RESET);
4719 /* Wait a little for things to settle down */
4725 static void gfx_v7_0_set_gfx_eop_interrupt_state(struct amdgpu_device *adev,
4726 enum amdgpu_interrupt_state state)
4731 case AMDGPU_IRQ_STATE_DISABLE:
4732 cp_int_cntl = RREG32(mmCP_INT_CNTL_RING0);
4733 cp_int_cntl &= ~CP_INT_CNTL_RING0__TIME_STAMP_INT_ENABLE_MASK;
4734 WREG32(mmCP_INT_CNTL_RING0, cp_int_cntl);
4736 case AMDGPU_IRQ_STATE_ENABLE:
4737 cp_int_cntl = RREG32(mmCP_INT_CNTL_RING0);
4738 cp_int_cntl |= CP_INT_CNTL_RING0__TIME_STAMP_INT_ENABLE_MASK;
4739 WREG32(mmCP_INT_CNTL_RING0, cp_int_cntl);
4746 static void gfx_v7_0_set_compute_eop_interrupt_state(struct amdgpu_device *adev,
4748 enum amdgpu_interrupt_state state)
4750 u32 mec_int_cntl, mec_int_cntl_reg;
4753 * amdgpu controls only the first MEC. That's why this function only
4754 * handles the setting of interrupts for this specific MEC. All other
4755 * pipes' interrupts are set by amdkfd.
4761 mec_int_cntl_reg = mmCP_ME1_PIPE0_INT_CNTL;
4764 mec_int_cntl_reg = mmCP_ME1_PIPE1_INT_CNTL;
4767 mec_int_cntl_reg = mmCP_ME1_PIPE2_INT_CNTL;
4770 mec_int_cntl_reg = mmCP_ME1_PIPE3_INT_CNTL;
4773 DRM_DEBUG("invalid pipe %d\n", pipe);
4777 DRM_DEBUG("invalid me %d\n", me);
4782 case AMDGPU_IRQ_STATE_DISABLE:
4783 mec_int_cntl = RREG32(mec_int_cntl_reg);
4784 mec_int_cntl &= ~CP_INT_CNTL_RING0__TIME_STAMP_INT_ENABLE_MASK;
4785 WREG32(mec_int_cntl_reg, mec_int_cntl);
4787 case AMDGPU_IRQ_STATE_ENABLE:
4788 mec_int_cntl = RREG32(mec_int_cntl_reg);
4789 mec_int_cntl |= CP_INT_CNTL_RING0__TIME_STAMP_INT_ENABLE_MASK;
4790 WREG32(mec_int_cntl_reg, mec_int_cntl);
4797 static int gfx_v7_0_set_priv_reg_fault_state(struct amdgpu_device *adev,
4798 struct amdgpu_irq_src *src,
4800 enum amdgpu_interrupt_state state)
4805 case AMDGPU_IRQ_STATE_DISABLE:
4806 cp_int_cntl = RREG32(mmCP_INT_CNTL_RING0);
4807 cp_int_cntl &= ~CP_INT_CNTL_RING0__PRIV_REG_INT_ENABLE_MASK;
4808 WREG32(mmCP_INT_CNTL_RING0, cp_int_cntl);
4810 case AMDGPU_IRQ_STATE_ENABLE:
4811 cp_int_cntl = RREG32(mmCP_INT_CNTL_RING0);
4812 cp_int_cntl |= CP_INT_CNTL_RING0__PRIV_REG_INT_ENABLE_MASK;
4813 WREG32(mmCP_INT_CNTL_RING0, cp_int_cntl);
4822 static int gfx_v7_0_set_priv_inst_fault_state(struct amdgpu_device *adev,
4823 struct amdgpu_irq_src *src,
4825 enum amdgpu_interrupt_state state)
4830 case AMDGPU_IRQ_STATE_DISABLE:
4831 cp_int_cntl = RREG32(mmCP_INT_CNTL_RING0);
4832 cp_int_cntl &= ~CP_INT_CNTL_RING0__PRIV_INSTR_INT_ENABLE_MASK;
4833 WREG32(mmCP_INT_CNTL_RING0, cp_int_cntl);
4835 case AMDGPU_IRQ_STATE_ENABLE:
4836 cp_int_cntl = RREG32(mmCP_INT_CNTL_RING0);
4837 cp_int_cntl |= CP_INT_CNTL_RING0__PRIV_INSTR_INT_ENABLE_MASK;
4838 WREG32(mmCP_INT_CNTL_RING0, cp_int_cntl);
4847 static int gfx_v7_0_set_eop_interrupt_state(struct amdgpu_device *adev,
4848 struct amdgpu_irq_src *src,
4850 enum amdgpu_interrupt_state state)
4853 case AMDGPU_CP_IRQ_GFX_ME0_PIPE0_EOP:
4854 gfx_v7_0_set_gfx_eop_interrupt_state(adev, state);
4856 case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP:
4857 gfx_v7_0_set_compute_eop_interrupt_state(adev, 1, 0, state);
4859 case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE1_EOP:
4860 gfx_v7_0_set_compute_eop_interrupt_state(adev, 1, 1, state);
4862 case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE2_EOP:
4863 gfx_v7_0_set_compute_eop_interrupt_state(adev, 1, 2, state);
4865 case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE3_EOP:
4866 gfx_v7_0_set_compute_eop_interrupt_state(adev, 1, 3, state);
4868 case AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE0_EOP:
4869 gfx_v7_0_set_compute_eop_interrupt_state(adev, 2, 0, state);
4871 case AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE1_EOP:
4872 gfx_v7_0_set_compute_eop_interrupt_state(adev, 2, 1, state);
4874 case AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE2_EOP:
4875 gfx_v7_0_set_compute_eop_interrupt_state(adev, 2, 2, state);
4877 case AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE3_EOP:
4878 gfx_v7_0_set_compute_eop_interrupt_state(adev, 2, 3, state);
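/*
 * EOP handler: decode me/pipe from the IV ring_id and run fence
 * processing on the matching gfx or compute ring.
 */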
4886 static int gfx_v7_0_eop_irq(struct amdgpu_device *adev,
4887 struct amdgpu_irq_src *source,
4888 struct amdgpu_iv_entry *entry)
4891 struct amdgpu_ring *ring;
4894 DRM_DEBUG("IH: CP EOP\n");
4895 me_id = (entry->ring_id & 0x0c) >> 2;
4896 pipe_id = (entry->ring_id & 0x03) >> 0;
4899 amdgpu_fence_process(&adev->gfx.gfx_ring[0]);
4903 for (i = 0; i < adev->gfx.num_compute_rings; i++) {
4904 ring = &adev->gfx.compute_ring[i];
4905 if ((ring->me == me_id) && (ring->pipe == pipe_id))
4906 amdgpu_fence_process(ring);
4913 static void gfx_v7_0_fault(struct amdgpu_device *adev,
4914 struct amdgpu_iv_entry *entry)
4916 struct amdgpu_ring *ring;
4920 me_id = (entry->ring_id & 0x0c) >> 2;
4921 pipe_id = (entry->ring_id & 0x03) >> 0;
4924 drm_sched_fault(&adev->gfx.gfx_ring[0].sched);
4928 for (i = 0; i < adev->gfx.num_compute_rings; i++) {
4929 ring = &adev->gfx.compute_ring[i];
4930 if ((ring->me == me_id) && (ring->pipe == pipe_id))
4931 drm_sched_fault(&ring->sched);
4937 static int gfx_v7_0_priv_reg_irq(struct amdgpu_device *adev,
4938 struct amdgpu_irq_src *source,
4939 struct amdgpu_iv_entry *entry)
4941 DRM_ERROR("Illegal register access in command stream\n");
4942 gfx_v7_0_fault(adev, entry);
4946 static int gfx_v7_0_priv_inst_irq(struct amdgpu_device *adev,
4947 struct amdgpu_irq_src *source,
4948 struct amdgpu_iv_entry *entry)
4950 DRM_ERROR("Illegal instruction in command stream\n");
4951 /* XXX soft reset the gfx block only */
4952 gfx_v7_0_fault(adev, entry);
4956 static int gfx_v7_0_set_clockgating_state(void *handle,
4957 enum amd_clockgating_state state)
4960 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
4962 if (state == AMD_CG_STATE_GATE)
4965 gfx_v7_0_enable_gui_idle_interrupt(adev, false);
4966 /* order matters! */
4968 gfx_v7_0_enable_mgcg(adev, true);
4969 gfx_v7_0_enable_cgcg(adev, true);
4971 gfx_v7_0_enable_cgcg(adev, false);
4972 gfx_v7_0_enable_mgcg(adev, false);
4974 gfx_v7_0_enable_gui_idle_interrupt(adev, true);
4979 static int gfx_v7_0_set_powergating_state(void *handle,
4980 enum amd_powergating_state state)
4983 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
4985 if (state == AMD_PG_STATE_GATE)
4988 if (adev->pg_flags & (AMD_PG_SUPPORT_GFX_PG |
4989 AMD_PG_SUPPORT_GFX_SMG |
4990 AMD_PG_SUPPORT_GFX_DMG |
4992 AMD_PG_SUPPORT_GDS |
4993 AMD_PG_SUPPORT_RLC_SMU_HS)) {
4994 gfx_v7_0_update_gfx_pg(adev, gate);
4995 if (adev->pg_flags & AMD_PG_SUPPORT_GFX_PG) {
4996 gfx_v7_0_enable_cp_pg(adev, gate);
4997 gfx_v7_0_enable_gds_pg(adev, gate);
5004 static const struct amd_ip_funcs gfx_v7_0_ip_funcs = {
5006 .early_init = gfx_v7_0_early_init,
5007 .late_init = gfx_v7_0_late_init,
5008 .sw_init = gfx_v7_0_sw_init,
5009 .sw_fini = gfx_v7_0_sw_fini,
5010 .hw_init = gfx_v7_0_hw_init,
5011 .hw_fini = gfx_v7_0_hw_fini,
5012 .suspend = gfx_v7_0_suspend,
5013 .resume = gfx_v7_0_resume,
5014 .is_idle = gfx_v7_0_is_idle,
5015 .wait_for_idle = gfx_v7_0_wait_for_idle,
5016 .soft_reset = gfx_v7_0_soft_reset,
5017 .set_clockgating_state = gfx_v7_0_set_clockgating_state,
5018 .set_powergating_state = gfx_v7_0_set_powergating_state,
5021 static const struct amdgpu_ring_funcs gfx_v7_0_ring_funcs_gfx = {
5022 .type = AMDGPU_RING_TYPE_GFX,
5024 .nop = PACKET3(PACKET3_NOP, 0x3FFF),
5025 .support_64bit_ptrs = false,
5026 .get_rptr = gfx_v7_0_ring_get_rptr,
5027 .get_wptr = gfx_v7_0_ring_get_wptr_gfx,
5028 .set_wptr = gfx_v7_0_ring_set_wptr_gfx,
5030 20 + /* gfx_v7_0_ring_emit_gds_switch */
5031 7 + /* gfx_v7_0_ring_emit_hdp_flush */
5032 5 + /* hdp invalidate */
5033 12 + 12 + 12 + /* gfx_v7_0_ring_emit_fence_gfx x3 for user fence, vm fence */
5034 7 + 4 + /* gfx_v7_0_ring_emit_pipeline_sync */
5035 CIK_FLUSH_GPU_TLB_NUM_WREG * 5 + 7 + 6 + /* gfx_v7_0_ring_emit_vm_flush */
5036 3 + 4, /* gfx_v7_ring_emit_cntxcntl including vgt flush */
5037 .emit_ib_size = 4, /* gfx_v7_0_ring_emit_ib_gfx */
5038 .emit_ib = gfx_v7_0_ring_emit_ib_gfx,
5039 .emit_fence = gfx_v7_0_ring_emit_fence_gfx,
5040 .emit_pipeline_sync = gfx_v7_0_ring_emit_pipeline_sync,
5041 .emit_vm_flush = gfx_v7_0_ring_emit_vm_flush,
5042 .emit_gds_switch = gfx_v7_0_ring_emit_gds_switch,
5043 .emit_hdp_flush = gfx_v7_0_ring_emit_hdp_flush,
5044 .test_ring = gfx_v7_0_ring_test_ring,
5045 .test_ib = gfx_v7_0_ring_test_ib,
5046 .insert_nop = amdgpu_ring_insert_nop,
5047 .pad_ib = amdgpu_ring_generic_pad_ib,
5048 .emit_cntxcntl = gfx_v7_ring_emit_cntxcntl,
5049 .emit_wreg = gfx_v7_0_ring_emit_wreg,
5050 .soft_recovery = gfx_v7_0_ring_soft_recovery,
5053 static const struct amdgpu_ring_funcs gfx_v7_0_ring_funcs_compute = {
5054 .type = AMDGPU_RING_TYPE_COMPUTE,
5056 .nop = PACKET3(PACKET3_NOP, 0x3FFF),
5057 .support_64bit_ptrs = false,
5058 .get_rptr = gfx_v7_0_ring_get_rptr,
5059 .get_wptr = gfx_v7_0_ring_get_wptr_compute,
5060 .set_wptr = gfx_v7_0_ring_set_wptr_compute,
5062 20 + /* gfx_v7_0_ring_emit_gds_switch */
5063 7 + /* gfx_v7_0_ring_emit_hdp_flush */
5064 5 + /* hdp invalidate */
5065 7 + /* gfx_v7_0_ring_emit_pipeline_sync */
5066 CIK_FLUSH_GPU_TLB_NUM_WREG * 5 + 7 + /* gfx_v7_0_ring_emit_vm_flush */
5067 7 + 7 + 7, /* gfx_v7_0_ring_emit_fence_compute x3 for user fence, vm fence */
5068 .emit_ib_size = 7, /* gfx_v7_0_ring_emit_ib_compute */
5069 .emit_ib = gfx_v7_0_ring_emit_ib_compute,
5070 .emit_fence = gfx_v7_0_ring_emit_fence_compute,
5071 .emit_pipeline_sync = gfx_v7_0_ring_emit_pipeline_sync,
5072 .emit_vm_flush = gfx_v7_0_ring_emit_vm_flush,
5073 .emit_gds_switch = gfx_v7_0_ring_emit_gds_switch,
5074 .emit_hdp_flush = gfx_v7_0_ring_emit_hdp_flush,
5075 .test_ring = gfx_v7_0_ring_test_ring,
5076 .test_ib = gfx_v7_0_ring_test_ib,
5077 .insert_nop = amdgpu_ring_insert_nop,
5078 .pad_ib = amdgpu_ring_generic_pad_ib,
5079 .emit_wreg = gfx_v7_0_ring_emit_wreg,
5082 static void gfx_v7_0_set_ring_funcs(struct amdgpu_device *adev)
5086 for (i = 0; i < adev->gfx.num_gfx_rings; i++)
5087 adev->gfx.gfx_ring[i].funcs = &gfx_v7_0_ring_funcs_gfx;
5088 for (i = 0; i < adev->gfx.num_compute_rings; i++)
5089 adev->gfx.compute_ring[i].funcs = &gfx_v7_0_ring_funcs_compute;
5092 static const struct amdgpu_irq_src_funcs gfx_v7_0_eop_irq_funcs = {
5093 .set = gfx_v7_0_set_eop_interrupt_state,
5094 .process = gfx_v7_0_eop_irq,
5097 static const struct amdgpu_irq_src_funcs gfx_v7_0_priv_reg_irq_funcs = {
5098 .set = gfx_v7_0_set_priv_reg_fault_state,
5099 .process = gfx_v7_0_priv_reg_irq,
5102 static const struct amdgpu_irq_src_funcs gfx_v7_0_priv_inst_irq_funcs = {
5103 .set = gfx_v7_0_set_priv_inst_fault_state,
5104 .process = gfx_v7_0_priv_inst_irq,
5107 static void gfx_v7_0_set_irq_funcs(struct amdgpu_device *adev)
5109 adev->gfx.eop_irq.num_types = AMDGPU_CP_IRQ_LAST;
5110 adev->gfx.eop_irq.funcs = &gfx_v7_0_eop_irq_funcs;
5112 adev->gfx.priv_reg_irq.num_types = 1;
5113 adev->gfx.priv_reg_irq.funcs = &gfx_v7_0_priv_reg_irq_funcs;
5115 adev->gfx.priv_inst_irq.num_types = 1;
5116 adev->gfx.priv_inst_irq.funcs = &gfx_v7_0_priv_inst_irq_funcs;
5119 static void gfx_v7_0_set_gds_init(struct amdgpu_device *adev)
5121 /* init asic gds info */
5122 adev->gds.gds_size = RREG32(mmGDS_VMID0_SIZE);
5123 adev->gds.gws_size = 64;
5124 adev->gds.oa_size = 16;
5125 adev->gds.gds_compute_max_wave_id = RREG32(mmGDS_COMPUTE_MAX_WAVE_ID);
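/*
 * Walk every SE/SH pair, apply the user-disabled CU masks and collect
 * the active and always-on CU bitmaps into adev->gfx.cu_info.
 */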
5129 static void gfx_v7_0_get_cu_info(struct amdgpu_device *adev)
5131 int i, j, k, counter, active_cu_number = 0;
5132 u32 mask, bitmap, ao_bitmap, ao_cu_mask = 0;
5133 struct amdgpu_cu_info *cu_info = &adev->gfx.cu_info;
5134 unsigned disable_masks[4 * 2];
5137 if (adev->flags & AMD_IS_APU)
5140 ao_cu_num = adev->gfx.config.max_cu_per_sh;
5142 memset(cu_info, 0, sizeof(*cu_info));
5144 amdgpu_gfx_parse_disable_cu(disable_masks, 4, 2);
5146 mutex_lock(&adev->grbm_idx_mutex);
5147 for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
5148 for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
5152 gfx_v7_0_select_se_sh(adev, i, j, 0xffffffff);
5154 gfx_v7_0_set_user_cu_inactive_bitmap(
5155 adev, disable_masks[i * 2 + j]);
5156 bitmap = gfx_v7_0_get_cu_active_bitmap(adev);
5157 cu_info->bitmap[i][j] = bitmap;
5159 for (k = 0; k < adev->gfx.config.max_cu_per_sh; k++) {
5160 if (bitmap & mask) {
5161 if (counter < ao_cu_num)
5167 active_cu_number += counter;
5169 ao_cu_mask |= (ao_bitmap << (i * 16 + j * 8));
5170 cu_info->ao_cu_bitmap[i][j] = ao_bitmap;
5173 gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
5174 mutex_unlock(&adev->grbm_idx_mutex);
5176 cu_info->number = active_cu_number;
5177 cu_info->ao_cu_mask = ao_cu_mask;
5178 cu_info->simd_per_cu = NUM_SIMD_PER_CU;
5179 cu_info->max_waves_per_simd = 10;
5180 cu_info->max_scratch_slots_per_cu = 32;
5181 cu_info->wave_front_size = 64;
5182 cu_info->lds_size = 64;
5185 const struct amdgpu_ip_block_version gfx_v7_0_ip_block =
5187 .type = AMD_IP_BLOCK_TYPE_GFX,
5191 .funcs = &gfx_v7_0_ip_funcs,
5194 const struct amdgpu_ip_block_version gfx_v7_1_ip_block =
5196 .type = AMD_IP_BLOCK_TYPE_GFX,
5200 .funcs = &gfx_v7_0_ip_funcs,
5203 const struct amdgpu_ip_block_version gfx_v7_2_ip_block =
5205 .type = AMD_IP_BLOCK_TYPE_GFX,
5209 .funcs = &gfx_v7_0_ip_funcs,
5212 const struct amdgpu_ip_block_version gfx_v7_3_ip_block =
5214 .type = AMD_IP_BLOCK_TYPE_GFX,
5218 .funcs = &gfx_v7_0_ip_funcs,